diff --git a/.claude/skills/review/SKILL.md b/.claude/skills/review/SKILL.md new file mode 100644 index 00000000000..a0dbd8d80f9 --- /dev/null +++ b/.claude/skills/review/SKILL.md @@ -0,0 +1,18 @@ +--- +name: review +description: Review code for common issues. Use when reviewing PRs, suggesting code changes, or when user asks to review code. +allowed-tools: Read, Grep, Glob +--- + +# Code Review Skill + +Apply these review patterns when reviewing code or suggesting changes. + +@.github/copilot-instructions.md + +## Review Output Format + +When reviewing, organize feedback by priority: +1. **Must fix**: Issues that will cause bugs or break conventions +2. **Should fix**: Redundant code, naming issues, test correctness +3. **Nit**: Style preferences, minor improvements diff --git a/.cursor/rules/review.mdc b/.cursor/rules/review.mdc new file mode 100644 index 00000000000..c8724b3bb4b --- /dev/null +++ b/.cursor/rules/review.mdc @@ -0,0 +1,9 @@ +--- +description: Code review guidelines - apply when reviewing code or suggesting changes +globs: "**/*.go" +alwaysApply: false +--- + +# Code Review Guidelines + +@.github/copilot-instructions.md diff --git a/.dockerignore b/.dockerignore index 41b5cfc42ab..2282bdfdae9 100644 --- a/.dockerignore +++ b/.dockerignore @@ -14,3 +14,4 @@ tdbg temporal-server temporal-cassandra-tool temporal-sql-tool +temporal-elasticsearch-tool diff --git a/.github/.codecov.yml b/.github/.codecov.yml new file mode 100644 index 00000000000..92e2186f227 --- /dev/null +++ b/.github/.codecov.yml @@ -0,0 +1,23 @@ +# see https://docs.codecov.com/docs/ignoring-paths +ignore: + - "**/*_gen.go" + - "**/*_mock.go" + - "**/*.pb.go" + - "**/pb.mock.go" + - "api" + - "cmd" + - "common/testing" + - "tests" + - "tools" + +# disable pull request comments +comment: false + +# disable GitHub checks +github_checks: false + +# disable GitHub status +coverage: + status: + project: off + patch: off diff --git a/.github/.golangci.yml b/.github/.golangci.yml new file 
mode 100644 index 00000000000..46e9021bddf --- /dev/null +++ b/.github/.golangci.yml @@ -0,0 +1,241 @@ +version: "2" + +formatters: + enable: + - gci + - goimports + +linters: + default: none + enable: + - errcheck + - importas + - depguard + - revive + - staticcheck + - govet + - forbidigo + - exhaustive + - godox + - iotamixing + - testifylint + settings: + staticcheck: + checks: + - "all" + - "-ST1000" # disable: package comment is missing + godox: + keywords: + - FIXME + govet: + disable: + - fieldalignment + forbidigo: + forbid: + - pattern: time.Sleep + msg: "Please use await.Require / s.Await unless there's no better option" + - pattern: "^panic$" + msg: "Please avoid using panic in application code" + - pattern: time\.Now + msg: "Using time.Now is not allowed in chasm/lib package (non-test files), use ctx.Now(component) instead" + - pattern: "^Unix$" + msg: "Do not use .Unix() for Cassandra timestamps (returns seconds). Use p.UnixMilliseconds() which returns milliseconds." + - pattern: "^UnixMilli$" + msg: "Do not use .UnixMilli() for Cassandra timestamps. Use p.UnixMilliseconds() for consistency and proper zero-time handling." + - pattern: "^UnixNano$" + msg: "Do not use .UnixNano() for Cassandra timestamps. Use p.UnixMilliseconds() which returns milliseconds." + - pattern: FunctionalTestBase + msg: "FunctionalTestBase is deprecated. Use testcore.NewEnv(t) instead. See docs/development/testing.md for details." + - pattern: context\.Background\(\) + msg: "Avoid context.Background() in tests; use t.Context() to respect test timeouts and cancellation" + - pattern: '(^|\.)(Eventually|Eventuallyf|EventuallyWithT|EventuallyWithTf)(\(|$)' + msg: "Use await.Require / s.Await for assertion conditions, or await.RequireTrue / s.AwaitTrue for bool predicates, instead of testify Eventually helpers" + - pattern: 'assert\.\w+' + msg: "Use require.X / protorequire.X instead of assert.X / protoassert.X — assert doesn't stop the test on failure." 
+ depguard: + rules: + main: + list-mode: lax + deny: + - pkg: github.com/pborman/uuid + desc: "Importing github.com/pborman/uuid is disallowed; use github.com/google/uuid instead" + importas: + # Enforce the aliases below. + no-unaliased: true + # Still allow aliases outside of the rules below. + no-extra-aliases: false + alias: + # no pb services (or their mocks) are aliased - must be at the top! + - pkg: go.temporal.io(/server)?/api/(\w+)service(mock)?/v1 + alias: "" # ie no alias - this can only be specified once! public API pbs have a suffix + - pkg: go.temporal.io/api/(\w+)/v1 + alias: ${1}pb + # internal server pbs have their own suffix to avoid naming conflicts + - pkg: go.temporal.io/server/api/(\w+)/v1 + alias: ${1}spb + testifylint: + disable: + - suite-method-signature # parallelsuite.Run supports extra args passed to Test* methods + exhaustive: + # Presence of "default" case in switch statements satisfies exhaustiveness, + # even if all enum members are not listed. + # Default: false + default-signifies-exhaustive: true + revive: + severity: error + confidence: 0.8 + enable-all-rules: true + rules: + # Disabled rules + - name: add-constant + disabled: true + - name: argument-limit + disabled: true + - name: bare-return + disabled: true + - name: banned-characters + disabled: true + - name: bool-literal-in-expr + disabled: true + - name: confusing-naming + disabled: true + - name: empty-lines + disabled: true + - name: error-naming + disabled: true + - name: errorf + disabled: true + - name: exported + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: imports-blocklist + disabled: true + - name: increment-decrement + disabled: true + - name: line-length-limit + disabled: true + - name: max-public-structs + disabled: true + - name: nested-structs + disabled: true + - name: package-comments + disabled: true + - name: string-format + disabled: true + - name: unexported-naming + disabled: true + - 
name: unexported-return + disabled: true + - name: unused-parameter + disabled: true + - name: unused-receiver + disabled: true + - name: use-any + disabled: true + - name: var-naming + disabled: true + - name: empty-block + disabled: true + - name: flag-parameter + disabled: true + - name: unnecessary-stmt + disabled: true + - name: range-val-in-closure + disabled: true + + # Rule tuning + - name: cognitive-complexity + arguments: + - 25 + - name: cyclomatic + arguments: + - 25 + - name: function-result-limit + arguments: + - 5 + - name: struct-tag + arguments: + - "validate,persistence_custom_search_attributes" + - name: unhandled-error + arguments: + - "fmt.*" + - "bytes.Buffer.*" + - "strings.Builder.*" + exclusions: + paths: + - ^api + - ^proto + - ^.git + rules: + - path-except: _test\.go|tests/.+\.go + text: "time.Sleep" + linters: + - forbidigo + - path-except: chasm/lib/.*\.go$ + text: "time.Now" + linters: + - forbidigo + - path: chasm/lib/.*_test\.go$ + text: "time.Now" + linters: + - forbidigo + # Cassandra timestamp rules only apply to cassandra persistence package + - path-except: common/persistence/cassandra/.*\.go$ + text: "Unix|UnixMilli|UnixNano" + linters: + - forbidigo + # Allow in tests + - path: _test\.go$ + text: "Unix|UnixMilli|UnixNano" + linters: + - forbidigo + - path: _test\.go|tests/.+\.go|common/testing/ + text: "panic" + linters: + - forbidigo + - path: tests/testcore/.*\.go$ # still needed in tests/testcore/ until we remove testify suites entirely + text: "FunctionalTestBase" + linters: + - forbidigo + - path-except: tests/.+_test\.go # only enforce in test files + text: "context.Background" + linters: + - forbidigo + # Existing legacy call sites are tracked separately; keep this PR scoped + # to preventing new usage while migrating touched tests. 
+ - path: tests/(nexus_standalone|nexus_workflow|schedule|schedule_migration)_test\.go$ + text: "Eventually" + linters: + - forbidigo + - path: tests/(nexus_standalone|nexus_workflow)_test\.go$ + text: "assert\\.CollectT" + linters: + - forbidigo + - text: "use of `softassert\\.\\w+`" + linters: + - forbidigo + - path: _test\.go|tests/.+\.go|common/testing/ + text: "(cyclomatic|cognitive)" # false positives when using subtests + linters: + - revive + - path: _test\.go|tests/.+\.go|common/testing/ + text: "(dot-imports|unchecked-type-assertion)" # helpful in tests + linters: + - revive + - path: ^tools\/.+\.go + linters: + - revive +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + +run: + timeout: 10m + modules-download-mode: readonly + +output: + sort-order: + - file diff --git a/.github/.yamlfmt b/.github/.yamlfmt new file mode 100644 index 00000000000..2fed3a427cd --- /dev/null +++ b/.github/.yamlfmt @@ -0,0 +1,18 @@ +# Config for yamlfmt +# see https://github.com/google/yamlfmt/blob/main/docs/config-file.md + +# No need to format untracked files +gitignore_excludes: true + +exclude: + # Go template files + - common/config/config_template_embedded.yaml + - config/docker.yaml + - docker/config_template.yaml + +formatter: + # allow newlines + retain_line_breaks: true + # required for GitHub Actions files + scan_folded_as_literal: true + force_quote_style: double diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 3b074db7990..5e4c0194a92 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,95 @@ # Syntax is here: # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax -* @temporalio/server +* @temporalio/server @temporalio/cgs @temporalio/nexus + +# CHASM + +# This only matches files directly in /chasm/ folder, but not any nested files +/chasm/* @temporalio/oss-foundations + +/chasm/lib/activity/ @temporalio/act +/chasm/lib/scheduler/ @temporalio/act + 
+/chasm/lib/nexusoperation/ @temporalio/act @temporalio/nexus +/chasm/lib/callback/ @temporalio/act @temporalio/nexus + +# Service Clients + +/client/matching/ @temporalio/oss-matching + +# Common + +/common/taskqueue/ @temporalio/oss-matching +/common/worker_versioning/ @temporalio/oss-matching +/common/tqid/ @temporalio/oss-matching + +/common/archiver/ @temporalio/oss-foundations +/common/searchattribute/ @temporalio/oss-foundations + +/common/nexus/ @temporalio/act @temporalio/nexus + +/common/tasks/ @temporalio/oss-foundations @temporalio/cgs +/common/persistence/ @temporalio/oss-foundations @temporalio/oss-matching @temporalio/cgs + +# Components + +/components/ @temporalio/act @temporalio/nexus + +# Proto Definitions + +/proto/internal/temporal/server/api/schedule/ @temporalio/act + +/proto/internal/temporal/server/api/deployment/ @temporalio/oss-matching +/proto/internal/temporal/server/api/matchingservice/ @temporalio/oss-matching +/proto/internal/temporal/server/api/taskqueue/ @temporalio/oss-matching + +/proto/internal/temporal/server/api/archiver/ @temporalio/oss-foundations + +/proto/internal/temporal/server/api/replication/ @temporalio/cgs + +# DB Schema +/schema/**/visibility/ @temporalio/oss-foundations + +# History Service + +/service/history/archival/ @temporalio/oss-foundations +/service/history/*archival* @temporalio/oss-foundations +/service/history/*chasm* @temporalio/oss-foundations +/service/history/deletemanager/ @temporalio/oss-foundations +/service/history/queues/ @temporalio/oss-foundations +/service/history/shard/ @temporalio/oss-foundations +/service/history/*queue_factory* @temporalio/oss-foundations +/service/history/*statemachine* @temporalio/oss-foundations +/service/history/*visibility* @temporalio/oss-foundations + +/service/history/ndc/ @temporalio/cgs +/service/history/replication/ @temporalio/cgs + +# Matching Service + +/service/matching/ @temporalio/oss-matching + +# Worker Service + +/service/worker/batcher/ @temporalio/act 
+/service/worker/scheduler/ @temporalio/act + +/service/worker/workerdeployment/ @temporalio/oss-matching + +/service/worker/addsearchattributes/ @temporalio/oss-foundations +/service/worker/deletenamespace/ @temporalio/oss-foundations +/service/worker/dlq/ @temporalio/oss-foundations + +/service/worker/scanner/ @temporalio/oss-foundations @temporalio/oss-matching +/service/worker/scanner/executions/ @temporalio/oss-foundations +/service/worker/scanner/history/ @temporalio/oss-foundations +/service/worker/scanner/build_ids/ @temporalio/oss-matching +/service/worker/scanner/taskqueue/ @temporalio/oss-matching + +/service/worker/migration/ @temporalio/cgs +/service/worker/replicator/ @temporalio/cgs + +/service/worker/parentclosepolicy/ @temporalio/oss-foundations @temporalio/act + +/tools/ @temporalio/server diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index d476c38c281..68005f6d8b1 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,18 +1,15 @@ ## What changed? - +_Describe what has changed in this PR._ ## Why? - +_Tell your future self why you made these changes._ ## How did you test it? - +- [ ] built +- [ ] run locally and tested manually +- [ ] covered by existing tests +- [ ] added new unit test(s) +- [ ] added new functional test(s) ## Potential risks - - -## Documentation - - -## Is hotfix candidate? - +_Any change is risky. Identify all risks you are aware of. 
If none, remove this section._ diff --git a/.github/actionlint-matcher.json b/.github/actionlint-matcher.json new file mode 100644 index 00000000000..4613e1617bf --- /dev/null +++ b/.github/actionlint-matcher.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "actionlint", + "pattern": [ + { + "regexp": "^(?:\\x1b\\[\\d+m)?(.+?)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*:(?:\\x1b\\[\\d+m)*(\\d+)(?:\\x1b\\[\\d+m)*: (?:\\x1b\\[\\d+m)*(.+?)(?:\\x1b\\[\\d+m)* \\[(.+?)\\]$", + "file": 1, + "line": 2, + "column": 3, + "message": 4, + "code": 5 + } + ] + } + ] +} diff --git a/.github/actions/build-binaries/action.yml b/.github/actions/build-binaries/action.yml new file mode 100644 index 00000000000..cb4b1fc585e --- /dev/null +++ b/.github/actions/build-binaries/action.yml @@ -0,0 +1,57 @@ +name: Build Binaries +description: Build Temporal binaries using GoReleaser + +inputs: + snapshot: + description: "Use snapshot mode (true) or release mode (false). Only applies to release command (build always uses snapshot)." + required: false + default: "true" + single-arch: + description: "Single architecture to build (amd64 or arm64, empty for all). Only used with build command, ignored if release is true." + required: false + default: "" + release: + description: "Use release command (true) or build command (false). When true, single-arch is ignored and snapshot is respected." 
+ required: false + default: "false" + +runs: + using: composite + steps: + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: true + + - name: Run GoReleaser (release) + if: inputs.release == 'true' + uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: v2.13.1 + args: release ${{ inputs.snapshot == 'true' && '--snapshot --skip=publish' || '' }} --clean + env: + GITHUB_TOKEN: ${{ github.token }} + + - name: Run GoReleaser (build - all architectures) + if: inputs.release != 'true' && inputs.single-arch == '' + uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: v2.13.1 + args: build --snapshot + env: + GITHUB_TOKEN: ${{ github.token }} + + - name: Run GoReleaser (build - single architecture) + if: inputs.release != 'true' && inputs.single-arch != '' + uses: goreleaser/goreleaser-action@v6 + with: + distribution: goreleaser + version: v2.13.1 + args: build --snapshot --single-target + env: + GITHUB_TOKEN: ${{ github.token }} + GOOS: linux + GOARCH: ${{ inputs.single-arch }} diff --git a/.github/actions/build-docker-images/action.yml b/.github/actions/build-docker-images/action.yml new file mode 100644 index 00000000000..dbb92a3dde8 --- /dev/null +++ b/.github/actions/build-docker-images/action.yml @@ -0,0 +1,177 @@ +name: Build Docker Images +description: | + Build Temporal Docker images from binaries. + + Prerequisites: + - The build-binaries action must run before this action to produce the binaries. 
+ +inputs: + push: + description: "Push images to Docker Hub" + required: false + default: "false" + tag-latest: + description: "Tag images as latest" + required: false + default: "false" + platform: + description: "Single platform to build (e.g., linux/amd64)" + required: false + default: "" + load: + description: "Load image into local Docker daemon (only works with linux/amd64 - the runner architecture)" + required: false + default: "false" + cli-version: + description: "Temporal CLI version to download (uses default from build helper if empty)" + required: false + default: "" + alpine-tag: + description: "Alpine base image tag" + required: false + default: "" + dockerhub-username: + description: "Docker Hub username" + required: false + dockerhub-token: + description: "Docker Hub token" + required: false + +outputs: + branch-tag: + description: "Docker-safe branch tag (e.g., branch-main, branch-release-v1.30.0)" + value: ${{ steps.image-tags.outputs.tag }} + sha-tag: + description: "Docker SHA tag (e.g., sha-abc1234)" + value: ${{ steps.image-tags.outputs.sha }} + sha-full-tag: + description: "Docker full SHA tag (e.g., sha-abcdef123456...)" + value: ${{ steps.image-tags.outputs.sha-full }} + +runs: + using: composite + steps: + - name: Build docker-build-helper + shell: bash + working-directory: ${{ github.workspace }}/.github/actions/build-docker-images/scripts + run: | + go build -o docker-build-helper . 
+ + - name: Set image tags + id: image-tags + shell: bash + working-directory: ${{ github.workspace }} + run: .github/actions/build-docker-images/scripts/docker-build-helper set-image-tags + + - name: Organize binaries for Docker + id: organize-binaries + shell: bash + working-directory: ${{ github.workspace }} + env: + PLATFORM: ${{ inputs.platform }} + run: | + .github/actions/build-docker-images/scripts/docker-build-helper organize-binaries + + - name: Download Temporal CLI + shell: bash + working-directory: ${{ github.workspace }} + env: + AVAILABLE_ARCHS: ${{ steps.organize-binaries.outputs.available-archs }} + CLI_VERSION: ${{ inputs.cli-version }} + run: | + .github/actions/build-docker-images/scripts/docker-build-helper download-cli + + - name: Extract CLI version from binary + id: extract-cli-version + shell: bash + working-directory: ${{ github.workspace }} + run: | + .github/actions/build-docker-images/scripts/docker-build-helper extract-binary-version temporal cli-version + + - name: Extract server version from binary + id: extract-version + shell: bash + working-directory: ${{ github.workspace }} + run: | + .github/actions/build-docker-images/scripts/docker-build-helper extract-binary-version temporal-server server-version + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + if: inputs.push == 'true' + uses: docker/login-action@v3 + with: + username: ${{ inputs.dockerhub-username }} + password: ${{ inputs.dockerhub-token }} + + - name: Build Docker images + if: inputs.push != 'true' + shell: bash + working-directory: ${{ github.workspace }} + env: + IMAGE_REPO: temporaliotest + IMAGE_SHA_SHORT_TAG: ${{ steps.image-tags.outputs.sha }} + IMAGE_SHA_FULL_TAG: ${{ steps.image-tags.outputs.sha-full }} + IMAGE_BRANCH_TAG: ${{ steps.image-tags.outputs.tag }} + TEMPORAL_SHA: ${{ steps.image-tags.outputs.git-sha }} + TAG_LATEST: ${{ inputs.tag-latest 
}} + ALPINE_TAG: ${{ inputs.alpine-tag }} + PLATFORM: ${{ inputs.platform }} + LOAD: ${{ inputs.load }} + SERVER_VERSION: ${{ steps.extract-version.outputs.server-version }} + CLI_VERSION: ${{ steps.extract-cli-version.outputs.cli-version }} + run: | + if [ -z "${ALPINE_TAG}" ]; then + unset ALPINE_TAG + fi + + if [ -n "${PLATFORM}" ]; then + docker buildx bake \ + --set "*.platform=${PLATFORM}" \ + $( [ "${LOAD}" = "true" ] && echo --load ) \ + -f docker/docker-bake.hcl \ + server admin-tools + else + docker buildx bake \ + $( [ "${LOAD}" = "true" ] && echo --load ) \ + -f docker/docker-bake.hcl \ + server admin-tools + fi + + - name: Build and push Docker images + if: inputs.push == 'true' + shell: bash + working-directory: ${{ github.workspace }} + env: + IMAGE_REPO: temporaliotest + IMAGE_SHA_SHORT_TAG: ${{ steps.image-tags.outputs.sha }} + IMAGE_SHA_FULL_TAG: ${{ steps.image-tags.outputs.sha-full }} + IMAGE_BRANCH_TAG: ${{ steps.image-tags.outputs.tag }} + TEMPORAL_SHA: ${{ steps.image-tags.outputs.git-sha }} + TAG_LATEST: ${{ inputs.tag-latest }} + ALPINE_TAG: ${{ inputs.alpine-tag }} + SERVER_VERSION: ${{ steps.extract-version.outputs.server-version }} + CLI_VERSION: ${{ steps.extract-cli-version.outputs.cli-version }} + run: | + if [ -z "${ALPINE_TAG}" ]; then + unset ALPINE_TAG + fi + + for attempt in 1 2 3; do + echo "Docker build/push attempt $attempt/3" + if timeout 1200 docker buildx bake \ + --push \ + -f docker/docker-bake.hcl \ + server admin-tools; then + exit 0 + fi + if [ "$attempt" -lt 3 ]; then + echo "Attempt $attempt failed, retrying in 30s..." 
+ sleep 30 + fi + done + exit 1 diff --git a/.github/actions/build-docker-images/scripts/.gitignore b/.github/actions/build-docker-images/scripts/.gitignore new file mode 100644 index 00000000000..f67e892977c --- /dev/null +++ b/.github/actions/build-docker-images/scripts/.gitignore @@ -0,0 +1 @@ +docker-build-helper diff --git a/.github/actions/build-docker-images/scripts/main.go b/.github/actions/build-docker-images/scripts/main.go new file mode 100644 index 00000000000..b609d8c6179 --- /dev/null +++ b/.github/actions/build-docker-images/scripts/main.go @@ -0,0 +1,637 @@ +package main + +import ( + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" +) + +var validArchs = []string{"amd64", "arm64"} + +// defaultCliVersion should be updated to the latest cli version +const defaultCliVersion = "1.6.1" + +func main() { + if len(os.Args) < 2 { + fmt.Fprintf(os.Stderr, "Usage: %s \n", os.Args[0]) + fmt.Fprintf(os.Stderr, "Commands:\n") + fmt.Fprintf(os.Stderr, " set-image-tags - Generate Docker image tags from branch and SHA\n") + fmt.Fprintf(os.Stderr, " organize-binaries - Organize binaries for Docker\n") + fmt.Fprintf(os.Stderr, " download-cli - Download Temporal CLI\n") + fmt.Fprintf(os.Stderr, " extract-binary-version - Extract version from a binary\n") + os.Exit(1) + } + + command := os.Args[1] + + switch command { + case "set-image-tags": + if err := setImageTags(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + case "organize-binaries": + if err := organizeBinaries(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + case "download-cli": + if err := downloadCLI(); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + case "extract-binary-version": + if len(os.Args) != 4 { + fmt.Fprintf(os.Stderr, "Usage: %s extract-binary-version \n", os.Args[0]) + os.Exit(1) + } + if err := extractBinaryVersion(os.Args[2], os.Args[3]); err != nil { + 
fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } + default: + fmt.Fprintf(os.Stderr, "Unknown command: %s\n", command) + os.Exit(1) + } +} + +// resolveGitInfo resolves the current git ref and SHA from the working tree. +func resolveGitInfo() (ref string, sha string, err error) { + shaCmd := exec.Command("git", "rev-parse", "HEAD") + shaOut, err := shaCmd.Output() + if err != nil { + return "", "", fmt.Errorf("failed to resolve git SHA: %w", err) + } + sha = strings.TrimSpace(string(shaOut)) + + // Use the symbolic ref (branch name) if available, otherwise fall back + // to a tag name or the raw SHA. + refCmd := exec.Command("git", "symbolic-ref", "HEAD") + if refOut, err := refCmd.Output(); err == nil { + ref = strings.TrimSpace(string(refOut)) + } else { + tagCmd := exec.Command("git", "describe", "--tags", "--exact-match", "HEAD") + if tagOut, tagErr := tagCmd.Output(); tagErr == nil { + ref = strings.TrimSpace(string(tagOut)) + } else { + ref = sha + } + } + + return ref, sha, nil +} + +// setImageTags generates Docker image tags from branch name and commit SHA +func setImageTags() error { + ref, sha, err := resolveGitInfo() + if err != nil { + return err + } + + // Remove refs/heads/ or refs/tags/ prefix + ref = strings.TrimPrefix(ref, "refs/heads/") + ref = strings.TrimPrefix(ref, "refs/tags/") + + // Sanitize ref name first + // Replace any non-alphanumeric (except .-_) with dash + reg := regexp.MustCompile(`[^a-zA-Z0-9._-]`) + sanitizedRef := reg.ReplaceAllString(ref, "-") + + // Collapse multiple consecutive dashes + multiDashReg := regexp.MustCompile(`-+`) + sanitizedRef = multiDashReg.ReplaceAllString(sanitizedRef, "-") + + // Remove leading and trailing dashes + sanitizedRef = strings.Trim(sanitizedRef, "-") + + // Prefix with "branch-" for branch builds + safeTag := fmt.Sprintf("branch-%s", sanitizedRef) + + // Docker tags must be lowercase + safeTag = strings.ToLower(safeTag) + + // Truncate to 128 characters (Docker tag limit) + if 
len(safeTag) > 128 { + safeTag = safeTag[:128] + } + + if safeTag == "" { + return fmt.Errorf("failed to generate valid Docker tag from branch name") + } + + // Generate SHA tags. Keep the short tag for compatibility and add the full + // SHA tag to avoid collisions for automation. + shortSha := sha + if len(shortSha) > 7 { + shortSha = shortSha[:7] + } + shaTag := fmt.Sprintf("sha-%s", shortSha) + fullShaTag := fmt.Sprintf("sha-%s", sha) + + fmt.Printf("Original: %s\n", ref) + fmt.Printf("Sanitized: %s\n", safeTag) + fmt.Printf("Short SHA tag: %s\n", shaTag) + fmt.Printf("Full SHA tag: %s\n", fullShaTag) + + // Set outputs for GitHub Actions + if err := setOutput("tag", safeTag); err != nil { + return fmt.Errorf("failed to set tag output: %w", err) + } + if err := setOutput("sha", shaTag); err != nil { + return fmt.Errorf("failed to set sha output: %w", err) + } + if err := setOutput("sha-full", fullShaTag); err != nil { + return fmt.Errorf("failed to set sha-full output: %w", err) + } + if err := setOutput("git-sha", sha); err != nil { + return fmt.Errorf("failed to set git-sha output: %w", err) + } + + return nil +} + +// organizeBinaries organizes binaries for Docker builds +func organizeBinaries() error { + // Determine target architectures based on PLATFORM environment variable + platform := os.Getenv("PLATFORM") + var archs []string + + if platform != "" { + // Parse platform (e.g., "linux/amd64" -> "amd64") + parts := strings.Split(platform, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid platform format: %s (expected format: os/arch)", platform) + } + arch := parts[1] + + // Check if arch is in valid list + found := false + for _, validArch := range validArchs { + if arch == validArch { + found = true + break + } + } + if !found { + return fmt.Errorf("architecture %s not in supported list: %v", arch, validArchs) + } + + archs = []string{arch} + fmt.Printf("Single architecture build: %s\n", arch) + } else { + // Default to all architectures + archs 
= []string{"amd64", "arm64"} + fmt.Println("Multi-architecture build: amd64, arm64") + } + + // Admin tool binaries (for admin-tools image) + adminToolBinaries := []string{ + "temporal-cassandra-tool", + "temporal-sql-tool", + "temporal-elasticsearch-tool", + "tdbg", + } + + // Server binaries (for server image) + serverBinaries := []string{ + "temporal-server", + } + + // All binaries to copy + binaries := append(adminToolBinaries, serverBinaries...) + + // Validate architecture and binary names + archReg := regexp.MustCompile(`^[a-z0-9]+$`) + for _, arch := range archs { + if !archReg.MatchString(arch) { + return fmt.Errorf("invalid architecture name: %s", arch) + } + } + + binReg := regexp.MustCompile(`^[a-z0-9-]+$`) + for _, binary := range binaries { + if !binReg.MatchString(binary) { + return fmt.Errorf("invalid binary name: %s", binary) + } + } + + // Create architecture directories + for _, arch := range archs { + dir := filepath.Join("docker", "build", arch) + if err := validatePath(dir, "docker/build"); err != nil { + return err + } + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("failed to create directory %s: %w", dir, err) + } + } + + // Map GoReleaser dist structure to build structure + // GoReleaser adds version suffixes: amd64_v1 (GOAMD64=v1), arm64_v8.0 (GOARM64=v8.0) + archMap := map[string]string{ + "amd64": "amd64_v1", + "arm64": "arm64_v8.0", + } + + // Copy binaries + for _, binary := range binaries { + for _, arch := range archs { + distArch := archMap[arch] + distPath := filepath.Join("dist", fmt.Sprintf("%s_linux_%s", binary, distArch), binary) + buildPath := filepath.Join("docker", "build", arch, binary) + + // Validate paths before file operations + if err := validatePath(distPath, "dist"); err != nil { + return fmt.Errorf("invalid dist path: %w", err) + } + if err := validatePath(buildPath, "docker/build"); err != nil { + return fmt.Errorf("invalid build path: %w", err) + } + + if _, err := os.Stat(distPath); err == 
nil { + if err := copyFile(distPath, buildPath); err != nil { + return fmt.Errorf("failed to copy %s to %s: %w", distPath, buildPath, err) + } + if err := os.Chmod(buildPath, 0755); err != nil { + return fmt.Errorf("failed to chmod %s: %w", buildPath, err) + } + fmt.Printf("Copied %s -> %s\n", distPath, buildPath) + } else { + printDirectoryContents("dist") + return fmt.Errorf("binary not found: %s for architecture %s (expected at %s)", binary, arch, distPath) + } + } + } + + // Copy schema directory for admin-tools + schemaDir := filepath.Join("docker", "build", "temporal", "schema") + if err := validatePath(schemaDir, "docker/build"); err != nil { + return err + } + if err := os.MkdirAll(schemaDir, 0755); err != nil { + return fmt.Errorf("failed to create schema directory: %w", err) + } + + // Copy all schema files recursively with path validation + if _, err := os.Stat("schema"); err == nil { + if err := copyRecursive("schema", schemaDir); err != nil { + return fmt.Errorf("failed to copy schema directory: %w", err) + } + fmt.Println("Copied schema directory") + } + + // Validate required binaries for Docker images + fmt.Println("\nValidating required binaries for Docker images...") + + // Check which architectures have binaries + var availableArchs []string + for _, arch := range archs { + testBinary := filepath.Join("docker", "build", arch, "temporal-server") + if _, err := os.Stat(testBinary); err == nil { + availableArchs = append(availableArchs, arch) + } + } + + if len(availableArchs) == 0 { + return fmt.Errorf("❌ No binaries found for any architecture") + } + + fmt.Printf("Found binaries for architectures: %s\n", strings.Join(availableArchs, ", ")) + + // Validate that each available architecture has all required binaries + missingFiles := false + for _, arch := range availableArchs { + for _, binary := range binaries { + binaryPath := filepath.Join("docker", "build", arch, binary) + if _, err := os.Stat(binaryPath); err != nil { + fmt.Fprintf(os.Stderr, 
"Error: Missing %s\n", binaryPath) + missingFiles = true + } + } + } + + // Validate schema directory exists + if _, err := os.Stat(filepath.Join("docker", "build", "temporal", "schema")); err != nil { + fmt.Fprintln(os.Stderr, "Error: Missing docker/build/temporal/schema directory") + missingFiles = true + } + + if missingFiles { + return fmt.Errorf("❌ Binary validation failed") + } + + fmt.Println("✓ All required binaries present for available architectures") + + // Export available architectures for Docker build + if err := setOutput("available-archs", strings.Join(availableArchs, ",")); err != nil { + return fmt.Errorf("failed to set output: %w", err) + } + + return nil +} + +// downloadCLI downloads the Temporal CLI for available architectures +func downloadCLI() error { + // Get available architectures from environment or input + availableArchsStr := os.Getenv("AVAILABLE_ARCHS") + if availableArchsStr == "" { + return fmt.Errorf("AVAILABLE_ARCHS environment variable not set") + } + + availableArchs := strings.Split(availableArchsStr, ",") + + // Filter to only valid architectures + var validAvailableArchs []string + for _, arch := range availableArchs { + arch = strings.TrimSpace(arch) + for _, validArch := range validArchs { + if arch == validArch { + validAvailableArchs = append(validAvailableArchs, arch) + break + } + } + } + + if len(validAvailableArchs) == 0 { + return fmt.Errorf("no valid architectures found in: %s", availableArchsStr) + } + + for _, arch := range validAvailableArchs { + if err := downloadCLIForArch(arch); err != nil { + return fmt.Errorf("failed to download CLI for %s: %w", arch, err) + } + } + + return nil +} + +func downloadCLIForArch(arch string) error { + cliVersion := os.Getenv("CLI_VERSION") + if cliVersion == "" { + cliVersion = defaultCliVersion + } + + tarballName := fmt.Sprintf("temporal_cli_%s_linux_%s.tar.gz", cliVersion, arch) + downloadURL := fmt.Sprintf("https://github.com/temporalio/cli/releases/download/v%s/%s", 
cliVersion, tarballName) + + fmt.Printf("Downloading Temporal CLI v%s for %s from %s\n", cliVersion, arch, downloadURL) + + tempDir := filepath.Join(os.TempDir(), fmt.Sprintf("temporal-cli-%s", arch)) + tarballPath := filepath.Join(os.TempDir(), tarballName) + + // Download tarball + if err := downloadFile(downloadURL, tarballPath); err != nil { + return fmt.Errorf("failed to download: %w", err) + } + defer os.Remove(tarballPath) + + // Create temp directory + if err := os.MkdirAll(tempDir, 0755); err != nil { + return fmt.Errorf("failed to create temp directory: %w", err) + } + defer os.RemoveAll(tempDir) + + // Extract tarball + cmd := exec.Command("tar", "-xzf", tarballPath, "-C", tempDir) + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("failed to extract: %w\nOutput: %s", err, string(output)) + } + + // Move to build directory + destDir := filepath.Join("docker", "build", arch) + if err := validatePath(destDir, "docker/build"); err != nil { + return fmt.Errorf("invalid build directory path: %w", err) + } + if err := os.MkdirAll(destDir, 0755); err != nil { + return fmt.Errorf("failed to create build directory: %w", err) + } + + sourcePath := filepath.Join(tempDir, "temporal") + destPath := filepath.Join(destDir, "temporal") + + if err := validatePath(destPath, "docker/build"); err != nil { + return fmt.Errorf("invalid destination path: %w", err) + } + + if err := os.Rename(sourcePath, destPath); err != nil { + // If rename fails (e.g., cross-device), try copy and delete + if err := copyFile(sourcePath, destPath); err != nil { + return fmt.Errorf("failed to copy binary: %w", err) + } + os.Remove(sourcePath) + } + + if err := os.Chmod(destPath, 0755); err != nil { + return fmt.Errorf("failed to chmod binary: %w", err) + } + + fmt.Printf("Installed Temporal CLI to %s\n", destPath) + + return nil +} + +// findBuildBinary finds a binary by name in the docker/build/{arch}/ directories. 
+func findBuildBinary(name string) (string, error) { + for _, arch := range validArchs { + candidatePath := filepath.Join("docker", "build", arch, name) + if _, err := os.Stat(candidatePath); err == nil { + return candidatePath, nil + } + } + return "", fmt.Errorf("%s binary not found in docker/build/{amd64,arm64}/", name) +} + +// extractBinaryVersion finds a binary, runs --version, parses the output, and sets a GitHub Actions output. +func extractBinaryVersion(binaryName, outputName string) error { + binaryPath, err := findBuildBinary(binaryName) + if err != nil { + return err + } + + fmt.Printf("Extracting version from %s\n", binaryPath) + + cmd := exec.Command(binaryPath, "--version") + output, err := cmd.Output() + if err != nil { + return fmt.Errorf("failed to run %s --version: %w", binaryPath, err) + } + + version, err := parseTemporalVersion(string(output)) + if err != nil { + return err + } + fmt.Printf("Extracted version: %s\n", version) + + if err := setOutput(outputName, version); err != nil { + return fmt.Errorf("failed to set output: %w", err) + } + + return nil +} + +// parseTemporalVersion extracts the version from output like +// "temporal version 1.29.0" or "temporal version 0.0.0-DEV (Server 1.30.1, UI 2.45.3)" +func parseTemporalVersion(output string) (string, error) { + s := strings.TrimSpace(output) + re := regexp.MustCompile(`^temporal version\s+(\d+\.\d+\.\d+\S*)`) + matches := re.FindStringSubmatch(s) + if len(matches) < 2 { + return "", fmt.Errorf("failed to parse version from output: %s", s) + } + return matches[1], nil +} + +// Helper functions + +func setOutput(name, value string) error { + outputFile := os.Getenv("GITHUB_OUTPUT") + if outputFile == "" { + return fmt.Errorf("GITHUB_OUTPUT environment variable not set") + } + + f, err := os.OpenFile(outputFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + _, err = fmt.Fprintf(f, "%s=%s\n", name, value) + return err +} + +func 
validatePath(path, allowedPrefix string) error { + // Clean and resolve paths + normalized := filepath.Clean(path) + resolved, err := filepath.Abs(normalized) + if err != nil { + return fmt.Errorf("failed to resolve path: %w", err) + } + + allowedResolved, err := filepath.Abs(allowedPrefix) + if err != nil { + return fmt.Errorf("failed to resolve allowed prefix: %w", err) + } + + // Check for path traversal + if strings.Contains(normalized, "..") { + return fmt.Errorf("path traversal detected in: %s", path) + } + + // Ensure path is within allowed directory + if !strings.HasPrefix(resolved, allowedResolved) { + return fmt.Errorf("path outside allowed directory: %s", path) + } + + return nil +} + +func copyFile(src, dst string) error { + sourceFile, err := os.Open(src) + if err != nil { + return err + } + defer sourceFile.Close() + + destFile, err := os.Create(dst) + if err != nil { + return err + } + defer destFile.Close() + + _, err = io.Copy(destFile, sourceFile) + return err +} + +func copyRecursive(src, dst string) error { + // Validate paths + if err := validatePath(src, "schema"); err != nil { + return err + } + if err := validatePath(dst, "docker/build"); err != nil { + return err + } + + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + + if srcInfo.IsDir() { + // Create destination directory + if err := os.MkdirAll(dst, 0755); err != nil { + return err + } + + // Read directory entries + entries, err := os.ReadDir(src) + if err != nil { + return err + } + + // Copy each entry + for _, entry := range entries { + // Validate item name to prevent directory traversal + if strings.Contains(entry.Name(), "..") || strings.Contains(entry.Name(), "/") || strings.Contains(entry.Name(), "\\") { + return fmt.Errorf("invalid file name: %s", entry.Name()) + } + + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + + if err := copyRecursive(srcPath, dstPath); err != nil { + return err + } + } + } else { + // Copy 
file + if err := copyFile(src, dst); err != nil { + return err + } + } + + return nil +} + +func downloadFile(url, fpath string) error { + resp, err := http.Get(url) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("bad status: %s", resp.Status) + } + + out, err := os.Create(fpath) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, resp.Body) + return err +} + +func printDirectoryContents(dir string) { + fmt.Fprintf(os.Stderr, "\nContents of %s directory:\n", dir) + err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil // skip errors + } + if info.IsDir() { + fmt.Fprintf(os.Stderr, " %s/\n", path) + } else { + fmt.Fprintf(os.Stderr, " %s\n", path) + } + return nil + }) + if err != nil { + fmt.Fprintf(os.Stderr, " (failed to list %s directory: %v)\n", dir, err) + } +} diff --git a/.github/actions/build-docker-images/scripts/main_test.go b/.github/actions/build-docker-images/scripts/main_test.go new file mode 100644 index 00000000000..7e431e7759c --- /dev/null +++ b/.github/actions/build-docker-images/scripts/main_test.go @@ -0,0 +1,78 @@ +package main + +import ( + "testing" +) + +func TestParseTemporalVersion(t *testing.T) { + tests := []struct { + name string + output string + want string + wantErr bool + }{ + { + name: "server version", + output: "temporal version 1.29.0", + want: "1.29.0", + }, + { + name: "cli release version", + output: "temporal version 1.6.0 (Server 1.30.0, UI 2.45.0)", + want: "1.6.0", + }, + { + name: "cli dev version", + output: "temporal version 0.0.0-DEV (Server 1.30.1, UI 2.45.3)", + want: "0.0.0-DEV", + }, + { + name: "with trailing newline", + output: "temporal version 1.6.0 (Server 1.30.0, UI 2.45.0)\n", + want: "1.6.0", + }, + { + name: "with leading and trailing whitespace", + output: " \n temporal version 1.6.0 (Server 1.30.0, UI 2.45.0) \n ", + want: "1.6.0", + }, + { + 
name: "pre-release version", + output: "temporal version 1.31.0-151.5", + want: "1.31.0-151.5", + }, + { + name: "extra spaces between version and number", + output: "temporal version 1.6.1", + want: "1.6.1", + }, + { + name: "empty output", + output: "", + wantErr: true, + }, + { + name: "unexpected format", + output: "something else entirely", + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseTemporalVersion(tt.output) + if tt.wantErr { + if err == nil { + t.Fatalf("expected error, got version %q", got) + } + } else { + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got != tt.want { + t.Fatalf("got %q, want %q", got, tt.want) + } + } + }) + } +} diff --git a/.github/actions/get-job-id/action.yml b/.github/actions/get-job-id/action.yml new file mode 100644 index 00000000000..6605bb53e4d --- /dev/null +++ b/.github/actions/get-job-id/action.yml @@ -0,0 +1,73 @@ +name: Get Job ID +description: Resolve the numeric job ID for a job name within a workflow run +# NOTE: +# - The GitHub API returns jobs keyed by their display name (the job's `name:` field), +# not the YAML job key. The `inputs.job_name` you pass here MUST exactly match +# the job's display name as it appears in the workflow run UI. +# - For matrix jobs with identical names, include matrix parameters in the name +# (e.g., "Functional test xdc (${matrix.name})") to make each instance unique. +inputs: + job_name: + description: Exact job name to search for + required: true + run_id: + description: The workflow run ID to search within + required: true +outputs: + job_id: + description: The numeric job ID + value: ${{ steps.get_id.outputs.job_id }} +runs: + using: composite + steps: + - id: get_id + shell: bash + env: + GITHUB_TOKEN: ${{ github.token }} + JOB_NAME: ${{ inputs.job_name }} + RUN_ID: ${{ inputs.run_id }} + # RUNNER_NAME helps disambiguate if multiple jobs share the same name. 
+ # The script will first try to match by (name, runner_name), then fall back to name-only. + run: | + set -euo pipefail + + job_url="https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/runs/${RUN_ID}/jobs?per_page=100" + max_retries=6 + job_id="" + json="" + + for ((attempt = 1; attempt <= max_retries; attempt++)); do + json=$(curl -sSL \ + -H "Accept: application/vnd.github+json" \ + -H "Authorization: Bearer $GITHUB_TOKEN" \ + "$job_url") + + # Prefer matching both job name and the current runner name to disambiguate matrix jobs + job_id=$(jq -r --arg name "$JOB_NAME" --arg runner "$RUNNER_NAME" ' + (.jobs // []) + | map(select(.name == $name and (.runner_name // "") == $runner)) + | (.[0].id // empty) + ' <<< "$json" ) + + # Fallback: match by name only + if [ -z "${job_id:-}" ]; then + job_id=$(jq -r --arg name "$JOB_NAME" ' + (.jobs // []) | map(select(.name == $name)) | (.[0].id // empty) + ' <<< "$json" ) + fi + + if [ -n "${job_id:-}" ] && [ "$job_id" != "null" ]; then + break + fi + + if [ "$attempt" -lt "$max_retries" ]; then + echo "::notice::Job ID for '$JOB_NAME' not visible yet (attempt $attempt/$max_retries); retrying in 5s" + sleep 5 + fi + done + + if [ -z "${job_id:-}" ] || [ "$job_id" = "null" ]; then + echo "::error::Failed to resolve job ID for name '$JOB_NAME' on runner '$RUNNER_NAME' in run '$RUN_ID' after retries" >&2 + exit 1 + fi + echo "job_id=$job_id" >> "$GITHUB_OUTPUT" diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 00000000000..8bbb22cbc4c --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,82 @@ +# Code Review Guidelines + +Apply these patterns when reviewing PRs or suggesting code changes. + +## 1. 
Remove Redundant Code (Highest Priority) + +- Remove code that doesn't add value to tests or implementation +- Don't add unnecessary activities/complexity in tests - test only what you need +- Question randomness in tests - test explicitly what you want +- Don't add assertions for things you can assume work (e.g., "You are not testing TerminateWorkflowExecution here, you can assume it works") +- Remove redundant nil checks after you just set a value +- Do not export anything that doesn't need to be exported + +## 2. Go Naming Conventions + +- Don't use `Get` prefix for getters: `func (a *Activity) Store()` not `GetStore()` +- Don't use `Impl` suffix for implementations +- Don't put underscore after `Test` in test names: `TestRetry` not `Test_Retry` +- Avoid stuttering: don't use `ActivityStatus` in package `activity`, just `Status` +- Use `ok` boolean pattern instead of nil checks where idiomatic + +## 3. Testify Suite Correctness and Reliability + +- Never use `s.T()` in subtests - use the subtest's `t` parameter +- Never use suite assertion methods (`s.NoError`, `s.Equal`) from goroutines - causes panics +- Use `EventuallyWithT` when you need assertions inside eventually blocks, and use that block's `t` +- Use `require.ErrorAs(t, err, &specificErr)` for specific error type checks +- Prefer `require` over `assert` - it's rarely useful to continue a test after a failed assertion +- Add comments explaining why `Eventually` is needed (e.g., eventual consistency) +- Do not use single-value type assertions on errors (`err.(*T)`); this panics instead of failing the test when the type doesn't match. Use `errors.As` with a guarded return. +- When launching a goroutine to maintain a precondition for later assertions (e.g., keeping pollers active so a deployment version gets registered), loop until context cancellation rather than running once. A single attempt that times out exits silently, leaving downstream Eventually/propagation waits to hang until their own deadline. 
+- Never call testify assertions (`s.NoError`, `s.Equal`, `require.NoError`, even `assert.NoError`) inside a `go func()` — if the goroutine outlives the test, the assertion panics the binary with `panic: Fail in goroutine after TestXxx has completed`. Move assertions to the test goroutine or use a buffered error channel. +- Any `<-ch` that isn't inside a `select` with `ctx.Done()` will hang indefinitely if the sender never sends. Always provide a context cancellation fallback. +- Never write to package-level or global variables in tests — parallel tests share the same process; thread values through function parameters instead. +- Never use `time.Sleep` or `time.Since(start) > threshold` to enforce ordering — use channels, `sync.WaitGroup`, or `EventuallyWithT` instead. +- When using `EventuallyWithT` (or similar) to wait for a condition driven by a background goroutine, ensure the goroutine's timeout is longer than the `EventuallyWithT` deadline — if the background op times out first, the condition will never be satisfied and the wait will hang until its own deadline. +- Do not silently discard errors from precondition operations with `_, _ = f()` — if `f()` failing invalidates the rest of the test, surface the error or loop until it succeeds. +- Be suspicious of `go s.someHelper(ctx, ...)` calls where the goroutine runs exactly once and the test then immediately waits for something that helper was supposed to cause. If the operation can fail transiently (network, tight deadline, busy CI), the single attempt may fail silently and the wait will never succeed. Either loop the goroutine until `ctx.Done()`, or check that the operation succeeded before proceeding. + +## 4. 
Inline Code / Avoid Abstractions + +- Repeat strings instead of adding constants for single use +- Inline struct field assignments when possible +- Avoid unnecessary wrapper types and generic structs +- Don't add dependencies for 5 lines of code - "just write 5 lines of code instead of adding more dependency bloat" +- Don't create testsuite-level helpers that can't be safely used in subtests +- Prefer explicit code over reflection + +## 5. Proper Error Handling + +- Use standard error types (`InvalidArgument`, `NotFound`, `FailedPrecondition`) over custom error types +- Mark errors as non-retryable when task shouldn't retry in queue +- Wrap errors with context when there's something interesting or informative to add, e.g. `fmt.Errorf("multi-operation part 2: %w", err)` +- Don't panic in library code - return errors and let caller decide +- Validate early in handlers, not deep in business logic +- Use `errors.AsType` instead of `errors.As` +- Use `require.ErrorContains` instead of two separate assertions (`require.Error` + `require.Contains`) + +## 6. Consistency with Codebase + +- Follow existing patterns: "We have been passing through the frontend request in other libraries. Let's keep the same pattern here" +- Use existing utilities before creating new ones +- Follow CLI documentation conventions (capitalize proper nouns) +- Match existing metric tag formats (CONSTANT_CASE for enum values) +- Use the same error message style (no punctuation for single sentences) + +## 7. API and Proto Design + +- Document all proto fields with comments +- Use proper field names: `request_id` not `requestId`, `schedule_time` not `scheduledTime` +- Don't expose internal concepts in user-facing errors: "LowCardinalityKeyword is not a user facing concept" +- Accept event attributes structs instead of growing function signatures +- Prefer enums over int/string for well-known values + +## 8. 
Concurrency and Safety + +- Prefer immutable data patterns (for normal structs and especially proto messages) to avoid data races and synchronization +- Default to `sync.Mutex` for synchronization; atomics are an advanced tool for specific patterns or performance concerns +- Prefer `sync.Mutex` over `sync.RWMutex` almost always, except when reads are much more common than writes (>1000×) or readers hold the lock for significant time +- Don't do IO while holding locks - use side effect tasks +- Clone data before releasing locks if it might be modified +- Proto message fields accessed outside the workflow lock must be cloned, not aliased: use `common.CloneProto(...)` rather than returning the pointer directly. diff --git a/.github/workflows/auto-approve-cicd-release-pr.yml b/.github/workflows/auto-approve-cicd-release-pr.yml new file mode 100644 index 00000000000..d73cbcef1dd --- /dev/null +++ b/.github/workflows/auto-approve-cicd-release-pr.yml @@ -0,0 +1,17 @@ +name: Auto-approve temporal-cicd[bot] PRs on cloud release branches +on: + pull_request: + branches: + - cloud/** +permissions: + contents: write + pull-requests: write +jobs: + auto_approve: + runs-on: ubuntu-latest + if: ${{ github.actor == 'temporal-cicd[bot]' }} + steps: + - name: Approve PR + run: gh pr review --approve ${{ github.event.pull_request.html_url }} + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/.github/workflows/build-and-publish.yml b/.github/workflows/build-and-publish.yml new file mode 100644 index 00000000000..3d636f6d45b --- /dev/null +++ b/.github/workflows/build-and-publish.yml @@ -0,0 +1,63 @@ +name: Build and Publish + +on: + push: + branches: + - main + - cloud/* + - feature/* + - release/* + +permissions: + contents: read + +jobs: + build-and-push-docker: + runs-on: ubuntu-latest + timeout-minutes: 90 + # Only push for main, cloud, release branches (not feature) + if: | + github.ref == 'refs/heads/main' || + startsWith(github.ref, 'refs/heads/cloud/') || + 
startsWith(github.ref, 'refs/heads/release/') + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Build binaries + uses: ./.github/actions/build-binaries + with: + snapshot: true + + - name: Build and push Docker images + uses: ./.github/actions/build-docker-images + with: + push: true + tag-latest: ${{ github.ref == 'refs/heads/main' }} + dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + # For feature branches, just build (no push) + build-docker-feature: + runs-on: ubuntu-latest + if: startsWith(github.ref, 'refs/heads/feature/') + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Build binaries + uses: ./.github/actions/build-binaries + with: + snapshot: true + + - name: Build Docker images + uses: ./.github/actions/build-docker-images + with: + push: false + tag-latest: false diff --git a/.github/workflows/check-pr-placeholders.yml b/.github/workflows/check-pr-placeholders.yml new file mode 100644 index 00000000000..51ade33a61c --- /dev/null +++ b/.github/workflows/check-pr-placeholders.yml @@ -0,0 +1,59 @@ +name: Validate PR description for placeholder lines or empty sections + +on: + pull_request: + types: [opened, edited, synchronize, reopened] + +permissions: + pull-requests: read + +jobs: + validate-pr-description: + runs-on: ubuntu-latest + + steps: + - name: Validate PR description for placeholder lines or empty sections + uses: actions/github-script@v8 + with: + script: | + const pr = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: context.payload.pull_request.number + }); + + const body = pr.data.body || ''; + const lines = body.split(/\r?\n/); + + let violations = []; + + // Detect placeholder lines: entire line starts and ends with _ + lines.forEach((line, idx) => { + if (/^_.*_$/.test(line.trim())) { 
+ violations.push(`Line ${idx + 1}: Placeholder "${line.trim()}"`); + } + }); + + // Detect empty sections: look for headers like '## Why?' followed by no meaningful content + const requiredSections = ['## What changed?', '## Why?', '## How did you test it?']; + requiredSections.forEach((header) => { + const idx = lines.findIndex(line => line.trim().toLowerCase() === header.toLowerCase()); + if (idx !== -1) { + let contentIdx = idx + 1; + while (contentIdx < lines.length && lines[contentIdx].trim() === '') { + contentIdx++; + } + const nextLine = lines[contentIdx]?.trim(); + if (!nextLine || /^## /.test(nextLine)) { + violations.push(`Section "${header}" appears to be empty.`); + } + } + }); + + if (violations.length > 0) { + console.log("❌ PR description issues found:"); + violations.forEach(v => console.log(`- ${v}`)); + core.setFailed(`PR description must not contain placeholders or empty sections.`); + } else { + console.log("✅ PR description passed all checks."); + } diff --git a/.github/workflows/check-release-dependencies.yml b/.github/workflows/check-release-dependencies.yml new file mode 100644 index 00000000000..7cdd4a4b928 --- /dev/null +++ b/.github/workflows/check-release-dependencies.yml @@ -0,0 +1,29 @@ +name: Check Release Dependencies +on: + pull_request: + branches: + - main + - "release/**" + - "cloud/**" + +permissions: + contents: read + +jobs: + check-dependencies: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v6 + + - name: Setup Go + uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - name: Validate dependency versions for PR base branch + run: >- + go run ./cmd/tools/check-dependencies + --base-branch "${{ github.event.pull_request.base.ref }}" diff --git a/.github/workflows/ci-success-report.yml b/.github/workflows/ci-success-report.yml new file mode 100644 index 00000000000..1538c4eadb7 --- /dev/null +++ b/.github/workflows/ci-success-report.yml @@ 
-0,0 +1,72 @@ +name: CI Success Report + +on: + schedule: + # Run on Tuesdays at noon Eastern time (5 PM UTC) + - cron: '0 17 * * 2' + workflow_dispatch: + inputs: + days: + description: 'Number of days to include in report' + required: false + default: '7' + type: string + workflow: + description: 'Workflow name to report on' + required: false + default: 'All Tests' + type: string + branch: + description: 'Branch to generate report for' + required: false + default: 'main' + type: string + +permissions: + contents: read + actions: read + +jobs: + ci-success-report: + runs-on: ubuntu-latest + steps: + - name: Generate token + id: generate_token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} + private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + + - name: Checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + token: ${{ steps.generate_token.outputs.token }} + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: 'go.mod' + + - name: Generate CI Success Report + env: + GH_TOKEN: ${{ steps.generate_token.outputs.token }} + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + DAYS_PARAM: ${{ github.event.inputs.days || '7' }} + WORKFLOW_PARAM: ${{ github.event.inputs.workflow || 'All Tests' }} + BRANCH_PARAM: ${{ github.event.inputs.branch || 'main' }} + run: | + set -euo pipefail + + echo "Branch: $BRANCH_PARAM" + echo "Workflow: $WORKFLOW_PARAM" + echo "Days: $DAYS_PARAM" + + go run ./cmd/tools/ci-notify/main.go digest \ + --branch "$BRANCH_PARAM" \ + --workflow "$WORKFLOW_PARAM" \ + --days "$DAYS_PARAM" \ + --slack-webhook "$SLACK_WEBHOOK" + + echo "Report sent successfully" diff --git a/.github/workflows/create-tag.yml b/.github/workflows/create-tag.yml deleted file mode 100644 index a9f7490a887..00000000000 --- a/.github/workflows/create-tag.yml +++ /dev/null @@ -1,120 +0,0 @@ -name: "Create a tag" - -on: - workflow_dispatch: - 
inputs: - branch: - description: "Branch to be tagged" - required: true - tag: - description: "Tag for new version (1.23.4)" - required: true - release_notes: - type: boolean - description: "Create draft release notes" - default: false - base_tag: - description: "Base tag to generate commit list for release notes" - required: false - -jobs: - create-tag: - name: "Create a tag" - runs-on: ubuntu-latest - - defaults: - run: - shell: bash - - steps: - - name: Generate token - id: generate_token - uses: actions/create-github-app-token@v1 - with: - app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} - private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} - - - name: Checkout - uses: actions/checkout@v4 - with: - persist-credentials: true - token: ${{ steps.generate_token.outputs.token }} - ref: ${{ github.event.inputs.branch }} - fetch-depth: 0 - fetch-tags: true - - - name: Set up Github credentials - run: | - git config --local user.name 'Temporal Data' - git config --local user.email 'commander-data@temporal.io' - - - name: Get current version - id: get_current_version - run: | - CURRENT_VERSION=$(grep '^\s*ServerVersion = ".*"$' common/headers/version_checker.go | sed 's/^.*"\(.*\)"$/\1/') - [ -z "$CURRENT_VERSION" ] && exit 1 - echo "CURRENT_VERSION=$CURRENT_VERSION" >> "$GITHUB_OUTPUT" - - - name: Update Server version - if: ${{ steps.get_current_version.outputs.CURRENT_VERSION != github.event.inputs.tag }} - env: - TAG: ${{ github.event.inputs.tag }} - BRANCH: ${{ github.event.inputs.branch }} - run: | - sed -i -e "s/ServerVersion = \".*\"$/ServerVersion = \"$TAG\"/g" common/headers/version_checker.go - git add . 
- git commit -m "Bump Server version to $TAG" - git push origin "$BRANCH" - - - name: Create and push tag - env: - TAG: 'v${{ github.event.inputs.tag }}' - BRANCH: ${{ github.event.inputs.branch }} - run: | - if [ -z "$(git tag -l $TAG)" ]; then - git tag "$TAG" - git push origin "$TAG" - elif [ "$(git rev-list -n 1 $TAG)" != "$(git rev-parse HEAD)" ]; then - echo "::error::Tag already exists and it doesn't reference current HEAD of branch $BRANCH" - exit 1 - fi - - - name: Create draft release notes - if: ${{ github.event.inputs.release_notes == 'true' }} - env: - GH_TOKEN: ${{ steps.generate_token.outputs.token }} - BASE_TAG: ${{ github.event.inputs.base_tag }} - TAG: 'v${{ github.event.inputs.tag}}' - run: | - if [ -z "$BASE_TAG" ] || [ -z "$(git tag -l $BASE_TAG)" ]; then - echo "::error::Base tag not specified or does not exist" - exit 1 - fi - - TEMPFILE=$(mktemp) - cat > $TEMPFILE <<- EOF - ## Breaking Changes - Document them here, if any - - ## Deprecation Announcements - Document them here, if any. - - ## Release Highlights - Add highlights if any. 
- - ### Helpful links to get you started with Temporal - [Temporal Docs](https://docs.temporal.io/) - [Server](https://github.com/temporalio/temporal) - [Docker Compose](https://github.com/temporalio/docker-compose) - [Helm Chart](https://github.com/temporalio/helm-charts) - - ### Docker images for this release (use the tag \`${TAG#v}\`) - [Server](https://hub.docker.com/repository/docker/temporalio/server) - [Server With Auto Setup](https://hub.docker.com/repository/docker/temporalio/auto-setup) ([what is Auto-Setup?](https://docs.temporal.io/blog/auto-setup)) - [Admin-Tools](https://hub.docker.com/repository/docker/temporalio/admin-tools) - - **Full Changelog**: https://github.com/temporalio/temporal/compare/${BASE_TAG}...${TAG} - EOF - - gh repo set-default ${{ github.repository }} - gh release create "$TAG" --verify-tag --draft --title "$TAG" -F $TEMPFILE diff --git a/.github/workflows/docker-build-manual.yml b/.github/workflows/docker-build-manual.yml new file mode 100644 index 00000000000..8d84da70503 --- /dev/null +++ b/.github/workflows/docker-build-manual.yml @@ -0,0 +1,89 @@ +name: Manual Docker Build +# Dispatch this workflow from the branch you want to build from. 
+ +on: + workflow_dispatch: + inputs: + cli-version: + description: "Optional Temporal CLI version override (leave empty to use default)" + default: "" + alpine-tag: + description: "Optional Alpine base image tag override (leave empty to use default from docker-bake.hcl)" + default: "" + push: + description: "Push images to Docker Hub (temporaliotest/server and temporaliotest/admin-tools)" + required: true + type: boolean + default: false + tag-latest: + description: "Tag images as latest" + required: true + type: boolean + default: false + +permissions: + contents: read + +jobs: + build-docker: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Build binaries + uses: ./.github/actions/build-binaries + with: + snapshot: true + + - name: Build Docker images + id: build-docker + if: ${{ !inputs.push }} + uses: ./.github/actions/build-docker-images + with: + push: false + tag-latest: ${{ inputs.tag-latest }} + alpine-tag: ${{ inputs.alpine-tag }} + cli-version: ${{ inputs.cli-version }} + + - name: Build and push Docker images + id: push-docker + if: ${{ inputs.push }} + uses: ./.github/actions/build-docker-images + with: + push: true + tag-latest: ${{ inputs.tag-latest }} + alpine-tag: ${{ inputs.alpine-tag }} + cli-version: ${{ inputs.cli-version }} + dockerhub-username: ${{ secrets.DOCKERHUB_USERNAME }} + dockerhub-token: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Output image tags + env: + SHA_TAG: ${{ steps.build-docker.outputs.sha-tag || steps.push-docker.outputs.sha-tag }} + SHA_FULL_TAG: ${{ steps.build-docker.outputs.sha-full-tag || steps.push-docker.outputs.sha-full-tag }} + BRANCH_TAG: ${{ steps.build-docker.outputs.branch-tag || steps.push-docker.outputs.branch-tag }} + PUSHED: ${{ inputs.push }} + TAG_LATEST: ${{ inputs.tag-latest }} + run: | + { + echo "### Docker Images Built" + echo "" + echo "**Branch:** ${GITHUB_REF_NAME}" + echo "**Short SHA Tag:** ${SHA_TAG}" + echo "**Full SHA 
Tag:** ${SHA_FULL_TAG}" + echo "**Branch Tag:** ${BRANCH_TAG}" + echo "**Platform:** linux/amd64,linux/arm64" + echo "**Pushed to Docker Hub:** ${PUSHED}" + echo "**Tagged as latest:** ${TAG_LATEST}" + echo "" + echo "**Image Tags:**" + echo "- temporaliotest/server:${SHA_TAG}" + echo "- temporaliotest/admin-tools:${SHA_TAG}" + echo "- temporaliotest/server:${SHA_FULL_TAG}" + echo "- temporaliotest/admin-tools:${SHA_FULL_TAG}" + echo "- temporaliotest/server:${BRANCH_TAG}" + echo "- temporaliotest/admin-tools:${BRANCH_TAG}" + } >> "$GITHUB_STEP_SUMMARY" diff --git a/.github/workflows/features-integration.yml b/.github/workflows/features-integration.yml index d1370eaa9fc..217d31c0d7f 100644 --- a/.github/workflows/features-integration.yml +++ b/.github/workflows/features-integration.yml @@ -14,16 +14,66 @@ concurrency: # Auto-cancel existing runs in the PR when a new commit is pushed jobs: build-docker-image: - uses: temporalio/docker-builds/.github/workflows/docker-build-only.yml@main - with: - temporal-server-repo-path: ${{github.event.pull_request.head.repo.full_name}} - temporal-server-repo-ref: ${{github.event.pull_request.head.ref}} + runs-on: ubuntu-latest + permissions: + actions: write + contents: read + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + ref: ${{ github.event.pull_request.head.sha || github.sha }} + fetch-depth: 0 + + - name: Build binaries + uses: ./.github/actions/build-binaries + with: + snapshot: true + single-arch: amd64 + + - name: Build Docker images + id: build-docker + uses: ./.github/actions/build-docker-images + with: + push: false + tag-latest: false + platform: linux/amd64 + load: true + + - name: Save Docker image as artifact + env: + BRANCH_TAG: ${{ steps.build-docker.outputs.branch-tag }} + run: | + docker save "temporaliotest/server:${BRANCH_TAG}" -o /tmp/temporal-server.tar + docker save "temporaliotest/admin-tools:${BRANCH_TAG}" -o /tmp/temporal-admin-tools.tar + echo "${BRANCH_TAG}" > /tmp/image_tag + + 
- name: Prepare artifact + working-directory: ${{ github.workspace }} + run: | + # Upload-artifact has no good way to flatten paths, so we need to move the compose file + # to avoid some disgustingly long inner path inside the artifact zip. + cp ./develop/docker-compose/docker-compose.yml /tmp/docker-compose.yml + + - name: Upload Docker artifact + uses: actions/upload-artifact@v6 + with: + name: temporal-server-docker + path: | + /tmp/temporal-server.tar + /tmp/temporal-admin-tools.tar + /tmp/image_tag + /tmp/docker-compose.yml + + retention-days: 7 feature-tests-ts: needs: build-docker-image uses: temporalio/features/.github/workflows/typescript.yaml@main with: - version: 1.5.2 + # This field is not actually used by this workflow if docker-image-artifact-name + # is set, but it's marked as required, so supply some string. version-is-repo-ref: false docker-image-artifact-name: temporal-server-docker @@ -31,15 +81,30 @@ jobs: needs: build-docker-image uses: temporalio/features/.github/workflows/go.yaml@main with: - version: f9d73bfdf7c8d3ec0311306140fbfafa7fb6f9cf - version-is-repo-ref: true + version: __latest_features_docker_image__ + version-is-repo-ref: false + docker-image-artifact-name: temporal-server-docker + + feature-tests-go-chasm-scheduler: + needs: build-docker-image + uses: temporalio/features/.github/workflows/go.yaml@main + with: + version: __latest_features_docker_image__ + version-is-repo-ref: false docker-image-artifact-name: temporal-server-docker + dynamic-config-values: | + history.enableTransitionHistory: + - value: true + history.enableChasm: + - value: true + history.enableCHASMSchedulerCreation: + - value: true feature-tests-python: needs: build-docker-image uses: temporalio/features/.github/workflows/python.yaml@main with: - version: 0.1b4 + version: __latest_features_docker_image__ version-is-repo-ref: false docker-image-artifact-name: temporal-server-docker @@ -47,7 +112,7 @@ jobs: needs: 
build-docker-image uses: temporalio/features/.github/workflows/java.yaml@main with: - version: v1.17.0 + version: __latest_features_docker_image__ version-is-repo-ref: false docker-image-artifact-name: temporal-server-docker @@ -55,6 +120,35 @@ jobs: needs: build-docker-image uses: temporalio/features/.github/workflows/dotnet.yaml@main with: - version: 1.0.0 + version: __latest_features_docker_image__ + version-is-repo-ref: false + docker-image-artifact-name: temporal-server-docker + + feature-tests-ruby: + needs: build-docker-image + uses: temporalio/features/.github/workflows/ruby.yaml@main + with: + version: __latest_features_docker_image__ version-is-repo-ref: false docker-image-artifact-name: temporal-server-docker + + feature-tests-status: + name: Tests Status + needs: + - feature-tests-ts + - feature-tests-go + - feature-tests-go-chasm-scheduler + - feature-tests-python + - feature-tests-java + - feature-tests-dotnet + - feature-tests-ruby + runs-on: ubuntu-latest + if: always() + env: + RESULTS: ${{ toJSON(needs.*.result) }} + steps: + - name: Check results + run: | + if [[ -n $(echo "$RESULTS" | jq '.[] | select (. 
!= "success")') ]]; then + exit 1 + fi diff --git a/.github/workflows/flaky-tests-report.yml b/.github/workflows/flaky-tests-report.yml new file mode 100644 index 00000000000..591fdc1b69d --- /dev/null +++ b/.github/workflows/flaky-tests-report.yml @@ -0,0 +1,83 @@ +name: Flaky Tests Report + +on: + schedule: + # Run on Wednesdays at noon Eastern time (5 PM UTC) + - cron: "0 17 * * 3" + workflow_dispatch: + inputs: + days: + description: "Number of days to look back for flaky tests" + required: false + default: "7" + type: string + max_links: + description: "Maximum number of failure links to show per test" + required: false + default: "3" + type: string + notify_slack: + description: "Send Slack notification" + required: false + default: true + type: boolean + +permissions: + contents: read + actions: read + +jobs: + flaky-tests-report: + runs-on: ubuntu-latest + steps: + - name: Generate token + id: generate_token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} + private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + + - name: Checkout repository + uses: actions/checkout@v6 + with: + persist-credentials: false + token: ${{ steps.generate_token.outputs.token }} + fetch-depth: 0 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + + - name: Generate flaky test report + id: process-flaky-tests + env: + GH_TOKEN: ${{ steps.generate_token.outputs.token }} + SLACK_WEBHOOK: ${{ (github.event_name == 'schedule' || github.event.inputs.notify_slack == 'true') && secrets.SLACK_WEBHOOK || '' }} + DAYS_PARAM: ${{ github.event.inputs.days || '7' }} + MAX_LINKS_PARAM: ${{ github.event.inputs.max_links || '3' }} + RUN_ID: ${{ github.run_id }} + REF_NAME: ${{ github.ref_name }} + SHA: ${{ github.sha }} + run: | + set -euo pipefail + + go run ./cmd/tools/flakereport generate \ + --days "$DAYS_PARAM" \ + --max-links "$MAX_LINKS_PARAM" \ + --output-dir 
tools/flakes/out \ + --slack-webhook "$SLACK_WEBHOOK" \ + --run-id "$RUN_ID" \ + --ref-name "$REF_NAME" \ + --sha "$SHA" \ + --bisect \ + --bisect-days 28 + + - name: Upload generated reports + uses: actions/upload-artifact@v6 + if: steps.process-flaky-tests.outcome == 'success' + with: + name: flaky-tests-reports-${{ github.run_number }} + path: tools/flakes/out/* + retention-days: 30 diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml deleted file mode 100644 index 7db2713727a..00000000000 --- a/.github/workflows/golangci-lint.yml +++ /dev/null @@ -1,33 +0,0 @@ -name: golangci-lint -on: - pull_request: -permissions: - contents: read -jobs: - golangci: - name: lint - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - check-latest: true - - - name: golangci-lint - uses: golangci/golangci-lint-action@v4 - with: - version: v1.54.2 - args: --verbose --timeout 10m --fix=false --new-from-rev=HEAD~ --config=.golangci.yml - - - name: check-is-dirty - run: | - if [[ -n $(git status --porcelain) ]]; then - echo "Detected uncommitted changes." 
- git status - git diff - exit 1 - fi diff --git a/.github/workflows/goreleaser.yml b/.github/workflows/goreleaser.yml deleted file mode 100644 index 6a1c7234f63..00000000000 --- a/.github/workflows/goreleaser.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: goreleaser - -on: - release: - types: - - released - -permissions: - contents: write - -jobs: - goreleaser: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up Go - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - check-latest: true - - - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v5 - with: - version: latest - args: release --clean - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/govulncheck.yml b/.github/workflows/govulncheck.yml new file mode 100644 index 00000000000..683c8e65d03 --- /dev/null +++ b/.github/workflows/govulncheck.yml @@ -0,0 +1,16 @@ +name: govulncheck +on: + pull_request: + +permissions: + contents: read + +jobs: + govulncheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v6 + - uses: actions/setup-go@v6 + with: + go-version-file: go.mod + - uses: temporalio/public-actions/golang/govulncheck@main diff --git a/.github/workflows/linters.yml b/.github/workflows/linters.yml new file mode 100644 index 00000000000..a3110beb51a --- /dev/null +++ b/.github/workflows/linters.yml @@ -0,0 +1,185 @@ +name: linters +on: + pull_request: +permissions: + contents: read +jobs: + lint-actions: + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - name: lint actions + run: | + echo "::add-matcher::.github/actionlint-matcher.json" + bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + make lint-actions + shell: bash + + lint-protos: + runs-on: ubuntu-24.04-arm + steps: + - 
uses: actions/checkout@v6 + with: + submodules: true + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: lint protobuf definitions + run: | + make lint-protos + + lint-api: + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + with: + submodules: true + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: lint protobuf API definitions + run: | + make lint-api + + lint-workflows: + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - name: lint system workflows with workflowcheck + run: make workflowcheck + + fmt: + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - name: apply formatters + run: | + make fmt + + - name: check-is-dirty + run: | + if [[ -n $(git status --porcelain) ]]; then + echo "Detected uncommitted changes." + git status + git diff + exit 1 + fi + + parallelize-tests: + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - name: check test parallelization + run: make parallelize-tests + + - name: check-is-dirty + run: | + if [[ -n $(git status --porcelain) ]]; then + echo "Detected uncommitted changes after running parallelize-tests." + echo "Run 'make parallelize-tests' locally and commit the changes." 
+ git status + git diff + exit 1 + fi + + golangci: + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + check-latest: true + cache: true + + - name: lint code + run: | + make GOLANGCI_LINT_FIX=false GOLANGCI_LINT_BASE_REV=HEAD~ lint-code + + - name: check-is-dirty + run: | + if [[ -n $(git status --porcelain) ]]; then + echo "Detected uncommitted changes." + git status + git diff + exit 1 + fi + + linters-succeed: + name: All Linters Succeed + needs: + - fmt + - lint-api + - lint-protos + - lint-actions + - golangci + - parallelize-tests + runs-on: ubuntu-24.04-arm + if: always() + env: + RESULTS: ${{ toJSON(needs.*.result) }} + steps: + - name: Check results + run: | + if [[ -n $(echo "$RESULTS" | jq '.[] | select (. != "success")') ]]; then + exit 1 + fi diff --git a/.github/workflows/optimize-test-sharding.yml b/.github/workflows/optimize-test-sharding.yml new file mode 100644 index 00000000000..9220235d741 --- /dev/null +++ b/.github/workflows/optimize-test-sharding.yml @@ -0,0 +1,78 @@ +name: Optimize Test Sharding + +on: + schedule: + # Runs at 0700 UTC daily + - cron: "0 7 * * *" + workflow_dispatch: # Allow manual trigger + +permissions: + contents: read + +env: + SALT_FILE: tests/testcore/shard_salt.txt + BRANCH: auto/optimize-test-sharding + +jobs: + update-salts: + runs-on: ubuntu-latest + + steps: + - name: Generate GitHub token + id: generate-token + uses: actions/create-github-app-token@v2 + with: + app-id: ${{ secrets.TEMPORAL_CICD_APP_ID }} + private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} + owner: temporalio + + - name: Checkout code + uses: actions/checkout@v6 + with: + ref: main + token: ${{ steps.generate-token.outputs.token }} + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: go.mod + cache: true + + - name: Optimize functional test sharding + run: | + # NOTE: shard count must match shards in 
run-tests.yml + go run ./cmd/tools/optimize-test-sharding \ + -shards 3 \ + -workflow run-tests.yml \ + -artifact-pattern 'junit-xml--*shard*--functional-test' \ + -file "${{ env.SALT_FILE }}" \ + -threshold 0.05 + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} + + - name: Create pull request + run: | + if git diff --quiet; then + echo "No changes to shard salt" + exit 0 + fi + + # Delete remote branch if it exists from a previous run. + # This will also close a previous, stuck PR if it exists. + git push origin --delete ${{ env.BRANCH }} 2>/dev/null || true + + git config --local user.name 'Temporal Data' + git config --local user.email 'commander-data@temporal.io' + + git checkout -b ${{ env.BRANCH }} + git add "${{ env.SALT_FILE }}" + git commit -m "Update test shard salt" + git push origin ${{ env.BRANCH }} + + gh pr create \ + --title "Update test shard salt" \ + --body "Automatically generated by the optimize-test-sharding workflow." + + gh pr merge ${{ env.BRANCH }} --auto --squash + env: + GH_TOKEN: ${{ steps.generate-token.outputs.token }} diff --git a/.github/workflows/promote-admin-tools-image.yml b/.github/workflows/promote-admin-tools-image.yml new file mode 100644 index 00000000000..cbe98019e9f --- /dev/null +++ b/.github/workflows/promote-admin-tools-image.yml @@ -0,0 +1,25 @@ +name: Promote Admin Tools Image + +on: + workflow_dispatch: + inputs: + source-tag: + description: "Source tag from temporaliotest registry (e.g. 
sha-abc123)" + required: true + target-tags: + description: "Target tags for temporalio registry (comma or newline separated, e.g., 1.29.1, latest)" + required: true + +permissions: + contents: read + +jobs: + promote: + uses: ./.github/workflows/promote-docker-image.yml + with: + image-name: admin-tools + source-tag: ${{ inputs.source-tag }} + target-tags: ${{ inputs.target-tags }} + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/promote-docker-image.yml b/.github/workflows/promote-docker-image.yml new file mode 100644 index 00000000000..e545aa75023 --- /dev/null +++ b/.github/workflows/promote-docker-image.yml @@ -0,0 +1,120 @@ +name: Promote Docker Image + +on: + workflow_call: + inputs: + image-name: + description: "Image name (e.g., server, admin-tools)" + required: true + type: string + source-tag: + description: "Source tag from temporaliotest registry (e.g. sha-abc123)" + required: true + type: string + target-tags: + description: "Target tags for temporalio registry (comma or newline separated, e.g., 1.29.1, latest)" + required: true + type: string + secrets: + DOCKERHUB_USERNAME: + required: true + DOCKERHUB_TOKEN: + required: true + +jobs: + validate-inputs: + runs-on: ubuntu-latest + outputs: + source-tag-safe: ${{ steps.validate.outputs.source-tag }} + target-tags-safe: ${{ steps.validate.outputs.target-tags }} + steps: + - name: Validate input tags + id: validate + uses: actions/github-script@v8 + env: + SOURCE_TAG: ${{ inputs.source-tag }} + TARGET_TAGS: ${{ inputs.target-tags }} + with: + script: | + const sourceTag = process.env.SOURCE_TAG; + const targetTagsInput = process.env.TARGET_TAGS; + + // Source tag: must be short SHA (sha-XXXXXXX) or full sha256 digest + // Examples: sha-b5b2dfe, sha256:082943409e71ae50d8dd8693593070eac8173f01fb5bfd4970ae59e52176753e + const sourceTagPattern = /^sha-[a-f0-9]{7,}$|^sha256:[a-f0-9]{64}$/; + + // Target tag: 
semantic version pattern (major.minor.patch.build or shorter) + // Examples: 1.29.1, 1.29.1.1, 1.29, latest + const targetTagPattern = /^(\d+\.)*\d+$|^latest$/; + + // Validate source tag format + if (!sourceTagPattern.test(sourceTag)) { + core.setFailed('Error: Invalid source tag format. Must be sha-XXXXXXX or sha256:...'); + return; + } + + // Parse and validate target tags (split by newline or comma) + const targetTags = targetTagsInput + .split(/[\n,]/) + .map(tag => tag.trim()) + .filter(tag => tag.length > 0); + + if (targetTags.length === 0) { + core.setFailed('Error: At least one target tag must be provided'); + return; + } + + for (const tag of targetTags) { + if (!targetTagPattern.test(tag)) { + core.setFailed(`Error: Invalid target tag format: "${tag}". Must be semantic version (e.g., 1.29.1, 1.29.1.1) or "latest"`); + return; + } + } + + core.setOutput('source-tag', sourceTag); + core.setOutput('target-tags', JSON.stringify(targetTags)); + + core.info('✓ Tag validation passed'); + core.info(` Source: ${sourceTag}`); + core.info(` Target tags: ${targetTags.join(', ')}`); + + promote: + needs: [validate-inputs] + runs-on: ubuntu-latest + steps: + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Install crane + run: | + curl -sL https://github.com/google/go-containerregistry/releases/latest/download/go-containerregistry_Linux_x86_64.tar.gz | tar xz crane + sudo mv crane /usr/local/bin/ + + - name: Promote image + uses: actions/github-script@v8 + env: + SOURCE_TAG: ${{ needs.validate-inputs.outputs.source-tag-safe }} + TARGET_TAGS: ${{ needs.validate-inputs.outputs.target-tags-safe }} + IMAGE_NAME: ${{ inputs.image-name }} + with: + script: | + const { execSync } = require('child_process'); + const sourceTag = process.env.SOURCE_TAG; + const targetTags = JSON.parse(process.env.TARGET_TAGS); + const imageName = process.env.IMAGE_NAME; + 
const source = `temporaliotest/${imageName}:${sourceTag}`; + + core.info(`Promoting ${imageName} image...`); + core.info(` From: ${source}`); + core.info(` To: ${targetTags.map(t => `temporalio/${imageName}:${t}`).join(', ')}`); + + for (const tag of targetTags) { + const target = `temporalio/${imageName}:${tag}`; + core.info(`Copying ${source} -> ${target}`); + execSync(`crane copy ${source} ${target}`, { stdio: 'inherit' }); + } + + core.info(`${imageName} image promoted successfully to all tags`); diff --git a/.github/workflows/promote-server-image.yml b/.github/workflows/promote-server-image.yml new file mode 100644 index 00000000000..5ea7c36124d --- /dev/null +++ b/.github/workflows/promote-server-image.yml @@ -0,0 +1,25 @@ +name: Promote Server Image + +on: + workflow_dispatch: + inputs: + source-tag: + description: "Source tag from temporaliotest registry (e.g. sha-abc123)" + required: true + target-tags: + description: "Target tags for temporalio registry (comma or newline separated, e.g., 1.29.1, latest)" + required: true + +permissions: + contents: read + +jobs: + promote: + uses: ./.github/workflows/promote-docker-image.yml + with: + image-name: server + source-tag: ${{ inputs.source-tag }} + target-tags: ${{ inputs.target-tags }} + secrets: + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..c68f6104872 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,25 @@ +name: Release + +on: + release: + types: + - released + +permissions: + contents: write + +jobs: + release: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v6 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Build binaries + uses: ./.github/actions/build-binaries + with: + snapshot: false + release: true diff --git a/.github/workflows/run-single-test.yml 
b/.github/workflows/run-single-test.yml new file mode 100644 index 00000000000..b033f8a9947 --- /dev/null +++ b/.github/workflows/run-single-test.yml @@ -0,0 +1,153 @@ +name: Run Single Test +run-name: ${{ inputs.test_name }}${{ inputs.test_dbs }} + +on: + workflow_dispatch: + inputs: + commit: + description: "Commit SHA" + required: true + run_functional_test: + description: "Run a functional test (otherwise unit test)" + type: boolean + default: true + unit_test_directory: + description: "[Unit Test Only] Directory to run unit tests in" + type: string + default: "./temporal" + n_runs: + description: "Number of times to repeat the test (start with n=25 or lower to avoid OOMKill)" + type: number + default: 1 + test_name: + description: "Name of the test to run (e.g. 'TestFunctionalSuite/TestUpdateWorkflow')" + type: string + required: true + timeout_minutes: + description: "Test timeout in minutes" + type: number + default: 120 + test_runner: + description: "Which runner to use. Choose higher RAM if your n_runs is high." + type: choice + default: "16GB RAM (ubuntu-latest)" + options: + - "16GB RAM (ubuntu-latest)" + - "64GB RAM (ubuntu-latest-16-cores)" + test_dbs: + description: '[Functional Test Only] DBs to test on (e.g. 
["sqlite", "mysql8"])' + type: string + default: '["sqlite"]' + +permissions: + contents: read + +env: + COMMIT: ${{ inputs.commit }} + DOCKER_COMPOSE_FILE: ./develop/github/docker-compose.yml + TEMPORAL_VERSION_CHECK_DISABLED: 1 + +jobs: + unit-test: + if: ${{ inputs.run_functional_test != true }} + name: Unit test + runs-on: ${{ inputs.test_runner == '64GB RAM (ubuntu-latest-16-cores)' && 'ubuntu-latest-16-cores' || 'ubuntu-latest' }} + steps: + - uses: actions/checkout@v6 + with: + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ env.COMMIT }} + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + + - name: Run unit test + timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }} + run: make unit-test-coverage + env: + UNIT_TEST_DIRS: ${{ inputs.unit_test_directory }} + TEST_ARGS: "-run ${{ inputs.test_name }} -count ${{ inputs.n_runs }}" + TEST_TIMEOUT: "${{ inputs.timeout_minutes }}m" + + functional-test: + if: ${{ inputs.run_functional_test == true }} + name: Functional test (${{ matrix.name }}) + strategy: + fail-fast: false + matrix: + name: + - cass_es + - cass_es8 + - cass_os2 + - cass_os3 + - sqlite + - mysql8 + - postgres12 + - postgres12_pgx + include: + - name: cass_es + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, elasticsearch] + runs_on_override: ubuntu-latest-8-cores + - name: cass_es8 + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, elasticsearch8] + runs_on_override: ubuntu-latest-8-cores + - name: cass_os2 + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, opensearch2] + - name: cass_os3 + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, opensearch3] + - name: sqlite + persistence_type: sql + persistence_driver: sqlite + containers: [] + - name: mysql8 + persistence_type: sql + persistence_driver: mysql8 + containers: [mysql] + - name: postgres12 + persistence_type: sql + persistence_driver: 
postgres12 + containers: [postgresql] + - name: postgres12_pgx + persistence_type: sql + persistence_driver: postgres12_pgx + containers: [postgresql] + runs-on: ${{ inputs.test_runner == '64GB RAM (ubuntu-latest-16-cores)' && 'ubuntu-latest-16-cores' || (matrix.runs_on_override || 'ubuntu-latest') }} + env: + PERSISTENCE_TYPE: ${{ matrix.persistence_type }} + PERSISTENCE_DRIVER: ${{ matrix.persistence_driver }} + steps: + - uses: actions/checkout@v6 + if: ${{ contains(fromJSON(inputs.test_dbs), matrix.name) }} + with: + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ env.COMMIT }} + + - name: Start containerized dependencies + if: ${{ contains(fromJSON(inputs.test_dbs), matrix.name) && toJson(matrix.containers) != '[]' }} + uses: hoverkraft-tech/compose-action@v2.0.1 + with: + compose-file: ${{ env.DOCKER_COMPOSE_FILE }} + services: "${{ join(matrix.containers, '\n') }}" + down-flags: -v + + - uses: actions/setup-go@v6 + if: ${{ contains(fromJSON(inputs.test_dbs), matrix.name) }} + with: + go-version-file: "go.mod" + + - name: Run functional test + if: ${{ contains(fromJSON(inputs.test_dbs), matrix.name) }} + timeout-minutes: ${{ fromJSON(inputs.timeout_minutes) }} + run: make functional-test-coverage + env: + TEST_ARGS: "-run ${{ inputs.test_name }} -count ${{ inputs.n_runs }}" + TEST_TIMEOUT: "${{ inputs.timeout_minutes }}m" diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index b0d0f73ddd7..1f56c6e5293 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -5,334 +5,713 @@ on: push: branches: - main - - release/** + - cloud/* + - feature/* + - release/* - workflow_dispatch: - inputs: - commit: - description: "Commit SHA" - required: true +permissions: + contents: read concurrency: # Auto-cancel existing runs in the PR when a new commit is pushed group: run-tests-${{ github.head_ref || github.run_id }} cancel-in-progress: true env: - COMMIT: ${{ github.event.inputs.commit || github.sha }} + # For 
pull_request: use the head of the PR branch (not the merge branch which is the default!) + # For push: use the pushed commit. + COMMIT: ${{ github.event.pull_request.head.sha || github.sha }} + PR_BASE_COMMIT: ${{ github.event.pull_request.base.sha }} DOCKER_COMPOSE_FILE: ./develop/github/docker-compose.yml TEMPORAL_VERSION_CHECK_DISABLED: 1 + MAX_TEST_ATTEMPTS: 3 + SHARD_COUNT: 3 # NOTE: must match shard count in optimize-test-sharding.yml jobs: - misc-checks: - name: Misc checks - strategy: - fail-fast: false - matrix: - runs-on: [ubuntu-latest] - runs-on: ${{ matrix.runs-on }} + test-setup: + name: Test setup + runs-on: ubuntu-latest + outputs: + job_matrix: ${{ steps.build_matrix.outputs.job_matrix }} + full_test_reason: ${{ steps.determine_scope.outputs.full_test_reason }} + runner_arm: ${{ steps.configure_runners.outputs.runner_arm }} steps: - - uses: actions/checkout@v4 + - name: Checkout Code + uses: actions/checkout@v6 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} - submodules: true + fetch-depth: 0 + + - name: Fetch base branch + if: ${{ github.event_name == 'pull_request' }} + run: git fetch origin ${{ github.event.pull_request.base.ref }}:${{ github.event.pull_request.base.ref }} - - uses: actions/setup-go@v5 + - name: Compute merge base + if: ${{ github.event_name == 'pull_request' }} + run: | + MERGE_BASE="$(git merge-base "${{ env.COMMIT }}" "${{ github.event.pull_request.base.ref }}")" + echo "MERGE_BASE=${MERGE_BASE}" >> "$GITHUB_ENV" + + # If possible, run the "abridged" jobs on PRs to save time. + # + # The goal is to still cover all persistence code paths: + # 1. SQL + # 2. NoSQL (ie Cassandra) + # 3. Elasticsearch/OpenSearch + # All other jobs for the remaining databases only run a small set of smoke tests. + # + # Exceptions to this optimization: + # 1. commit is not a PR + # 2. `test-all-dbs` label set on PR + # 3. 
changes to persistence packages were made + - name: Determine test scope + id: determine_scope + run: | + FULL_TEST_REASON="" + + event_name="${{ github.event_name }}" + labels='${{ toJson(github.event.pull_request.labels.*.name) }}' + merge_base="${MERGE_BASE:-}" + + echo "event_name=$event_name" + echo "labels=$labels" + echo "merge_base=$merge_base" + + # Push events (main, release branches) run all tests on all DBs + if [[ "$event_name" == "push" ]]; then + FULL_TEST_REASON="Running full tests on all DBs (push event)." + # Check for test-all-dbs label + elif echo "$labels" | jq -e 'any(. == "test-all-dbs")' > /dev/null 2>&1; then + FULL_TEST_REASON="Running full tests on all DBs (test-all-dbs label)." + # Check for persistence code changes + elif [[ -n "$merge_base" ]]; then + echo "Changed persistence files:" + git diff --name-only "$merge_base" "$COMMIT" | grep -E "^(common/persistence/|schema/)" || echo "(none)" + if git diff --name-only "$merge_base" "$COMMIT" | grep -qE "^(common/persistence/|schema/)"; then + FULL_TEST_REASON="Running full tests on all DBs (persistence code changes)." + fi + fi + + echo "full_test_reason=$FULL_TEST_REASON" >> "$GITHUB_OUTPUT" + + - name: ${{ steps.determine_scope.outputs.full_test_reason && 'ℹ️ Full tests' || 'ℹ️ Smoke tests' }} + run: echo "::notice::${{ steps.determine_scope.outputs.full_test_reason || 'Running smoke tests on extended DBs. Add the test-all-dbs label to run all tests on all DBs.' }}" + + - name: Configure runners + id: configure_runners + run: | + # Use 8-core runners for temporalio org, standard runners for forks + if [[ "${{ github.repository_owner }}" == "temporalio" ]]; then + runner_arm="ubuntu-24.04-arm64-8-cores" + else + runner_arm="ubuntu-24.04-arm" + fi + echo "runner_arm=$runner_arm" >> "$GITHUB_OUTPUT" + + # Primary DBs always get full tests, extended DBs get smoke tests (1 job) in abridged PRs. 
+ - name: Build job matrix + id: build_matrix + env: + FULL_TEST_REASON: ${{ steps.determine_scope.outputs.full_test_reason }} + DB_CONFIGS: | + cass_es: + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, elasticsearch] + required: true + cass_es8: + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, elasticsearch8] + cass_os2: + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, opensearch2] + cass_os3: + persistence_type: nosql + persistence_driver: cassandra + containers: [cassandra, opensearch3] + sqlite: + persistence_type: sql + persistence_driver: sqlite + containers: [] + mysql8: + persistence_type: sql + persistence_driver: mysql8 + containers: [mysql] + postgres12: + persistence_type: sql + persistence_driver: postgres12 + containers: [postgresql] + required: true + postgres12_pgx: + persistence_type: sql + persistence_driver: postgres12_pgx + containers: [postgresql] + JOB_TYPES: | + functest: + cmd: make functional-test-coverage + test_timeout: 35m + github_timeout: 40 + sharded: true + smoke: + cmd: make functional-test-coverage + test_timeout: 5m + github_timeout: 10 + test_args: '"-run=TestActivityTestSuite|TestSignalWorkflowTestSuite|TestWorkflowTestSuite"' + ndc: + cmd: make functional-test-ndc-coverage + test_timeout: 10m + github_timeout: 15 + xdc: + cmd: make functional-test-xdc-coverage + test_timeout: 30m + github_timeout: 35 + run: | + # Convert YAML inputs to JSON for jq. + DBS_JSON=$(yq -o=json <<< "$DB_CONFIGS") + JOBS_JSON=$(yq -o=json <<< "$JOB_TYPES") + + # Build the job matrix as a JSON array. + MATRIX=$( + jq -c -n \ + --argjson dbs "$DBS_JSON" \ + --argjson jobs "$JOBS_JSON" \ + --argjson shard_count "$SHARD_COUNT" \ + --arg full_test_reason "$FULL_TEST_REASON" \ + ' + [ + $dbs | to_entries[] as $db | + + # Full test suite for required DBs (or all DBs when full_test_reason is set). 
+ (if $db.value.required or ($full_test_reason != "") then + $jobs | to_entries[] | select(.key != "smoke") + else + $jobs | to_entries[] | select(.key == "smoke") + end) as $job | + + # Base entry: merge db config + job config + name + ($db.value + ($job.value | del(.sharded)) + {name: $db.key}) as $base | + + # If sharded, emit one entry per shard; otherwise emit one entry + if $job.value.sharded then + range($shard_count) as $i | + $base + { + display_name: "shard\($i)", + shard_index: $i, + total_shards: $shard_count + } + else + $base + {display_name: $job.key} + end + ] + ' + ) + + echo "job_matrix=$MATRIX" >> "$GITHUB_OUTPUT" + echo "Generated $(jq length <<< "$MATRIX") jobs" + + pre-build: + name: Pre-build for cache + needs: test-setup + runs-on: ${{ needs.test-setup.outputs.runner_arm }} + steps: + - uses: actions/checkout@v6 with: - go-version-file: 'go.mod' - check-latest: true + token: ${{ secrets.GITHUB_TOKEN }} + ref: ${{ env.COMMIT }} - - uses: arduino/setup-protoc@v3 + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: false # do our own caching - - run: make ci-build-misc + - name: Restore dependencies + id: restore-deps + uses: actions/cache/restore@v5 + with: + path: ~/go/pkg/mod + key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }} - - run: make build-tests + - run: make pre-build-functional-test-coverage - cache-docker-images: - name: Cache Docker images - strategy: - fail-fast: false - matrix: - runs-on: [ubuntu-latest] - runs-on: ${{ matrix.runs-on }} - steps: - - uses: ScribeMD/docker-cache@0.3.7 + - name: Save dependencies + uses: actions/cache/save@v5 + if: ${{ steps.restore-deps.outputs.cache-hit != 'true' }} + with: + path: ~/go/pkg/mod + key: ${{ steps.restore-deps.outputs.cache-primary-key }} + + - name: Save build outputs + uses: actions/cache/save@v5 with: - key: docker-${{ runner.os }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }} + path: ~/.cache/go-build + key: 
go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }} - - uses: actions/checkout@v4 + misc-checks: + name: Misc checks + needs: [pre-build, test-setup] + runs-on: ${{ needs.test-setup.outputs.runner_arm }} + steps: + - uses: actions/checkout@v6 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} + # buf-breaking tries to compare HEAD against merge base so we need to be able to find it + fetch-depth: 100 + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: false # do our own caching + + - name: Restore dependencies + uses: actions/cache/restore@v5 + with: + path: ~/go/pkg/mod + key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }} + + - name: Restore build outputs + uses: actions/cache/restore@v5 + with: + path: ~/.cache/go-build + key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }} + + - uses: arduino/setup-protoc@v3 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + - run: GOOS=windows GOARCH=amd64 make clean-bins bins + + - run: GOOS=darwin GOARCH=arm64 make clean-bins bins - - run: docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} pull + - run: make clean-bins ci-build-misc unit-test: name: Unit test - needs: misc-checks - strategy: - fail-fast: false - matrix: - runs-on: [ubuntu-latest] - runs-on: ${{ matrix.runs-on }} + needs: [pre-build, test-setup] + runs-on: ${{ needs.test-setup.outputs.runner_arm }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: - go-version-file: 'go.mod' - check-latest: true + go-version-file: "go.mod" + cache: false # do our own caching - - name: Run unit test - timeout-minutes: 15 - run: make unit-test-coverage + - name: Restore dependencies + uses: actions/cache/restore@v5 + with: + path: ~/go/pkg/mod + key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ 
hashFiles('go.sum') }} + + - name: Restore build outputs + uses: actions/cache/restore@v5 + with: + path: ~/.cache/go-build + key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }} + + - name: Run unit tests + timeout-minutes: 20 + run: TEST_TIMEOUT=15m ./develop/github/monitor_test.sh make unit-test-coverage + + - name: Print memory snapshot + if: always() + run: cat /tmp/memory_snapshot.txt || true + + - name: Generate crash report + if: failure() # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed + run: | + [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] && + CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash + + - name: Write test summary + if: ${{ !cancelled() }} + run: | + summary="$(make -s print-test-summary)" + if [ -n "$summary" ]; then + printf '%s\n' "$summary" > "$GITHUB_STEP_SUMMARY" + fi + + - name: Upload code coverage to Codecov + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./.testoutput + flags: unit-test + + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./.testoutput + flags: unit-test + report_type: test_results + + - name: Get job ID + id: get_job_id + uses: ./.github/actions/get-job-id + with: + job_name: Unit test + run_id: ${{ github.run_id }} + + - name: Upload test results to GitHub + # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag. 
+ uses: actions/upload-artifact@v6 + if: ${{ !cancelled() }} + with: + name: junit-xml--${{ github.run_id }}--${{ steps.get_job_id.outputs.job_id }}--${{ github.run_attempt }}--unit-test + path: ./.testoutput/junit.*.xml + include-hidden-files: true + retention-days: 28 integration-test: name: Integration test - needs: [misc-checks, cache-docker-images] - strategy: - fail-fast: false - matrix: - runs-on: [ubuntu-latest] - runs-on: ${{ matrix.runs-on }} + needs: [pre-build, test-setup] + runs-on: ${{ needs.test-setup.outputs.runner_arm }} steps: - - uses: ScribeMD/docker-cache@0.3.7 - with: - key: docker-${{ runner.os }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }} - - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} - - uses: actions/setup-go@v5 + - name: Start containerized dependencies + uses: hoverkraft-tech/compose-action@v2.0.1 + with: + compose-file: ${{ env.DOCKER_COMPOSE_FILE }} + services: | + cassandra + mysql + postgresql + down-flags: -v + + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: false # do our own caching + + - name: Restore dependencies + uses: actions/cache/restore@v5 with: - go-version-file: 'go.mod' - check-latest: true + path: ~/go/pkg/mod + key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }} - - name: Start containerized dependencies + - name: Restore build outputs + uses: actions/cache/restore@v5 + with: + path: ~/.cache/go-build + key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }} + + - name: Wait for containerized dependencies to be healthy run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} up -d cassandra mysql postgresql + # Word splitting is intentional here. 
+ # shellcheck disable=SC2046 + docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} up --wait $(docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} ps --services) - name: Run integration test timeout-minutes: 15 - run: make integration-test-coverage + run: ./develop/github/monitor_test.sh make integration-test-coverage + + - name: Print memory snapshot + if: always() + run: cat /tmp/memory_snapshot.txt || true + + - name: Generate crash report + if: failure() # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed + run: | + [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] && + CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash + + - name: Write test summary + if: ${{ !cancelled() }} + run: | + summary="$(make -s print-test-summary)" + if [ -n "$summary" ]; then + printf '%s\n' "$summary" > "$GITHUB_STEP_SUMMARY" + fi + + - name: Upload code coverage to Codecov + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./.testoutput + flags: integration-test + + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./.testoutput + flags: integration-test + report_type: test_results + + - name: Get job ID + id: get_job_id + uses: ./.github/actions/get-job-id + with: + job_name: Integration test + run_id: ${{ github.run_id }} + + - name: Upload test results to GitHub + # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag. 
+ uses: actions/upload-artifact@v6 + if: ${{ !cancelled() }} + with: + name: junit-xml--${{ github.run_id }}--${{ steps.get_job_id.outputs.job_id }}--${{ github.run_attempt }}--integration-test + path: ./.testoutput/junit.*.xml + include-hidden-files: true + retention-days: 28 - name: Tear down docker compose if: ${{ always() }} run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} down -v + docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} down -v + # Root job name includes matrix details so it is unique per job variant. + # This MUST stay in sync with the `job_name` passed to the job-id action below. functional-test: - name: Functional test - needs: [misc-checks, cache-docker-images] + # Display name shown in the UI. The job-id lookup uses this exact value. + name: Functional test (${{ matrix.name }}, ${{ matrix.display_name }}) + needs: [pre-build, test-setup] strategy: fail-fast: false matrix: - runs-on: [ubuntu-latest] - name: [cass_es, cass_es8, sqlite, mysql8, postgres12, postgres12_pgx] - shard_index: [0, 1, 2] - include: - - name: cass_es - persistence_type: nosql - persistence_driver: cassandra - containers: [cassandra, elasticsearch] - - name: cass_es8 - persistence_type: nosql - persistence_driver: cassandra - containers: [cassandra, elasticsearch8] - - name: sqlite - persistence_type: sql - persistence_driver: sqlite - containers: [] - - name: mysql8 - persistence_type: sql - persistence_driver: mysql8 - containers: [mysql] - - name: postgres12 - persistence_type: sql - persistence_driver: postgres12 - containers: [postgresql] - - name: postgres12_pgx - persistence_type: sql - persistence_driver: postgres12_pgx - containers: [postgresql] - runs-on: ${{ matrix.runs-on }} + include: ${{ fromJson(needs.test-setup.outputs.job_matrix) }} + runs-on: ${{ needs.test-setup.outputs.runner_arm }} env: - TEST_TOTAL_SHARDS: 3 - TEST_SHARD_INDEX: ${{ matrix.shard_index }} PERSISTENCE_TYPE: ${{ matrix.persistence_type }} PERSISTENCE_DRIVER: ${{ 
matrix.persistence_driver }} + TEST_TIMEOUT: ${{ matrix.test_timeout }} steps: - uses: ScribeMD/docker-cache@0.3.7 with: - key: docker-${{ runner.os }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }} + key: docker-${{ runner.os }}${{ runner.arch }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }} - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} - - uses: actions/setup-go@v5 - with: - go-version-file: 'go.mod' - check-latest: true - - name: Start containerized dependencies if: ${{ toJson(matrix.containers) != '[]' }} - run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} up -d ${{ join(matrix.containers, ' ') }} - - - name: Run functional test - timeout-minutes: 15 - run: make functional-test-coverage + uses: hoverkraft-tech/compose-action@v2.0.1 + with: + compose-file: ${{ env.DOCKER_COMPOSE_FILE }} + services: "${{ join(matrix.containers, '\n') }}" + down-flags: -v - - name: Tear down docker compose - if: ${{ always() }} - run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} down -v + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: false # do our own caching - functional-test-xdc: - name: Functional test xdc - needs: [misc-checks, cache-docker-images] - strategy: - fail-fast: false - matrix: - runs-on: [ubuntu-latest] - name: [cass_es, cass_es8, mysql8, postgres12, postgres12_pgx] - include: - - name: cass_es - persistence_type: nosql - persistence_driver: elasticsearch - containers: [cassandra, elasticsearch] - - name: cass_es8 - persistence_type: nosql - persistence_driver: elasticsearch - containers: [cassandra, elasticsearch8] - - name: mysql8 - persistence_type: sql - persistence_driver: mysql8 - containers: [mysql] - - name: postgres12 - persistence_type: sql - persistence_driver: postgres12 - containers: [postgresql] - - name: postgres12_pgx - persistence_type: sql - persistence_driver: postgres12_pgx - containers: [postgresql] - runs-on: ${{ matrix.runs-on }} - 
env: - PERSISTENCE_TYPE: ${{ matrix.persistence_type }} - PERSISTENCE_DRIVER: ${{ matrix.persistence_driver }} - steps: - - uses: ScribeMD/docker-cache@0.3.7 + - name: Restore dependencies + uses: actions/cache/restore@v5 with: - key: docker-${{ runner.os }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }} + path: ~/go/pkg/mod + key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }} - - uses: actions/checkout@v4 + - name: Restore build outputs + uses: actions/cache/restore@v5 with: - token: ${{ secrets.GITHUB_TOKEN }} - ref: ${{ env.COMMIT }} + path: ~/.cache/go-build + key: go-${{ runner.os }}${{ runner.arch }}-build-${{ env.COMMIT }} - - uses: actions/setup-go@v5 + - name: Get job ID + id: get_job_id + uses: ./.github/actions/get-job-id with: - go-version-file: 'go.mod' - check-latest: true + job_name: Functional test (${{ matrix.name }}, ${{ matrix.display_name }}) + run_id: ${{ github.run_id }} - - name: Start containerized dependencies + - name: ${{ matrix.display_name == 'smoke' && 'ℹ️ Smoke test' || 'ℹ️ Full test' }} + run: echo "::notice::${{ matrix.display_name == 'smoke' && 'This is a smoke test. Add the test-all-dbs label to run all tests on all DBs.' || needs.test-setup.outputs.full_test_reason }}" + + - name: Wait for containerized dependencies to be healthy if: ${{ toJson(matrix.containers) != '[]' }} run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} up -d ${{ join(matrix.containers, ' ') }} + # Word splitting is intentional here. 
+ # shellcheck disable=SC2046 + docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} up --wait $(docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} ps --services) - - name: Run functional test xdc - timeout-minutes: 15 - run: make functional-test-xdc-coverage + - name: Run functional test + timeout-minutes: ${{ matrix.github_timeout }} + run: ./develop/github/monitor_test.sh ${{ matrix.cmd }} + env: + TEST_TOTAL_SHARDS: ${{ matrix.total_shards }} + TEST_SHARD_INDEX: ${{ matrix.total_shards && matrix.shard_index }} # guard with total_shards to avoid falsy eval of shard_index=0 + TEST_ARGS: "${{ matrix.test_args }}" + TEMPORAL_TEST_LOG_FILE: ${{ github.workspace }}/.testoutput/debug.log + TEMPORAL_TEST_LOG_LEVEL: info + + - name: Dump container logs + if: ${{ failure() && toJson(matrix.containers) != '[]' }} + run: docker compose -f ${{ env.DOCKER_COMPOSE_FILE }} logs --no-color + + - name: Print memory snapshot + if: always() + run: cat /tmp/memory_snapshot.txt || true + + - name: Generate crash report + if: failure() # if the tests failed, we would expect one JUnit XML report per attempt; otherwise it must have crashed + run: | + [ "$(find .testoutput -maxdepth 1 -name 'junit.*.xml' | wc -l)" -lt "$MAX_TEST_ATTEMPTS" ] && + CRASH_REPORT_NAME="$GITHUB_JOB" make report-test-crash - - name: Tear down docker compose - if: ${{ always() }} + - name: Write test summary + if: ${{ !cancelled() }} run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} down -v + summary="$(make -s print-test-summary)" + if [ -n "$summary" ]; then + printf '%s\n' "$summary" > "$GITHUB_STEP_SUMMARY" + fi - functional-test-ndc: - name: Functional test ndc - needs: [misc-checks, cache-docker-images] - strategy: - fail-fast: false - matrix: - runs-on: [ubuntu-latest] - name: [cass_es, cass_es8, mysql8, postgres12, postgres12_pgx] - include: - - name: cass_es - persistence_type: nosql - persistence_driver: elasticsearch - containers: [cassandra, elasticsearch] - - name: cass_es8 - persistence_type: 
nosql - persistence_driver: elasticsearch - containers: [cassandra, elasticsearch8] - - name: mysql8 - persistence_type: sql - persistence_driver: mysql8 - containers: [mysql] - - name: postgres12 - persistence_type: sql - persistence_driver: postgres12 - containers: [postgresql] - - name: postgres12_pgx - persistence_type: sql - persistence_driver: postgres12_pgx - containers: [postgresql] - runs-on: ${{ matrix.runs-on }} - env: - PERSISTENCE_TYPE: ${{ matrix.persistence_type }} - PERSISTENCE_DRIVER: ${{ matrix.persistence_driver }} - steps: - - uses: ScribeMD/docker-cache@0.3.7 + - name: Upload code coverage to Codecov + uses: codecov/codecov-action@v5 with: - key: docker-${{ runner.os }}-${{ hashFiles(env.DOCKER_COMPOSE_FILE) }} + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./.testoutput + flags: functional-test - - uses: actions/checkout@v4 + - name: Upload test results to Codecov + if: ${{ !cancelled() }} + uses: codecov/codecov-action@v5 + with: + token: ${{ secrets.CODECOV_TOKEN }} + directory: ./.testoutput + flags: functional-test + report_type: test_results + + - name: Upload test results to GitHub + # Can't pin to major because the action linter doesn't recognize the include-hidden-files flag. 
+ uses: actions/upload-artifact@v6 + if: ${{ !cancelled() }} + with: + name: junit-xml--${{ github.run_id }}--${{ steps.get_job_id.outputs.job_id }}--${{ github.run_attempt }}--${{ matrix.name }}--${{ matrix.display_name }}--functional-test + path: ./.testoutput/junit.*.xml + include-hidden-files: true + retention-days: 28 + + - name: Upload debug logs + uses: actions/upload-artifact@v6 + if: ${{ !cancelled() }} + with: + name: debug-logs--${{ github.run_id }}--${{ steps.get_job_id.outputs.job_id }}--${{ github.run_attempt }}--${{ matrix.name }}--${{ matrix.display_name }}--functional-test + path: ${{ github.workspace }}/.testoutput/debug.log + if-no-files-found: ignore + retention-days: 14 + + mixed-brain-test: + name: Mixed brain test + needs: [pre-build, test-setup] + runs-on: ${{ needs.test-setup.outputs.runner_arm }} + steps: + - uses: actions/checkout@v6 with: token: ${{ secrets.GITHUB_TOKEN }} ref: ${{ env.COMMIT }} - - uses: actions/setup-go@v5 + - name: Start PostgreSQL + uses: hoverkraft-tech/compose-action@v2.0.1 with: - go-version-file: 'go.mod' - check-latest: true + compose-file: ${{ env.DOCKER_COMPOSE_FILE }} + services: postgresql + down-flags: -v - - name: Start containerized dependencies - if: ${{ toJson(matrix.containers) != '[]' }} - run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} up -d ${{ join(matrix.containers, ' ') }} + - uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: false - - name: Run functional test ndc - timeout-minutes: 15 - run: make functional-test-ndc-coverage + - name: Restore dependencies + uses: actions/cache/restore@v5 + with: + path: ~/go/pkg/mod + key: go-${{ runner.os }}${{ runner.arch }}-${{ hashFiles('go.mod') }}-deps-${{ hashFiles('go.sum') }} - - name: Tear down docker compose - if: ${{ always() }} - run: | - docker-compose -f ${{ env.DOCKER_COMPOSE_FILE }} down -v + - name: Restore build outputs + uses: actions/cache/restore@v5 + with: + path: ~/.cache/go-build + key: go-${{ runner.os 
}}${{ runner.arch }}-build-${{ env.COMMIT }} + + - name: Install PostgreSQL schema + run: make install-schema-postgresql12 + + - name: Run mixed brain test + timeout-minutes: 20 + run: ./develop/github/monitor_test.sh make mixed-brain-test + env: + TEST_TIMEOUT: 18m + MIXED_BRAIN_TEST_DURATION: 5m + PERSISTENCE_DRIVER: postgres12 + + - name: Print memory snapshot + if: always() + run: cat /tmp/memory_snapshot.txt || true + + - name: Print current server logs + if: always() + run: cat .testoutput/mixedbrain_process-current.log || true + + - name: Print release server logs + if: always() + run: cat .testoutput/mixedbrain_process-release.log || true + + - name: Print Omes logs + if: always() + run: cat .testoutput/mixedbrain_omes.log || true test-status: + if: always() name: Test Status needs: + - misc-checks - unit-test - integration-test - functional-test - - functional-test-xdc - - functional-test-ndc runs-on: ubuntu-latest - if: always() + env: + RESULTS: ${{ toJSON(needs.*.result) }} + steps: + - name: Check results + run: | + # all statuses must be success + if [[ -n $(echo "$RESULTS" | jq '.[] | select (. 
!= "success")') ]]; then + exit 1 + fi + + notify-failure: + name: Notify Slack on Failure + if: | + always() && + github.ref == 'refs/heads/main' && + needs.test-status.result == 'failure' + needs: test-status + runs-on: ubuntu-latest + permissions: + contents: read + actions: read steps: - - name: Success - if: ${{ !(contains(needs.*.result, 'failure')) }} - run: exit 0 - - name: Failure - if: ${{ contains(needs.*.result, 'failure') }} - run: exit 1 + - name: Checkout code + uses: actions/checkout@v6 + + - name: Set up Go + uses: actions/setup-go@v6 + with: + go-version-file: "go.mod" + cache: true + + - name: Send Slack notification + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + GH_TOKEN: ${{ github.token }} + run: | + go run ./cmd/tools/ci-notify alert \ + --run-id "${{ github.run_id }}" \ + --slack-webhook "$SLACK_WEBHOOK" diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 8365b1f5cf1..88b2ae3cbaa 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -4,6 +4,9 @@ on: schedule: - cron: '0 0 * * *' +permissions: + pull-requests: write + jobs: stale_prs: runs-on: ubuntu-latest @@ -11,8 +14,8 @@ jobs: - name: label stale pull requests uses: actions/stale@v9 with: - days-before-close: -1 # ie disabled + days-before-close: -1 # ie disabled days-before-stale: 120 - days-before-issue-stale: -1 # ie disabled + days-before-issue-stale: -1 # ie disabled stale-pr-label: stale stale-pr-message: 'This PR was marked as stale. Please update or close it.' 
diff --git a/.github/workflows/trigger-publish.yml b/.github/workflows/trigger-publish.yml deleted file mode 100644 index 0f6bcd0c64d..00000000000 --- a/.github/workflows/trigger-publish.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: 'Trigger Docker image build' - -on: - push: - branches: - - main - - release/* - -jobs: - trigger: - name: 'trigger Docker image build' - runs-on: ubuntu-latest - - defaults: - run: - shell: bash - - steps: - - name: Get git branch name - id: get_branch - run: | - echo "::set-output name=branch::${GITHUB_REF#refs/heads/}" - - - name: Generate a token - id: generate_token - uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 - with: - app_id: ${{ secrets.TEMPORAL_CICD_APP_ID }} - private_key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} - - - name: Dispatch docker builds Github Action - env: - PAT: ${{ steps.generate_token.outputs.token }} - PARENT_REPO: temporalio/docker-builds - PARENT_BRANCH: ${{ toJSON('main') }} - WORKFLOW_ID: update-submodules.yml - REPO: ${{ toJSON('temporal') }} - BRANCH: ${{ toJSON(steps.get_branch.outputs.branch) }} - run: | - curl -fL -X POST -H "Accept: application/vnd.github.v3+json" -H "Authorization: token $PAT" "https://api.github.com/repos/$PARENT_REPO/actions/workflows/$WORKFLOW_ID/dispatches" -d '{"ref":'"$PARENT_BRANCH"', "inputs": { "repo":'"$REPO"', "branch":'"$BRANCH"' }}' diff --git a/.github/workflows/trigger-version-info-service.yml b/.github/workflows/trigger-version-info-service.yml new file mode 100644 index 00000000000..3058242ede2 --- /dev/null +++ b/.github/workflows/trigger-version-info-service.yml @@ -0,0 +1,39 @@ +name: 'Trigger version-info-service release' + +on: + release: + types: + - published +permissions: + contents: read + +jobs: + trigger-vis: + name: 'Trigger version-info-service release' + runs-on: ubuntu-latest + + defaults: + run: + shell: bash + + steps: + - name: Generate token + id: generate_token + uses: actions/create-github-app-token@v2 + with: + app-id: 
${{ secrets.TEMPORAL_CICD_APP_ID }} + private-key: ${{ secrets.TEMPORAL_CICD_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + repositories: | + version-info-service + + - name: Dispatch version-info-service Github Action + env: + GH_TOKEN: ${{ steps.generate_token.outputs.token }} + VERSION: ${{ github.event.release.tag_name }} + RELEASE_TIME: ${{ github.event.release.published_at }} + run: | + gh workflow run server-release.yml -R https://github.com/temporalio/version-info-service \ + -r main \ + -f version="${VERSION}" \ + -f release-time="${RELEASE_TIME}" diff --git a/.gitignore b/.gitignore index 4c68f0c8204..f6e1955edc7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ .tmp/ +.bin/ +.stamp/ .idea/ .vscode/settings.json @@ -11,6 +13,7 @@ *.out *.test *.xml +!tools/testrunner/testdata/*.xml *.swp /*.iml *.cov @@ -21,9 +24,13 @@ /temporal-* /tctl* /tdbg +/fairsim -# Buf proto image +# proto images /proto/image.bin +/proto/chasm.bin +# api+google proto dependencies +/proto/api.binpb # Goreleaser /dist @@ -33,3 +40,11 @@ # Git SPR: https://github.com/ejoffe/spr .spr.yml + +/proto.tmp + +**/.venv/ +**/.ruff_cache/ + +# Ignoring AI agent files +.agents/ diff --git a/.gitmodules b/.gitmodules index 72a7f348ea7..7704d96df23 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,7 +1,3 @@ -[submodule "proto/api"] - path = proto/api - url = https://github.com/temporalio/api - branch = master [submodule "develop/docker-compose/grafana/provisioning/temporalio-dashboards"] path = develop/docker-compose/grafana/provisioning/temporalio-dashboards url = https://github.com/temporalio/dashboards diff --git a/.golangci.yml b/.golangci.yml deleted file mode 100644 index 390cd830ab3..00000000000 --- a/.golangci.yml +++ /dev/null @@ -1,115 +0,0 @@ -# https://golangci-lint.run/usage/configuration/#config-file -linters: - disable-all: true - enable: - - goerr113 - - errcheck - - goimports - # - paralleltest # missing the call to method parallel, but testify does not seem to 
work well with parallel test: https://github.com/stretchr/testify/issues/187 - - revive # revive supersedes golint, which is now archived - - staticcheck - - vet - - forbidigo -run: - skip-dirs: - - ^api - - ^proto - - ^.git -linters-settings: - govet: - fieldalignment: 0 - forbidigo: - forbid: - - p: ^time\.After$ - msg: "time.After may leak resources. Use time.NewTimer instead." - revive: - severity: error - confidence: 0.8 - enable-all-rules: true - rules: - # Disabled rules - - name: add-constant - disabled: true - - name: argument-limit - disabled: true - - name: bare-return - disabled: true - - name: banned-characters - disabled: true - - name: bool-literal-in-expr - disabled: true - - name: confusing-naming - disabled: true - - name: empty-lines - disabled: true - - name: error-naming - disabled: true - - name: errorf - disabled: true - - name: exported - disabled: true - - name: file-header - disabled: true - - name: function-length - disabled: true - - name: imports-blacklist - disabled: true - - name: increment-decrement - disabled: true - - name: line-length-limit - disabled: true - - name: max-public-structs - disabled: true - - name: nested-structs - disabled: true - - name: package-comments - disabled: true - - name: string-format - disabled: true - - name: unexported-naming - disabled: true - - name: unexported-return - disabled: true - - name: unused-parameter - disabled: true - - name: unused-receiver - disabled: true - - name: use-any - disabled: true - - name: var-naming - disabled: true - - name: empty-block - disabled: true - - name: flag-parameter - disabled: true - - name: unnecessary-stmt - disabled: true - - # Rule tuning - - name: cognitive-complexity - arguments: - - 25 - - name: cyclomatic - arguments: - - 25 - - name: function-result-limit - arguments: - - 5 - - name: unhandled-error - arguments: - - "fmt.*" - - "bytes.Buffer.*" - - "strings.Builder.*" -issues: - exclude-rules: - - path: _test\.go|tests/.+\.go - text: 
"(cyclomatic|cognitive)" # false positives when using subtests - linters: - - revive - - path: _test\.go|tests/.+\.go - linters: - - goerr113 # like err = errors.New("test error") - - path: ^tools\/.+\.go - linters: - - goerr113 - - revive diff --git a/.goreleaser.yml b/.goreleaser.yml index 5c7a495eb9d..b352df02de0 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,17 +1,25 @@ +# GoReleaser v2 configuration +version: 2 + before: hooks: - go mod download +snapshot: + version_template: "{{ .Version }}-SNAPSHOT-{{ .ShortCommit }}" + archives: - id: default - builds: + ids: - temporal-server - temporal-cassandra-tool - temporal-sql-tool + - temporal-elasticsearch-tool + - tdbg name_template: "{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}" format_overrides: - goos: windows - format: zip + formats: [zip] files: - ./config/* @@ -52,6 +60,18 @@ builds: goarch: - amd64 - arm64 + - id: temporal-elasticsearch-tool + dir: cmd/tools/elasticsearch + binary: temporal-elasticsearch-tool + env: + - CGO_ENABLED=0 + goos: + - linux + - darwin + - windows + goarch: + - amd64 + - arm64 - id: tdbg dir: cmd/tools/tdbg binary: tdbg @@ -66,11 +86,11 @@ builds: - arm64 checksum: - name_template: 'checksums.txt' + name_template: "checksums.txt" algorithm: sha256 changelog: - skip: true + disable: true announce: - skip: "true" + skip: true diff --git a/.vscode/launch.json b/.vscode/launch.json index 0d8f4710f98..cc2486031b6 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -1,6 +1,28 @@ { "version": "0.2.0", "configurations": [ + { + "name": "Debug Running Server", + "type": "go", + "request": "attach", + "mode": "local", + "cwd": "${workspaceFolder}", + }, + { + "name": "Debug single functional test method", + "type": "go", + "request": "launch", + "mode": "test", + "buildFlags": [ + "-tags=test_dep" + ], + "program": "${relativeFileDirname}", + "args": [ + "-testify.m", + "${input:functionalTestSuiteMethod}" + ] + + }, { "name": "Debug Server", "type": "go", @@ 
-64,5 +86,13 @@ "start", ] }, + ], + "inputs": [ + { + "type": "promptString", + "id": "functionalTestSuiteMethod", + "description": "The method name within the functional test suite that you wish to debug.", + "default": "." + } ] } diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 00000000000..a4807125da7 --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,104 @@ +You are an experienced developer working on the temporal project. Your task is to fix a bug or implement a new feature while adhering to the project's best practices and development guidelines. Your background is in distributed systems, database engines, and scalable platforms. +Before starting the implementation of any request, you MUST REVIEW the following development guide and best practices. + +# Core Mandates +- **Conventions:** Rigorously adhere to existing project conventions when reading or modifying code. Analyze surrounding code, tests, and configuration first. +- **Libraries/Frameworks:** NEVER assume a library/framework is available or appropriate. Verify its established usage within the project (check imports, and 'go.mod') before employing it. +- **Style & Structure:** Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project. +- **Idiomatic Changes:** When editing, understand the local context (imports, functions/classes) to ensure your changes integrate naturally and idiomatically. +- **Comments:** Add code comments sparingly. Focus on *why* something is done, especially for complex logic, rather than *what* is done. Only add high-value comments if necessary for clarity or if requested by the user. Do not edit comments that are separate from the code you are changing. *NEVER* talk to the user or describe your changes through comments. +- **Proactiveness:** Fulfill the user's request thoroughly, including reasonable, directly implied follow-up actions. 
+- **Confirm Ambiguity/Expansion:** Do not take significant actions beyond the clear scope of the request without confirming with the user. If asked *how* to do something, explain first, don't just do it. +- **Explaining Changes:** After completing a code modification or file operation provide summaries. +- **Do Not revert changes:** Do not revert changes to the codebase unless asked to do so by the user. Only revert changes made by you if they have resulted in an error or if the user has explicitly asked you to revert the changes. + +# Tone and Style +- **Concise & Direct:** Adopt a professional, direct, and concise tone suitable for a chat environment. +- **Minimal Output:** Aim for fewer than 3 lines of text output (excluding tool use/code generation) per response whenever practical. Focus strictly on the user's query. +- **Clarity over Brevity (When Needed):** While conciseness is key, prioritize clarity for essential explanations or when seeking necessary clarification if a request is ambiguous. +- **No Chitchat:** Avoid conversational filler, preambles ("Okay, I will now..."), or postambles ("I have finished the changes..."). Get straight to the action or answer. +- **Formatting:** Use GitHub-flavored Markdown. Responses will be rendered in monospace. +- **Tools vs. Text:** Use tools for actions, text output *only* for communication. Do not add explanatory comments within tool calls or code blocks unless specifically part of the required code/command itself. +- **Handling Inability:** If unable/unwilling to fulfill a request, state so briefly (1-2 sentences) without excessive justification. Offer alternatives if appropriate. + + +# Development Guide +## Project Structure +- `/api`: proto definitions and generated code +- `/chasm`: library for Chasm (Coordinated Heterogeneous Application State Machines) +- `/client`: client libraries for inter-service communication between frontend/history/matching etc. 
+- `/cmd`: CLI commands and main applications +- `/common`: modules shared across all services +- `/common/dynamicconfig`: dynamic configuration library +- `/common/membership`: cluster membership management +- `/common/metrics`: metrics definition and library +- `/common/namespace`: namespace cache and utilities +- `/common/nexus`: Nexus service client and utilities +- `/common/persistence`: persistence layer abstractions and implementations +- `/components`: nexus components +- `/config`: configuration files and templates +- `/docs`: documentation +- `/proto`: proto definitions for internal services +- `/schema`: database schema definitions for core databases store and visibility store +- `/service`: main services (frontend, history, matching, worker, etc.) +- `/service/frontend`: frontend service implementation +- `/service/history`: history service implementation +- `/service/matching`: matching service implementation +- `/service/worker`: worker service implementation + +## Important Commands: +- Linting: `make lint-code` +- Formatting imports: `make fmt-imports` +- Code generation: `make proto` +- Update API proto: `make update-go-api` +- Unit Testing: `make unit-test` + +## Best Practices: +- Mimic the style (formatting, naming), structure, framework choices, typing, and architectural patterns of existing code in the project +- Do not litter our codebase with unnecessary comments. Comments should describe WHY something was done, never WHAT was done +- Implement tests for both best-case scenarios and failure modes +- Handle errors appropriately + - errors MUST be handled, not ignored +- Leave `CONSIDER(name):` comments for future design considerations +- Regenerate code when interface definitions change +- Always include `-tags test_dep` when running tests +- Include the `integration` tag only for integration tests +- Do not introduce new third party libraries unless specifically requested. 
+ +## Error Handling: +- Check and handle all errors +- Use appropriate logging methods based on error severity + - Use `logger.Fatal` for core invariant violations + - Use `logger.DPanic` for issues that are important but should not crash production + +## Testing: +- Write tests for new functionality +- Run tests after altering code or tests +- Start with unit tests for fastest feedback +- Prefer `require` over `assert`, avoid testify suites in unit tests (functional tests require suites for test cluster setup), use `require.Eventually` instead of `time.Sleep` (forbidden by linter) +- For float comparisons in tests, use `InDelta` or `InEpsilon` instead of `Equal` (enforced by `testifylint`) +- For error assertions in testify suites, use `s.Require().NoError(err)` instead of `s.NoError(err)` (enforced by `testifylint`) + +# Primary Workflows +## Software Engineering Tasks +When requested to perform tasks like fixing bugs, adding features, refactoring, or explaining code, follow this sequence: +1. **Understand:** Think about the user's request and the relevant codebase context. +2. **Plan:** Build a coherent and grounded (based on the understanding in step 1) plan for how you intend to resolve the user's task. Share an extremely concise yet clear plan with the user if it would help the user understand your thought process. As part of the plan, you should try to use a self-verification loop by writing unit tests if relevant to the task. Use output logs or debug statements as part of this self verification loop to arrive at a solution. +3. **Implement:** Use the available tools to act on the plan, strictly adhering to the project's established conventions (detailed under 'Core Mandates'). +4. **Regenerate:** If necessary, regenerate code based on your changes. If you alter anything annotated with `//go:generate` or in a `.proto` file you will need to do this. +5. **Verify (Tests):** If applicable and feasible, verify the changes using the project's testing procedures. 
Identify the correct test commands and frameworks by examining 'README' files, build/package configuration (e.g., 'Makefile'), or existing test execution patterns. NEVER assume standard test commands. +6. **Verify (Standards):** VERY IMPORTANT: After making code changes, execute the project-specific build, linting and type-checking commands (`make lint-code`) + +## Planning +When planning (under 'Software Engineering Tasks'): +1. Break down the feature into smaller, manageable tasks. +2. Consider potential challenges for each task and how to address them. +3. Provide a high-level outline of the code structure, including function names and their purposes. +4. List specific test cases you plan to implement. +5. State which error handling approaches you will use for different scenarios. +6. Discuss the trade-offs inherent in your design decisions, including: + a. Performance trade-offs + b. Scalability trade-offs + c. Complexity trade-offs + d. Security trade-offs +7. Reason about the failure modes of your design. How does it handle crashes? A 10x increase in load? diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a10ca9129bb..e6559886d3b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,40 +1,59 @@ # Develop Temporal Server + This doc is for contributors to Temporal Server (hopefully that's you!) -**Note:** All contributors also need to fill out the [Temporal Contributor License Agreement](docs/development/temporal-cla.md) before we can merge in any of your changes. +**Note:** All contributors also need to fill out the [Temporal Contributor License Agreement](./docs/development/temporal-cla.md) before we can merge in any of your changes. ## Prerequisites -### Build prerequisites -* [Go Lang](https://golang.org/) (minimum version required is 1.19): +### Build prerequisites + +- [Go Lang](https://go.dev/) (minimum version required listed in `go.mod` [file](go.mod)): - Install on macOS with `brew install go`. - Install on Ubuntu with `sudo apt install golang`. 
-* [Protocol buffers compiler](https://github.com/protocolbuffers/protobuf/) (only if you are going to change `proto` files): +- [Protocol buffers compiler](https://github.com/protocolbuffers/protobuf/) (only if you are going to change `proto` files): - Install on macOS with `brew install protobuf`. - Download all other versions from [protoc release page](https://github.com/protocolbuffers/protobuf/releases). -* [Temporal CLI](https://github.com/temporalio/cli) +- [Temporal CLI](https://github.com/temporalio/cli) - Homebrew `brew install temporal` - - Go install `make update-cli` - Or download it from here https://github.com/temporalio/cli - ### Runtime (server and tests) prerequisites -* [docker](https://docs.docker.com/engine/install/) + +- [docker](https://docs.docker.com/engine/install/) > Note: it is possible to run Temporal server without a `docker`. If for some reason (for example, performance on macOS) -> you want to run dependencies on the host OS, please follow the [doc](docs/development/run-dependencies-host.md). +> you want to run dependencies on the host OS, please follow the [doc](./docs/development/run-dependencies-host.md). + +- Runtime dependencies are optional support services that can be helpful during development and testing, providing: 1) UI, 2) +databases, and 3) metrics services via `docker compose`. By default, the server utilizes SQLite as an in-memory +database, so the runtime dependencies are optional. To start dependencies, open new terminal window and run: + +```bash +make start-dependencies +``` + +To stop the dependencies: +```bash +make stop-dependencies +``` ### For Windows developers + For developing on Windows, install [Windows Subsystem for Linux 2 (WSL2)](https://aka.ms/wsl) and [Ubuntu](https://docs.microsoft.com/en-us/windows/wsl/install-win10#step-6---install-your-linux-distribution-of-choice). After that, follow the guidance for installing prerequisites, building, and testing on Ubuntu. 
## Check out the code + Temporal uses go modules, there is no dependency on `$GOPATH` variable. Clone the repo into the preferred location: + ```bash git clone https://github.com/temporalio/temporal.git ``` ## Build -For the very first time build `temporal-server` and helper tools with simple `make` command: + +For the very first time build `temporal-server` and helper tools with simple `make` command: + ```bash make ``` @@ -42,6 +61,7 @@ make It will install all other build dependencies and build the binaries. Further you can build binaries without running tests with: + ```bash make bins ``` @@ -49,85 +69,144 @@ make bins Please check the top of our [Makefile](Makefile) for other useful build targets. ## Run tests + We defined three categories of tests. -* Unit test: Those tests should not have dependencies other than the test target and go mock. We should have unit test coverage as much as possible. -* Integration test: Those tests cover the integration between the server and the dependencies (Cassandra, SQL, ES etc.). -* Functional test: Those tests cover the E2E functionality of Temporal server. They are all under ./tests directory. -Integration and functional tests require runtime dependencies. They can be run with `start-dependencies` target (uses `docker compose` internally). Open new terminal window and run: -```bash -make start-dependencies -``` +- Unit test: Those tests should not have dependencies other than the test target and go mock. We should have unit test coverage as much as possible. +- Integration test: Those tests cover the integration between the server and the dependencies (Cassandra, SQL, ES etc.). +- Functional test: Those tests cover the E2E functionality of Temporal server. They are all under ./tests directory. 
-Before testing on macOS, make sure you increase the file handle limit: -```bash -ulimit -n 8192 -``` +Integration and functional tests require [runtime dependencies](#runtime-server-and-tests-prerequisites), +when running with a persistence option that is not SQLite. If running unit tests, no need to start the dependencies. Run unit tests: + ```bash make unit-test ``` Run all integration tests: + ```bash make integration-test ``` Run all functional tests: + ```bash make functional-test ``` Or run all the tests at once: + ```bash make test ``` You can also run a single test: + ```bash go test -v -run -testify.m ``` + for example: + ```bash go test -v github.com/temporalio/temporal/common/persistence -run TestCassandraPersistenceSuite -testify.m TestPersistenceStartWorkflow ``` When you are done, don't forget to stop `docker compose` (with `Ctrl+C`) and clean up all dependencies: + ```bash make stop-dependencies ``` ## Run Temporal Server locally -First start runtime dependencies. They can be run with `start-dependencies` target (uses `docker compose` internally). Open new terminal window and run: -```bash -make start-dependencies -``` -then run the server: +First, start the optional [runtime dependencies](#runtime-server-and-tests-prerequisites) if needed for the desired persistence option. + +Then run the server: + ```bash make start ``` -This will start the server using SQLite as database. If you want to run with Cassandra and Elasticsearch, then run these commands: +This will start the server using SQLite as an in-memory database. You can choose other databases as well. 
+ +If you want to run with Cassandra and Elasticsearch, then run these commands: + ```bash make install-schema-cass-es make start-cass-es ``` -Now you can create default namespace with Temporal CLI: +To run with SQLite with a persisted file: + ```bash -temporal operator namespace create default +make start-sqlite-file ``` -and run samples from [Go](https://github.com/temporalio/samples-go) and [Java](https://github.com/temporalio/samples-java) samples repos. Also, you can access web UI at `localhost:8080`. -When you are done, press `Ctrl+C` to stop the server. Don't forget to stop dependencies (with `Ctrl+C`) and clean up resources: +To run with Postgres: +```bash +make install-schema-postgresql +make start-postgres +``` + +To run with MySQL: +```bash +make install-schema-mysql +make start-mysql +``` + +Now you can create a namespace with the Temporal CLI (while you can select any name for a namespace, we recommend using `default` while learning, because a number of samples assume there is a namespace named `default`): + +```bash +temporal operator namespace create -n default +``` + +and run samples from the samples repos ([Go](https://github.com/temporalio/samples-go) | [Java](https://github.com/temporalio/samples-java) | [TypeScript](https://github.com/temporalio/samples-typescript) | [.NET](https://github.com/temporalio/samples-dotnet) | [Python](https://github.com/temporalio/samples-python) | [Ruby](https://github.com/temporalio/samples-ruby)). 
If you are new to Temporal, helloworld ([Go](https://github.com/temporalio/samples-go/tree/main/helloworld) | [Java](https://github.com/temporalio/samples-java/tree/main/core/src/main/java/io/temporal/samples/hello) | [TypeScript](https://github.com/temporalio/samples-typescript/tree/main/hello-world) | [.NET](https://github.com/temporalio/sdk-dotnet?tab=readme-ov-file#implementing-a-workflow-and-activity) | [Python](https://github.com/temporalio/samples-python/tree/main/hello) | [Ruby](https://github.com/temporalio/sdk-ruby?tab=readme-ov-file#implementing-a-workflow-and-activity)) is a very good sample to start with. Also, if you have started the runtime dependencies, you can access the web UI at `localhost:8080` which is a good way to visualize work done by the server and deepen your knowledge of Temporal. + +When you are done, press `Ctrl+C` to stop the server. + +If you started [runtime dependencies](#runtime-server-and-tests-prerequisites), don't forget to stop dependencies +(with `Ctrl+C`) and clean up resources: + ```bash make stop-dependencies ``` -## Working with pending API changes -If you need to make changes to the gRPC definitions while also working on code in this repo, do the following: +See the [developer documentation on testing](./docs/development/testing.md) to learn more about writing tests. + +## Debugging with the IDE + +### GoLand + +For general instructions, see [GoLand Debugging](https://www.jetbrains.com/help/go/debugging-code.html). + +First, start the optional [runtime dependencies](#runtime-server-and-tests-prerequisites) if needed for the desired persistence option. + +To run the server, ensure the Run Type is package. In "Package path", enter `go.temporal.io/server/cmd/server`. +In the "Program arguments" field, add the following: + +``` +--env --allow-no-auth start +``` + +For example, to run with Postgres: +``` +--env development-postgres12 --allow-no-auth start +``` + +See Makefile for other environments. 
+ +## Working with merged API changes + +gRPC / protobuf changes merged to the [api](https://github.com/temporalio/api) repo automatically trigger a commit in [api-go](https://github.com/temporalio/api-go). +To bring such changes into your feature branch, use `make update-go-api`. + +## Working with local API changes + +If you need to make changes to the gRPC / protobuf definitions while also working on code in this repo, do the following: 1. Checkout [api](https://github.com/temporalio/api), [api-go](https://github.com/temporalio/api-go), and [sdk-go](https://github.com/temporalio/sdk-go) 2. Make your changes to `api`, commit to a branch. @@ -140,60 +219,44 @@ If you need to make changes to the gRPC definitions while also working on code i git submodule update --remote proto/api ``` 3. Compile protos: `make proto` -4. In your copy of `sdk-go`: - 1. Point `go.mod` at local `api-go`: - ``` - replace ( - go.temporal.io/api => ../api-go - ) - ``` - 2. Compile & fix errors: `make bins` +4. (Optional, if SDK changes are required:) In your copy of `sdk-go`: + 1. Point `go.mod` at local `api-go`: + ``` + replace ( + go.temporal.io/api => ../api-go + ) + ``` + 2. Compile & fix errors: `make bins` 5. In this repo: - 1. Initialize submodules: `git submodule update --init --recursive` - 2. Point api submodule at your branch. If you make more commits to the api repo, run the last command again. - ```bash - git submodule set-url proto/api ../api - git submodule set-branch --branch mystuff proto/api - git submodule update --remote proto/api - ``` - 3. Stage the change: `git add -u` (otherwise makefile will blow it away) - 4. Point `go.mod` at local `api-go` and `sdk-go`: - ``` - replace ( - go.temporal.io/api => ../api-go - go.temporal.io/sdk => ../sdk-go - ) - ``` - 5. Build & fix errors: `make proto && make bins` - -## Licence headers -This project is Open Source Software, and requires a header at the beginning of -all source files. 
To verify that all files contain the header execute: -```bash -make copyright -``` + 1. Point `go.mod` at local `api-go` and `sdk-go`: + ``` + replace ( + go.temporal.io/api => ../api-go + go.temporal.io/sdk => ../sdk-go + ) + ``` + 2. Build & fix errors: + ``` + make proto + make go-generate + make bins + ``` ## Commit Messages And Titles of Pull Requests + Overcommit adds some requirements to your commit messages. At Temporal, we follow the [Chris Beams](http://chris.beams.io/posts/git-commit/) guide to writing git commit messages. Read it, follow it, learn it, love it. -All commit messages are from the titles of your pull requests. So make sure follow the rules when titling them. -Please don't use very generic titles like "bug fixes". +All commit messages are from the titles of your pull requests. So make sure to follow the rules when titling them. +Please don't use very generic titles like "bug fixes". All PR titles should start with Upper case and have no dot at the end. -## Go build and run tags +## Go version update -Prior to Server version v1.23.0 our protobuf code generator allowed invalid UTF-8 data to be stored as proto strings. This isn't actually allowed by the proto3 spec, so we need to specify `-tags protolegacy` when building against the server. Our Makefile does this, but if you're using temporal as a library you'll need to enable that yourself. - -Example: - -``` shell -$ go build -tags protolegacy ./cmd/server -``` - -If you see an error like `grpc: error unmarshalling request: string field contains invalid UTF-8` then you've forgotten to specify this flag. +To update the Go version, update the `go` directive in `go.mod`. CI workflows automatically pick up the version from `go.mod`. ## License + MIT License, please see [LICENSE](LICENSE) for details. diff --git a/LICENSE b/LICENSE index 20a609ec802..3349f76795f 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License -Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
+Copyright (c) 2025 Temporal Technologies Inc. All rights reserved. Copyright (c) 2020 Uber Technologies, Inc. diff --git a/Makefile b/Makefile index 74024482e5b..5a3e1851d8e 100644 --- a/Makefile +++ b/Makefile @@ -1,37 +1,41 @@ ############################# Main targets ############################# # Install all tools and builds binaries. -install: update-tools bins +install: bins # Rebuild binaries (used by Dockerfile). -bins: temporal-server temporal-cassandra-tool temporal-sql-tool tdbg +bins: temporal-server temporal-cassandra-tool temporal-sql-tool temporal-elasticsearch-tool tdbg # Install all tools, recompile proto files, run all possible checks and tests (long but comprehensive). -all: update-tools clean proto bins check test - -# Used in CI -ci-build-misc: print-go-version ci-update-tools proto bins shell-check copyright-check go-generate gomodtidy ensure-no-changes +all: clean proto bins check test + +# Used in CI. +ci-build-misc: \ + print-go-version \ + clean-tools \ + proto \ + go-generate \ + buf-breaking \ + shell-check \ + goimports \ + gomodtidy \ + ensure-no-changes # Delete all build artifacts -clean: clean-bins clean-test-results +clean: clean-bins clean-tools clean-test-output # Recompile proto files. -proto: clean-proto buf-lint api-linter protoc service-clients goimports-proto proto-mocks copyright-proto - -# Update proto submodule from remote and recompile proto files. -update-proto: update-proto-submodule proto gomodtidy +proto: lint-protos lint-api protoc proto-codegen ######################################################################## -.PHONY: proto proto-mocks protoc +.PHONY: proto protoc install bins ci-build-misc clean ##### Arguments ###### - GOOS ?= $(shell go env GOOS) GOARCH ?= $(shell go env GOARCH) GOPATH ?= $(shell go env GOPATH) # Disable cgo by default. 
CGO_ENABLED ?= 0 -TEST_ARGS ?= -race PERSISTENCE_TYPE ?= nosql PERSISTENCE_DRIVER ?= cassandra @@ -40,18 +44,51 @@ PERSISTENCE_DRIVER ?= cassandra TEMPORAL_DB ?= temporal VISIBILITY_DB ?= temporal_visibility -# Always use "protolegacy" tag to allow disabling utf-8 validation on proto messages -# during proto library transition. -ALL_BUILD_TAGS := protolegacy,$(BUILD_TAG) -ALL_TEST_TAGS := $(ALL_BUILD_TAGS),$(TEST_TAG) +# The `disable_grpc_modules` build tag excludes gRPC dependencies from cloud.google.com/go/storage, +# reducing binary size by 16MB since we only use the REST client (storage.NewClient), not the +# gRPC client (storage.NewGRPCClient). Related issue: https://github.com/googleapis/google-cloud-go/issues/12343 +ALL_BUILD_TAGS := disable_grpc_modules,$(BUILD_TAG) +ALL_TEST_TAGS := $(ALL_BUILD_TAGS),test_dep,$(TEST_TAG) BUILD_TAG_FLAG := -tags $(ALL_BUILD_TAGS) TEST_TAG_FLAG := -tags $(ALL_TEST_TAGS) +# 20 minutes is the upper bound defined for all tests. (Tests in CI take up to about 14:30 now) +# If you change this, also change .github/workflows/run-tests.yml! +# The timeout in the GH workflow must be larger than this to avoid GH timing out the action, +# which causes a job run to not produce any logs and hurts the debugging experience. +TEST_TIMEOUT ?= 35m + +# Number of retries for *-coverage targets. +MAX_TEST_ATTEMPTS ?= 3 + +# Whether or not to test with the race detector. All of (1 on y yes t true) are true values. +TEST_RACE_FLAG ?= on +# Whether or not to shuffle tests. All of (1 on y yes t true) are true values. +TEST_SHUFFLE_FLAG ?= on +# Common test args used in the various test suite targets. 
+COMPILED_TEST_ARGS := -timeout=$(TEST_TIMEOUT) \ + $(if $(filter 1 on y yes t true, $(TEST_RACE_FLAG)),-race,) \ + $(if $(filter 1 on y yes t true, $(TEST_SHUFFLE_FLAG)),-shuffle on,) \ + $(TEST_PARALLEL_FLAGS) \ + $(TEST_ARGS) \ + $(TEST_TAG_FLAG) ##### Variables ###### -GOBIN := $(if $(shell go env GOBIN),$(shell go env GOBIN),$(GOPATH)/bin) -PATH := $(GOBIN):$(PATH) +ROOT := $(shell git rev-parse --show-toplevel) +LOCALBIN := .bin +STAMPDIR := .stamp +export PATH := $(ROOT)/$(LOCALBIN):$(PATH) +GOINSTALL := GOBIN=$(ROOT)/$(LOCALBIN) go install + +OTEL ?= false +ifeq ($(OTEL),true) + export OTEL_BSP_SCHEDULE_DELAY=100 # in ms + export OTEL_EXPORTER_OTLP_TRACES_INSECURE=true + export OTEL_TRACES_EXPORTER=otlp + export TEMPORAL_OTEL_DEBUG=true + export TEMPORAL_TEST_DATA_ENCODING=json +endif MODULE_ROOT := $(lastword $(shell grep -e "^module " go.mod)) COLOR := "\e[1;36m%s\e[0m\n" @@ -62,17 +99,16 @@ define NEWLINE endef -TEST_TIMEOUT := 30m - - PROTO_ROOT := proto PROTO_FILES = $(shell find ./$(PROTO_ROOT)/internal -name "*.proto") +CHASM_PROTO_FILES = $(shell find ./chasm/lib -name "*.proto") PROTO_DIRS = $(sort $(dir $(PROTO_FILES))) -PROTO_IMPORTS = -I=$(PROTO_ROOT)/internal -I=$(PROTO_ROOT)/api -I=$(PROTO_ROOT)/dependencies -PROTO_OPTS = paths=source_relative:$(PROTO_OUT) +API_BINPB := $(PROTO_ROOT)/api.binpb +# Note: If you change the value of INTERNAL_BINPB, you'll have to add logic to +# develop/buf-breaking.sh to handle the old and new values at once. +INTERNAL_BINPB := $(PROTO_ROOT)/image.bin +CHASM_BINPB := $(PROTO_ROOT)/chasm.bin PROTO_OUT := api -PROTO_ENUMS := $(shell grep -R '^enum ' $(PROTO_ROOT) | cut -d ' ' -f2) -PROTO_PATHS = paths=source_relative:$(PROTO_OUT) ALL_SRC := $(shell find . -name "*.go") ALL_SRC += go.mod @@ -80,38 +116,32 @@ ALL_SCRIPTS := $(shell find . 
-name "*.sh") MAIN_BRANCH := main +# If you update these dirs, please also update in CategoryDirs find_altered_tests.go TEST_DIRS := $(sort $(dir $(filter %_test.go,$(ALL_SRC)))) FUNCTIONAL_TEST_ROOT := ./tests FUNCTIONAL_TEST_XDC_ROOT := ./tests/xdc FUNCTIONAL_TEST_NDC_ROOT := ./tests/ndc +MIXED_BRAIN_TEST_ROOT := ./tests/mixedbrain DB_INTEGRATION_TEST_ROOT := ./common/persistence/tests DB_TOOL_INTEGRATION_TEST_ROOT := ./tools/tests -INTEGRATION_TEST_DIRS := $(DB_INTEGRATION_TEST_ROOT) $(DB_TOOL_INTEGRATION_TEST_ROOT) ./temporaltest ./internal/temporalite -UNIT_TEST_DIRS := $(filter-out $(FUNCTIONAL_TEST_ROOT)% $(FUNCTIONAL_TEST_XDC_ROOT)% $(FUNCTIONAL_TEST_NDC_ROOT)% $(DB_INTEGRATION_TEST_ROOT)% $(DB_TOOL_INTEGRATION_TEST_ROOT)% ./temporaltest% ./internal/temporalite%,$(TEST_DIRS)) +INTEGRATION_TEST_DIRS := $(DB_INTEGRATION_TEST_ROOT) $(DB_TOOL_INTEGRATION_TEST_ROOT) ./temporaltest +ifeq ($(UNIT_TEST_DIRS),) +UNIT_TEST_DIRS := $(filter-out $(FUNCTIONAL_TEST_ROOT)% $(FUNCTIONAL_TEST_XDC_ROOT)% $(FUNCTIONAL_TEST_NDC_ROOT)% $(MIXED_BRAIN_TEST_ROOT)% $(DB_INTEGRATION_TEST_ROOT)% $(DB_TOOL_INTEGRATION_TEST_ROOT)% ./temporaltest%,$(TEST_DIRS)) +endif +SYSTEM_WORKFLOWS_ROOT := ./service/worker -# github.com/urfave/cli/v2@v2.4.0 - needs to accept comma in values before unlocking https://github.com/urfave/cli/pull/1241. PINNED_DEPENDENCIES := \ - github.com/go-sql-driver/mysql@v1.5.0 \ - github.com/urfave/cli/v2@v2.4.0 # Code coverage & test report output files. 
TEST_OUTPUT_ROOT := ./.testoutput -NEW_COVER_PROFILE = $(TEST_OUTPUT_ROOT)/$(shell xxd -p -l 16 /dev/urandom).cover.out # generates a new filename each time it's substituted -SUMMARY_COVER_PROFILE := $(TEST_OUTPUT_ROOT)/summary.cover.out -NEW_REPORT = $(TEST_OUTPUT_ROOT)/$(shell xxd -p -l 16 /dev/urandom).junit.xml # generates a new filename each time it's substituted +NEW_COVER_PROFILE = $(TEST_OUTPUT_ROOT)/coverage.$(shell xxd -p -l 16 /dev/urandom).out # generates a new filename each time it's substituted +NEW_REPORT = $(TEST_OUTPUT_ROOT)/junit.$(shell xxd -p -l 16 /dev/urandom).xml # generates a new filename each time it's substituted +COVERPKG_FLAG = -coverpkg=./... # DB SQL_USER ?= temporal SQL_PASSWORD ?= temporal -# Need the following option to have integration and functional tests count towards coverage. godoc below: -# -coverpkg pkg1,pkg2,pkg3 -# Apply coverage analysis in each test to the given list of packages. -# The default is for each test to analyze only the package being tested. -# Packages are specified as import paths. -INTEGRATION_TEST_COVERPKG := -coverpkg="$(MODULE_ROOT)/common/persistence/..." -FUNCTIONAL_TEST_COVERPKG := -coverpkg="$(MODULE_ROOT)/client/...,$(MODULE_ROOT)/common/...,$(MODULE_ROOT)/service/...,$(MODULE_ROOT)/temporal/..." - # Only prints output if the exit code is non-zero define silent_exec @output=$$($(1) 2>&1); \ @@ -126,111 +156,201 @@ endef print-go-version: @go version -update-goimports: - @printf $(COLOR) "Install/update goimports..." - @go install golang.org/x/tools/cmd/goimports@latest - -update-linters: - @printf $(COLOR) "Install/update linters..." - @go install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.53.3 - -update-mockgen: - @printf $(COLOR) "Install/update mockgen tool..." - @go install github.com/golang/mock/mockgen@v1.7.0-rc.1 - -update-gotestsum: - @printf $(COLOR) "Install/update gotestsum..." 
- @go install gotest.tools/gotestsum@v1.11 - -update-proto-plugins: - @printf $(COLOR) "Install/update proto plugins..." - @go install google.golang.org/protobuf/cmd/protoc-gen-go@latest - @go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest - @go install -modfile build/go.mod go.temporal.io/api/cmd/protoc-gen-go-helpers - @go install -modfile build/go.mod go.temporal.io/api/cmd/protogen - -update-proto-linters: - @printf $(COLOR) "Install/update proto linters..." - @go install github.com/googleapis/api-linter/cmd/api-linter@v1.32.3 - @go install github.com/bufbuild/buf/cmd/buf@v1.6.0 - -update-tctl: - @printf $(COLOR) "Install/update tctl..." - @go install github.com/temporalio/tctl/cmd/tctl@latest - -update-cli: - @printf $(COLOR) "Install/update cli..." - curl -sSf https://temporal.download/cli.sh | sh - -update-ui: - @printf $(COLOR) "Install/update temporal ui-server..." - @go install github.com/temporalio/ui-server/cmd/server@latest - -update-tools: update-goimports update-linters update-mockgen update-proto-plugins update-proto-linters update-gotestsum - -# update-linters is not included because in CI linters are run by github actions. -ci-update-tools: update-goimports update-mockgen update-proto-plugins update-proto-linters update-gotestsum +clean-tools: + @printf $(COLOR) "Delete tools..." + @rm -rf $(STAMPDIR) + @rm -rf $(LOCALBIN) + +$(STAMPDIR): + @mkdir -p $(STAMPDIR) + +$(LOCALBIN): + @mkdir -p $(LOCALBIN) + +# When updating the version, update the golangci-lint GHA workflow as well. 
+.PHONY: golangci-lint +GOLANGCI_LINT_BASE_REV ?= $(MAIN_BRANCH) +GOLANGCI_LINT_FIX ?= true +GOLANGCI_LINT_VERSION := v2.9.0 +GOLANGCI_LINT := $(LOCALBIN)/golangci-lint-$(GOLANGCI_LINT_VERSION) +$(GOLANGCI_LINT): $(LOCALBIN) + $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/v2/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION)) + +# Don't get confused, there is a single linter called gci, which is a part of the mega linter we use is called golangci-lint. +GCI_VERSION := v0.13.6 +GCI := $(LOCALBIN)/gci-$(GCI_VERSION) +$(GCI): $(LOCALBIN) + $(call go-install-tool,$(GCI),github.com/daixiang0/gci,$(GCI_VERSION)) + +GOTESTSUM_VER := v1.12.3 +GOTESTSUM := $(LOCALBIN)/gotestsum-$(GOTESTSUM_VER) +$(GOTESTSUM): | $(LOCALBIN) + $(call go-install-tool,$(GOTESTSUM),gotest.tools/gotestsum,$(GOTESTSUM_VER)) + +API_LINTER_VER := v1.32.3 +API_LINTER := $(LOCALBIN)/api-linter-$(API_LINTER_VER) +$(API_LINTER): | $(LOCALBIN) + $(call go-install-tool,$(API_LINTER),github.com/googleapis/api-linter/cmd/api-linter,$(API_LINTER_VER)) + +BUF_VER := v1.6.0 +BUF := $(LOCALBIN)/buf-$(BUF_VER) +$(BUF): | $(LOCALBIN) + $(call go-install-tool,$(BUF),github.com/bufbuild/buf/cmd/buf,$(BUF_VER)) + +GO_API_VER = $(shell go list -m -f '{{.Version}}' go.temporal.io/api \ + || (echo "failed to fetch version for go.temporal.io/api" >&2)) +PROTOGEN := $(LOCALBIN)/protogen-$(GO_API_VER) +$(PROTOGEN): | $(LOCALBIN) + $(call go-install-tool,$(PROTOGEN),go.temporal.io/api/cmd/protogen,$(GO_API_VER)) + +ACTIONLINT_VER := v1.7.7 +ACTIONLINT := $(LOCALBIN)/actionlint-$(ACTIONLINT_VER) +$(ACTIONLINT): | $(LOCALBIN) + $(call go-install-tool,$(ACTIONLINT),github.com/rhysd/actionlint/cmd/actionlint,$(ACTIONLINT_VER)) + +WORKFLOWCHECK_VER := master # TODO: pin this specific version once 0.3.0 follow-up is released +WORKFLOWCHECK := $(LOCALBIN)/workflowcheck-$(WORKFLOWCHECK_VER) +$(WORKFLOWCHECK): | $(LOCALBIN) + $(call 
go-install-tool,$(WORKFLOWCHECK),go.temporal.io/sdk/contrib/tools/workflowcheck,$(WORKFLOWCHECK_VER)) + +YAMLFMT_VER := v0.16.0 +YAMLFMT := $(LOCALBIN)/yamlfmt-$(YAMLFMT_VER) +$(YAMLFMT): | $(LOCALBIN) + $(call go-install-tool,$(YAMLFMT),github.com/google/yamlfmt/cmd/yamlfmt,$(YAMLFMT_VER)) + +GOIMPORTS_VER := v0.36.0 +GOIMPORTS := $(LOCALBIN)/goimports-$(GOIMPORTS_VER) +$(STAMPDIR)/goimports-$(GOIMPORTS_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(GOIMPORTS),golang.org/x/tools/cmd/goimports,$(GOIMPORTS_VER)) + @touch $@ +$(GOIMPORTS): $(STAMPDIR)/goimports-$(GOIMPORTS_VER) + +GOWRAP_VER := v1.4.3 +GOWRAP := $(LOCALBIN)/gowrap +$(STAMPDIR)/gowrap-$(GOWRAP_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(GOWRAP),github.com/hexdigest/gowrap/cmd/gowrap,$(GOWRAP_VER)) + @touch $@ +$(GOWRAP): $(STAMPDIR)/gowrap-$(GOWRAP_VER) + +GOMAJOR_VER := v0.14.0 +GOMAJOR := $(LOCALBIN)/gomajor +$(STAMPDIR)/gomajor-$(GOMAJOR_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(GOMAJOR),github.com/icholy/gomajor,$(GOMAJOR_VER)) + @touch $@ +$(GOMAJOR): $(STAMPDIR)/gomajor-$(GOMAJOR_VER) + +ERRORTYPE_VER := v0.0.7 +ERRORTYPE := $(LOCALBIN)/errortype +$(ERRORTYPE): | $(LOCALBIN) + $(call go-install-tool,$(ERRORTYPE),fillmore-labs.com/errortype,$(ERRORTYPE_VER)) + +# Mockgen is called by name throughout the codebase, so we need to keep the binary name consistent +MOCKGEN_VER := v0.6.0 +MOCKGEN := $(LOCALBIN)/mockgen +$(STAMPDIR)/mockgen-$(MOCKGEN_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(MOCKGEN),go.uber.org/mock/mockgen,$(MOCKGEN_VER)) + @touch $@ +$(MOCKGEN): $(STAMPDIR)/mockgen-$(MOCKGEN_VER) + +STRINGER_VER := v0.36.0 +STRINGER := $(LOCALBIN)/stringer +$(STAMPDIR)/stringer-$(STRINGER_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(STRINGER),golang.org/x/tools/cmd/stringer,$(STRINGER_VER)) + @touch $@ +$(STRINGER): $(STAMPDIR)/stringer-$(STRINGER_VER) + +PROTOC_GEN_GO_VER := v1.36.6 +PROTOC_GEN_GO := 
$(LOCALBIN)/protoc-gen-go-$(PROTOC_GEN_GO_VER) +$(STAMPDIR)/protoc-gen-go-$(PROTOC_GEN_GO_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(PROTOC_GEN_GO),google.golang.org/protobuf/cmd/protoc-gen-go,$(PROTOC_GEN_GO_VER)) + @touch $@ +$(PROTOC_GEN_GO): $(STAMPDIR)/protoc-gen-go-$(PROTOC_GEN_GO_VER) + +PROTOC_GEN_GO_GRPC_VER := v1.3.0 +PROTOC_GEN_GO_GRPC := $(LOCALBIN)/protoc-gen-go-grpc-$(PROTOC_GEN_GO_GRPC_VER) +$(STAMPDIR)/protoc-gen-go-grpc-$(PROTOC_GEN_GO_GRPC_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(PROTOC_GEN_GO_GRPC),google.golang.org/grpc/cmd/protoc-gen-go-grpc,$(PROTOC_GEN_GO_GRPC_VER)) + @touch $@ +$(PROTOC_GEN_GO_GRPC): $(STAMPDIR)/protoc-gen-go-grpc-$(PROTOC_GEN_GO_GRPC_VER) + +PROTOC_GEN_GO_HELPERS := $(LOCALBIN)/protoc-gen-go-helpers-$(GO_API_VER) +$(STAMPDIR)/protoc-gen-go-helpers-$(GO_API_VER): | $(STAMPDIR) $(LOCALBIN) + $(call go-install-tool,$(PROTOC_GEN_GO_HELPERS),go.temporal.io/api/cmd/protoc-gen-go-helpers,$(GO_API_VER)) + @touch $@ +$(PROTOC_GEN_GO_HELPERS): $(STAMPDIR)/protoc-gen-go-helpers-$(GO_API_VER) + +$(LOCALBIN)/protoc-gen-go-chasm: $(LOCALBIN) cmd/tools/protoc-gen-go-chasm/main.go go.mod go.sum + @go build -o $@ ./cmd/tools/protoc-gen-go-chasm + +# go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist +# $1 - target path with name of binary (ideally with version) +# $2 - package url which can be installed +# $3 - specific version of package +# This is courtesy of https://github.com/kubernetes-sigs/kubebuilder/pull/3718 +define go-install-tool +@[ -f $(1) ] || { \ +set -e; \ +package=$(2)@$(3) ;\ +printf $(COLOR) "Downloading $${package}" ;\ +tmpdir=$$(mktemp -d) ;\ +GOBIN=$${tmpdir} go install $${package} ;\ +mv $${tmpdir}/$$(basename "$$(echo "$(1)" | sed "s/-$(3)$$//")") $(1) ;\ +rm -rf $${tmpdir} ;\ +} +endef ##### Proto ##### -$(PROTO_OUT): - @mkdir -p $(PROTO_OUT) - -clean-proto: - @rm -rf $(PROTO_OUT)/* - -update-proto-submodule: - @printf $(COLOR) 
"Update proto submodule from remote..." - git submodule update --force --remote $(PROTO_ROOT)/api - -install-proto-submodule: - @printf $(COLOR) "Install proto submodule..." - git submodule update --init $(PROTO_ROOT)/api - -protoc: clean-proto $(PROTO_OUT) - @protogen \ - -I=proto/api \ - -I=proto/dependencies \ - --root=proto/internal \ - --rewrite-enum=BuildId_State:BuildId \ - -p go-grpc_out=$(PROTO_PATHS) \ - -p go-helpers_out=$(PROTO_PATHS) - @mv -f "$(PROTO_OUT)/temporal/server/api/"* "$(PROTO_OUT)" - -# All gRPC generated service files paths relative to PROTO_OUT. -PROTO_GRPC_SERVICES = $(patsubst $(PROTO_OUT)/%,%,$(shell find $(PROTO_OUT) -name "service.pb.go" -o -name "service_grpc.pb.go")) -service_name = $(firstword $(subst /, ,$(1))) -mock_file_name = $(call service_name,$(1))mock/$(subst $(call service_name,$(1))/,,$(1:go=mock.go)) - -proto-mocks: protoc - @printf $(COLOR) "Generate proto mocks..." - $(foreach PROTO_GRPC_SERVICE,$(PROTO_GRPC_SERVICES),\ - @cd $(PROTO_OUT) && \ - mockgen -copyright_file ../LICENSE -package $(call service_name,$(PROTO_GRPC_SERVICE))mock -source $(PROTO_GRPC_SERVICE) -destination $(call mock_file_name,$(PROTO_GRPC_SERVICE)) \ - $(NEWLINE)) - -service-clients: +$(API_BINPB): go.mod go.sum $(PROTO_FILES) + @printf $(COLOR) "Generating proto dependencies image..." + @./cmd/tools/getproto/run.sh --out $@ + +$(INTERNAL_BINPB): $(API_BINPB) $(PROTO_FILES) + @printf $(COLOR) "Generate proto image..." + @protoc --descriptor_set_in=$(API_BINPB) -I=$(PROTO_ROOT)/internal $(PROTO_FILES) -o $@ + +$(CHASM_BINPB): $(API_BINPB) $(INTERNAL_BINPB) $(CHASM_PROTO_FILES) + @printf $(COLOR) "Generate CHASM proto image..." + @protoc --descriptor_set_in=$(API_BINPB):$(INTERNAL_BINPB) -I=. 
$(CHASM_PROTO_FILES) -o $@ + +protoc: $(PROTOGEN) $(MOCKGEN) $(GOIMPORTS) $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_GRPC) $(PROTOC_GEN_GO_HELPERS) $(API_BINPB) $(LOCALBIN)/protoc-gen-go-chasm + @go run ./cmd/tools/protogen \ + -root=$(ROOT) \ + -proto-out=$(PROTO_OUT) \ + -proto-root=$(PROTO_ROOT) \ + -api-binpb=$(API_BINPB) \ + -protogen-bin=$(PROTOGEN) \ + -goimports-bin=$(GOIMPORTS) \ + -mockgen-bin=$(MOCKGEN) \ + -protoc-gen-go-chasm-bin=$(LOCALBIN)/protoc-gen-go-chasm \ + -protoc-gen-go-bin=$(PROTOC_GEN_GO) \ + -protoc-gen-go-grpc-bin=$(PROTOC_GEN_GO_GRPC) \ + -protoc-gen-go-helpers-bin=$(PROTOC_GEN_GO_HELPERS) \ + $(PROTO_DIRS) + +proto-codegen: @printf $(COLOR) "Generate service clients..." - @go generate ./client/... + @go generate -run genrpcwrappers ./client/... + @printf $(COLOR) "Generate server interceptors..." + @go generate ./common/rpc/interceptor/logtags/... + @printf $(COLOR) "Generate routing key extractor..." + @go generate -run genroutingkeyextractor ./common/rpc/interceptor/... + @printf $(COLOR) "Generate search attributes helpers..." + @go generate -run gensearchattributehelpers ./common/searchattribute/... update-go-api: @printf $(COLOR) "Update go.temporal.io/api@master..." @go get -u go.temporal.io/api@master -goimports-proto: - @printf $(COLOR) "Run goimports for proto files..." - @goimports -w $(PROTO_OUT) - -copyright-proto: - @printf $(COLOR) "Update license headers for proto files..." - @go run ./cmd/tools/copyright/licensegen.go --scanDir $(PROTO_OUT) - ##### Binaries ##### clean-bins: @printf $(COLOR) "Delete old binaries..." @rm -f temporal-server + @rm -f temporal-server-debug @rm -f temporal-cassandra-tool @rm -f tdbg + @rm -f fairsim @rm -f temporal-sql-tool + @rm -f temporal-elasticsearch-tool temporal-server: $(ALL_SRC) @printf $(COLOR) "Build temporal-server with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." 
@@ -240,6 +360,10 @@ tdbg: $(ALL_SRC) @printf $(COLOR) "Build tdbg with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_TAG_FLAG) -o tdbg ./cmd/tools/tdbg +fairsim: $(ALL_SRC) + @printf $(COLOR) "Build fairsim with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." + CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_TAG_FLAG) -o fairsim ./cmd/tools/fairsim + temporal-cassandra-tool: $(ALL_SRC) @printf $(COLOR) "Build temporal-cassandra-tool with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_TAG_FLAG) -o temporal-cassandra-tool ./cmd/tools/cassandra @@ -248,141 +372,196 @@ temporal-sql-tool: $(ALL_SRC) @printf $(COLOR) "Build temporal-sql-tool with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_TAG_FLAG) -o temporal-sql-tool ./cmd/tools/sql +temporal-elasticsearch-tool: $(ALL_SRC) + @printf $(COLOR) "Build temporal-elasticsearch-tool with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." + CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_TAG_FLAG) -o temporal-elasticsearch-tool ./cmd/tools/elasticsearch + temporal-server-debug: $(ALL_SRC) @printf $(COLOR) "Build temporal-server-debug with CGO_ENABLED=$(CGO_ENABLED) for $(GOOS)/$(GOARCH)..." CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_TAG_FLAG),TEMPORAL_DEBUG -o temporal-server-debug ./cmd/server ##### Checks ##### -copyright-check: - @printf $(COLOR) "Check license header..." - @go run ./cmd/tools/copyright/licensegen.go --verifyOnly - -copyright: - @printf $(COLOR) "Fix license header..." - @go run ./cmd/tools/copyright/licensegen.go - -goimports: MERGE_BASE ?= $(shell test -d .git && git merge-base $(MAIN_BRANCH) HEAD) -goimports: MODIFIED_FILES := $(shell test -d .git && git diff --name-status $(MERGE_BASE) -- | cut -f2) -goimports: - @printf $(COLOR) "Run goimports for modified files..." 
- @printf "Merge base: $(MERGE_BASE)\n" - @printf "Modified files: $(MODIFIED_FILES)\n" - @goimports -w $(filter %.go, $(MODIFIED_FILES)) - -lint: - @printf $(COLOR) "Run linters..." - @golangci-lint run --verbose --timeout 10m --fix=true --new-from-rev=$(MAIN_BRANCH) --config=.golangci.yml - -api-linter: - @printf $(COLOR) "Run api-linter..." - $(call silent_exec, api-linter --set-exit-status $(PROTO_IMPORTS) --config=$(PROTO_ROOT)/api-linter.yaml $(PROTO_FILES)) - -buf-lint: - @printf $(COLOR) "Run buf linter..." - @(cd $(PROTO_ROOT) && buf lint) +goimports: fmt-imports $(GOIMPORTS) + @printf $(COLOR) "Run goimports for all files..." + @UNGENERATED_FILES=$$(find . -type f -name '*.go' -print0 | xargs -0 grep -L -e "Code generated by .* DO NOT EDIT." || true) && \ + $(GOIMPORTS) -w $$UNGENERATED_FILES -buf-build: - @printf $(COLOR) "Build image.bin with buf..." - @(cd $(PROTO_ROOT) && buf build -o image.bin) +lint: lint-code lint-actions lint-api lint-protos lint-yaml + @printf $(COLOR) "Run linters..." -buf-breaking: - @printf $(COLOR) "Run buf breaking changes check against image.bin..." - @(cd $(PROTO_ROOT) && buf check breaking --against image.bin) +lint-actions: $(ACTIONLINT) + @printf $(COLOR) "Linting GitHub actions..." + @$(ACTIONLINT) + +lint-code: $(GOLANGCI_LINT) $(ERRORTYPE) + @printf $(COLOR) "Linting code..." + @$(GOLANGCI_LINT) run --verbose --build-tags $(ALL_TEST_TAGS) --timeout 10m --fix=$(GOLANGCI_LINT_FIX) --new-from-rev=$(GOLANGCI_LINT_BASE_REV) --config=.github/.golangci.yml + @go vet -tags $(ALL_TEST_TAGS) -vettool="$(ERRORTYPE)" -style-check=false ./... + +lint-yaml: $(YAMLFMT) + @printf $(COLOR) "Checking YAML formatting..." + @$(YAMLFMT) -conf .github/.yamlfmt -lint . + +lint-api: $(API_LINTER) $(API_BINPB) + @printf $(COLOR) "Linting proto API..." 
+ $(call silent_exec, $(API_LINTER) --set-exit-status -I=$(PROTO_ROOT)/internal --descriptor-set-in $(API_BINPB) --config=$(PROTO_ROOT)/api-linter.yaml $(PROTO_FILES)) + +lint-protos: $(BUF) $(INTERNAL_BINPB) $(CHASM_BINPB) + @printf $(COLOR) "Linting proto definitions..." + @$(BUF) lint $(INTERNAL_BINPB) + @$(BUF) lint --config chasm/lib/buf.yaml $(CHASM_BINPB) + +fmt: fmt-gofix fmt-imports fmt-protos fmt-yaml + +# Some fixes enable others (e.g. rangeint may expose minmax opportunities), +# so - as recommended by the Go team - we run go fix in a loop until it reaches +# a fixed point. We check for "files updated" in the output rather than relying +# on the exit code alone, since go fix can exit non-zero without actually +# modifying any files (see https://github.com/golang/go/issues/77482). +# Note: go fix automatically skips generated files. +GOFIX_FLAGS ?= -any -rangeint +GOFIX_MAX_ITERATIONS ?= 5 +fmt-gofix: + @printf $(COLOR) "Run go fix..." + @n=0; while [ $$n -lt $(GOFIX_MAX_ITERATIONS) ]; do \ + output=$$(go fix $(GOFIX_FLAGS) ./... 2>&1); \ + echo "$$output"; \ + if ! echo "$$output" | grep -q "files updated"; then break; fi; \ + n=$$((n + 1)); \ + printf $(COLOR) "Re-running go fix..."; \ + done; \ + if [ $$n -ge $(GOFIX_MAX_ITERATIONS) ]; then echo "ERROR: go fix did not converge after $(GOFIX_MAX_ITERATIONS) iterations"; exit 1; fi + +fmt-imports: $(GCI) # Don't get confused, there is a single linter called gci, which is a part of the mega linter we use is called golangci-lint. + @printf $(COLOR) "Formatting imports..." + @$(GCI) write --skip-generated -s standard -s default ./* + +parallelize-tests: + @printf $(COLOR) "Add t.Parallel() to tests..." + @go run ./cmd/tools/parallelize $(INTEGRATION_TEST_DIRS) + +fmt-protos: $(BUF) + @printf $(COLOR) "Formatting proto files..." + @$(BUF) format -w $(PROTO_ROOT)/internal + @$(BUF) format -w --config chasm/lib/buf.yaml chasm/lib + +fmt-yaml: $(YAMLFMT) + @printf $(COLOR) "Formatting YAML files..." 
+ @$(YAMLFMT) -conf .github/.yamlfmt . + +# Edit proto/internal/buf.yaml to exclude specific files from this check. +# TODO: buf breaking check for CHASM protos. +buf-breaking: $(BUF) $(API_BINPB) $(INTERNAL_BINPB) + @printf $(COLOR) "Run buf breaking proto changes check..." + @env BUF=$(BUF) API_BINPB=$(API_BINPB) INTERNAL_BINPB=$(INTERNAL_BINPB) CHASM_BINPB=$(CHASM_BINPB) MAIN_BRANCH=$(MAIN_BRANCH) \ + ./develop/buf-breaking.sh shell-check: @printf $(COLOR) "Run shellcheck for script files..." @shellcheck $(ALL_SCRIPTS) -check: copyright-check lint shell-check +workflowcheck: $(WORKFLOWCHECK) + @printf $(COLOR) "Run workflowcheck for system workflows..." + for dir in $(SYSTEM_WORKFLOWS_ROOT)/*/ ; do \ + echo "Running workflowcheck on $$dir" ; \ + $(WORKFLOWCHECK) "$$dir" ; \ + done + +check: lint shell-check ##### Tests ##### -clean-test-results: - @rm -f test.log $(TEST_OUTPUT_ROOT)/* +clean-test-output: + @printf $(COLOR) "Delete test output..." + @rm -rf $(TEST_OUTPUT_ROOT) @go clean -testcache build-tests: @printf $(COLOR) "Build tests..." - @go test $(TEST_TAG_FLAG) -exec="true" -count=0 $(TEST_DIRS) + @CGO_ENABLED=$(CGO_ENABLED) go test $(TEST_TAG_FLAG) -exec="true" -count=0 $(TEST_DIRS) -unit-test: clean-test-results +unit-test: clean-test-output @printf $(COLOR) "Run unit tests..." - @go test $(UNIT_TEST_DIRS) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(TEST_ARGS) 2>&1 | tee -a test.log - @! grep -q "^--- FAIL" test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(UNIT_TEST_DIRS) $(COMPILED_TEST_ARGS) 2>&1 | tee -a test.log + @$(MAKE) verify-test-log -integration-test: clean-test-results +integration-test: clean-test-output @printf $(COLOR) "Run integration tests..." - @go test $(INTEGRATION_TEST_DIRS) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(TEST_ARGS) 2>&1 | tee -a test.log - @! 
grep -q "^--- FAIL" test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(INTEGRATION_TEST_DIRS) $(COMPILED_TEST_ARGS) 2>&1 | tee -a test.log + @$(MAKE) verify-test-log -functional-test: clean-test-results +functional-test: clean-test-output @printf $(COLOR) "Run functional tests..." - @go test $(FUNCTIONAL_TEST_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(TEST_ARGS) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log - @go test $(FUNCTIONAL_TEST_NDC_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(TEST_ARGS) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log -# Need to run xdc tests with race detector off because of ringpop bug causing data race issue. - @go test $(FUNCTIONAL_TEST_XDC_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log - @! grep -q "^--- FAIL" test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(FUNCTIONAL_TEST_ROOT) $(COMPILED_TEST_ARGS) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(FUNCTIONAL_TEST_NDC_ROOT) $(COMPILED_TEST_ARGS) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(FUNCTIONAL_TEST_XDC_ROOT) $(COMPILED_TEST_ARGS) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log + @$(MAKE) verify-test-log -functional-with-fault-injection-test: clean-test-results +functional-with-fault-injection-test: clean-test-output @printf $(COLOR) "Run integration tests with fault injection..." 
- @go test $(FUNCTIONAL_TEST_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(TEST_ARGS) -PersistenceFaultInjectionRate=0.005 -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log - @go test $(FUNCTIONAL_TEST_NDC_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(TEST_ARGS) -PersistenceFaultInjectionRate=0.005 -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log -# Need to run xdc tests with race detector off because of ringpop bug causing data race issue. - @go test $(FUNCTIONAL_TEST_XDC_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) -PersistenceFaultInjectionRate=0.005 -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log - @! grep -q "^--- FAIL" test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(FUNCTIONAL_TEST_ROOT) $(COMPILED_TEST_ARGS) -enableFaultInjection=true -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(FUNCTIONAL_TEST_NDC_ROOT) $(COMPILED_TEST_ARGS) -enableFaultInjection=true -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log + @CGO_ENABLED=$(CGO_ENABLED) go test $(FUNCTIONAL_TEST_XDC_ROOT) $(COMPILED_TEST_ARGS) -enableFaultInjection=true -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) 2>&1 | tee -a test.log + @$(MAKE) verify-test-log -test: unit-test integration-test functional-test functional-with-fault-injection-test +mixed-brain-test: clean-test-output + @printf $(COLOR) "Run mixed brain tests..." 
+ @CGO_ENABLED=1 TEST_OUTPUT_ROOT=$(CURDIR)/$(TEST_OUTPUT_ROOT) go test -v $(MIXED_BRAIN_TEST_ROOT) $(COMPILED_TEST_ARGS) 2>&1 | tee -a test.log + @$(MAKE) verify-test-log + +verify-test-log: + @test -s test.log || (echo "TEST FAILURE: test.log is missing or empty" && exit 1) + @grep -q "^ok" test.log || (echo "TEST FAILURE: no passing test found in test.log" && exit 1) + @! grep -q "^--- FAIL" test.log || (echo "TEST FAILURE: failing test found in test.log" && exit 1) + +test: unit-test integration-test functional-test ##### Coverage & Reporting ##### $(TEST_OUTPUT_ROOT): @mkdir -p $(TEST_OUTPUT_ROOT) -prepare-coverage-test: update-gotestsum $(TEST_OUTPUT_ROOT) +prepare-coverage-test: $(GOTESTSUM) $(TEST_OUTPUT_ROOT) unit-test-coverage: prepare-coverage-test @printf $(COLOR) "Run unit tests with coverage..." - @gotestsum --junitfile $(NEW_REPORT) -- \ - $(UNIT_TEST_DIRS) -timeout=$(TEST_TIMEOUT) -race $(TEST_TAG_FLAG) -coverprofile=$(NEW_COVER_PROFILE) + go run ./cmd/tools/test-runner test --gotestsum-path=$(GOTESTSUM) --max-attempts=$(MAX_TEST_ATTEMPTS) --junitfile=$(NEW_REPORT) -- \ + $(COMPILED_TEST_ARGS) -coverprofile=$(NEW_COVER_PROFILE) $(UNIT_TEST_DIRS) integration-test-coverage: prepare-coverage-test @printf $(COLOR) "Run integration tests with coverage..." - @gotestsum --junitfile $(NEW_REPORT) -- \ - $(INTEGRATION_TEST_DIRS) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) $(INTEGRATION_TEST_COVERPKG) -coverprofile=$(NEW_COVER_PROFILE) + go run ./cmd/tools/test-runner test --gotestsum-path=$(GOTESTSUM) --max-attempts=$(MAX_TEST_ATTEMPTS) --junitfile=$(NEW_REPORT) -- \ + $(COMPILED_TEST_ARGS) -coverprofile=$(NEW_COVER_PROFILE) $(INTEGRATION_TEST_DIRS) -# This should use the same build flags as functional-test-coverage for best build caching. +# This should use the same build flags as functional-test-coverage and functional-test-{xdc,ndc}-coverage for best build caching. 
pre-build-functional-test-coverage: prepare-coverage-test - @go test -c -o /dev/null $(FUNCTIONAL_TEST_ROOT) -race $(TEST_TAG_FLAG) $(FUNCTIONAL_TEST_COVERPKG) + go test -c -cover -o /dev/null $(FUNCTIONAL_TEST_ROOT) $(TEST_ARGS) $(TEST_TAG_FLAG) $(COVERPKG_FLAG) functional-test-coverage: prepare-coverage-test @printf $(COLOR) "Run functional tests with coverage with $(PERSISTENCE_DRIVER) driver..." - @gotestsum --junitfile $(NEW_REPORT) -- \ - $(FUNCTIONAL_TEST_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_ARGS) $(TEST_TAG_FLAG) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) $(FUNCTIONAL_TEST_COVERPKG) -coverprofile=$(NEW_COVER_PROFILE) + go run ./cmd/tools/test-runner test --gotestsum-path=$(GOTESTSUM) --max-attempts=$(MAX_TEST_ATTEMPTS) --junitfile=$(NEW_REPORT) -- \ + $(COMPILED_TEST_ARGS) -coverprofile=$(NEW_COVER_PROFILE) $(COVERPKG_FLAG) $(FUNCTIONAL_TEST_ROOT) \ + -args -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) functional-test-xdc-coverage: prepare-coverage-test @printf $(COLOR) "Run functional test for cross DC with coverage with $(PERSISTENCE_DRIVER) driver..." - @gotestsum --junitfile $(NEW_REPORT) -- \ - $(FUNCTIONAL_TEST_XDC_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_TAG_FLAG) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) $(FUNCTIONAL_TEST_COVERPKG) -coverprofile=$(NEW_COVER_PROFILE) + go run ./cmd/tools/test-runner test --gotestsum-path=$(GOTESTSUM) --max-attempts=$(MAX_TEST_ATTEMPTS) --junitfile=$(NEW_REPORT) -- \ + $(COMPILED_TEST_ARGS) -coverprofile=$(NEW_COVER_PROFILE) $(COVERPKG_FLAG) $(FUNCTIONAL_TEST_XDC_ROOT) \ + -args -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) functional-test-ndc-coverage: prepare-coverage-test @printf $(COLOR) "Run functional test for NDC with coverage with $(PERSISTENCE_DRIVER) driver..." 
- @gotestsum --junitfile $(NEW_REPORT) -- \ - $(FUNCTIONAL_TEST_NDC_ROOT) -timeout=$(TEST_TIMEOUT) $(TEST_ARGS) $(TEST_TAG_FLAG) -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) $(FUNCTIONAL_TEST_COVERPKG) -coverprofile=$(NEW_COVER_PROFILE) - -.PHONY: $(SUMMARY_COVER_PROFILE) -$(SUMMARY_COVER_PROFILE): - @printf $(COLOR) "Combine coverage reports to $(SUMMARY_COVER_PROFILE)..." - @rm -f $(SUMMARY_COVER_PROFILE) $(SUMMARY_COVER_PROFILE).html - @if [ -z "$(wildcard $(TEST_OUTPUT_ROOT)/*.cover.out)" ]; then \ - echo "No coverage data, aborting!" && exit 1; \ - fi - @echo "mode: atomic" > $(SUMMARY_COVER_PROFILE) - $(foreach COVER_PROFILE,$(wildcard $(TEST_OUTPUT_ROOT)/*.cover.out),\ - @printf "Add %s...\n" $(COVER_PROFILE); \ - @grep -v -e "[Mm]ocks\?.go" -e "^mode: \w\+" $(COVER_PROFILE) >> $(SUMMARY_COVER_PROFILE) || true \ - $(NEWLINE)) - -coverage-report: $(SUMMARY_COVER_PROFILE) - @printf $(COLOR) "Generate HTML report from $(SUMMARY_COVER_PROFILE) to $(SUMMARY_COVER_PROFILE).html..." - @go tool cover -html=$(SUMMARY_COVER_PROFILE) -o $(SUMMARY_COVER_PROFILE).html + go run ./cmd/tools/test-runner test --gotestsum-path=$(GOTESTSUM) --max-attempts=$(MAX_TEST_ATTEMPTS) --junitfile=$(NEW_REPORT) -- \ + $(COMPILED_TEST_ARGS) -coverprofile=$(NEW_COVER_PROFILE) $(COVERPKG_FLAG) $(FUNCTIONAL_TEST_NDC_ROOT) \ + -args -persistenceType=$(PERSISTENCE_TYPE) -persistenceDriver=$(PERSISTENCE_DRIVER) + +report-test-crash: $(TEST_OUTPUT_ROOT) + @printf $(COLOR) "Generate test crash junit report..." 
+ @go run ./cmd/tools/test-runner report-crash --gotestsum=report-crash \ + --junitfile=$(TEST_OUTPUT_ROOT)/junit.crash.xml \ + --crashreportname=$(CRASH_REPORT_NAME) + +print-test-summary: $(TEST_OUTPUT_ROOT) + @go run ./cmd/tools/test-runner print-summary \ + --junit-glob=$(TEST_OUTPUT_ROOT)/junit.*.xml ##### Schema ##### install-schema-cass-es: temporal-cassandra-tool install-schema-es @@ -418,14 +597,17 @@ install-schema-postgresql12: temporal-sql-tool ./temporal-sql-tool -u $(SQL_USER) --pw $(SQL_PASSWORD) -p 5432 --pl postgres12 --db $(VISIBILITY_DB) setup-schema -v 0.0 ./temporal-sql-tool -u $(SQL_USER) --pw $(SQL_PASSWORD) -p 5432 --pl postgres12 --db $(VISIBILITY_DB) update-schema -d ./schema/postgresql/v12/visibility/versioned -install-schema-es: +install-schema-es: temporal-elasticsearch-tool + @printf $(COLOR) "Install Elasticsearch schema..." + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 setup-schema + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 create-index --index temporal_visibility_v1_dev + +install-schema-es-secondary: temporal-elasticsearch-tool @printf $(COLOR) "Install Elasticsearch schema..." - curl --fail -X PUT "http://127.0.0.1:9200/_cluster/settings" -H "Content-Type: application/json" --data-binary @./schema/elasticsearch/visibility/cluster_settings_v7.json --write-out "\n" - curl --fail -X PUT "http://127.0.0.1:9200/_template/temporal_visibility_v1_template" -H "Content-Type: application/json" --data-binary @./schema/elasticsearch/visibility/index_template_v7.json --write-out "\n" -# No --fail here because create index is not idempotent operation. 
- curl -X PUT "http://127.0.0.1:9200/temporal_visibility_v1_dev" --write-out "\n" + ./temporal-elasticsearch-tool -ep http://127.0.0.1:8200 setup-schema + ./temporal-elasticsearch-tool -ep http://127.0.0.1:8200 create-index --index temporal_visibility_v1_secondary -install-schema-xdc: temporal-cassandra-tool +install-schema-xdc: temporal-cassandra-tool temporal-elasticsearch-tool @printf $(COLOR) "Install Cassandra schema (active)..." ./temporal-cassandra-tool drop -k temporal_cluster_a -f ./temporal-cassandra-tool create -k temporal_cluster_a --rf 1 @@ -445,15 +627,15 @@ install-schema-xdc: temporal-cassandra-tool ./temporal-cassandra-tool -k temporal_cluster_c update-schema -d ./schema/cassandra/temporal/versioned @printf $(COLOR) "Install Elasticsearch schemas..." - curl --fail -X PUT "http://127.0.0.1:9200/_cluster/settings" -H "Content-Type: application/json" --data-binary @./schema/elasticsearch/visibility/cluster_settings_v7.json --write-out "\n" - curl --fail -X PUT "http://127.0.0.1:9200/_template/temporal_visibility_v1_template" -H "Content-Type: application/json" --data-binary @./schema/elasticsearch/visibility/index_template_v7.json --write-out "\n" -# No --fail here because create index is not idempotent operation. 
- curl -X DELETE http://localhost:9200/temporal_visibility_v1_dev_cluster_a - curl -X DELETE http://localhost:9200/temporal_visibility_v1_dev_cluster_b - curl -X DELETE http://localhost:9200/temporal_visibility_v1_dev_cluster_c - curl -X PUT "http://127.0.0.1:9200/temporal_visibility_v1_dev_cluster_a" --write-out "\n" - curl -X PUT "http://127.0.0.1:9200/temporal_visibility_v1_dev_cluster_b" --write-out "\n" - curl -X PUT "http://127.0.0.1:9200/temporal_visibility_v1_dev_cluster_c" --write-out "\n" + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 setup-schema +# Delete indices if they exist (drop-index fails silently if index doesn't exist) + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 drop-index --index temporal_visibility_v1_dev_cluster_a --fail + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 drop-index --index temporal_visibility_v1_dev_cluster_b --fail + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 drop-index --index temporal_visibility_v1_dev_cluster_c --fail +# Create indices + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 create-index --index temporal_visibility_v1_dev_cluster_a + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 create-index --index temporal_visibility_v1_dev_cluster_b + ./temporal-elasticsearch-tool -ep http://127.0.0.1:9200 create-index --index temporal_visibility_v1_dev_cluster_c ##### Run server ##### DOCKER_COMPOSE_FILES := -f ./develop/docker-compose/docker-compose.yml -f ./develop/docker-compose/docker-compose.$(GOOS).yml @@ -464,6 +646,12 @@ start-dependencies: stop-dependencies: docker compose $(DOCKER_COMPOSE_FILES) down +start-dependencies-dual: + docker compose $(DOCKER_COMPOSE_FILES) -f ./develop/docker-compose/docker-compose.secondary-es.yml up + +stop-dependencies-dual: + docker compose $(DOCKER_COMPOSE_FILES) -f ./develop/docker-compose/docker-compose.secondary-es.yml down + start-dependencies-cdc: docker compose $(DOCKER_COMPOSE_FILES) $(DOCKER_COMPOSE_CDC_FILES) up 
@@ -473,35 +661,51 @@ stop-dependencies-cdc: start: start-sqlite start-cass-es: temporal-server - ./temporal-server --env development-cass-es --allow-no-auth start + ./temporal-server --config-file config/development-cass-es.yaml --allow-no-auth start + +start-cass-archival: temporal-server + ./temporal-server --config-file config/development-cass-archival.yaml --allow-no-auth start + +start-cass-es-dual: temporal-server + ./temporal-server --config-file config/development-cass-es-dual.yaml --allow-no-auth start + +start-cass-es-custom: temporal-server + ./temporal-server --config-file config/development-cass-es-custom.yaml --allow-no-auth start start-es-fi: temporal-server - ./temporal-server --env development-cass-es-fi --allow-no-auth start + ./temporal-server --config-file config/development-cass-es-fi.yaml --allow-no-auth start start-mysql: start-mysql8 start-mysql8: temporal-server - ./temporal-server --env development-mysql8 --allow-no-auth start + ./temporal-server --config-file config/development-mysql8.yaml --allow-no-auth start start-mysql-es: temporal-server - ./temporal-server --env development-mysql-es --allow-no-auth start + ./temporal-server --config-file config/development-mysql-es.yaml --allow-no-auth start start-postgres: start-postgres12 start-postgres12: temporal-server - ./temporal-server --env development-postgres12 --allow-no-auth start + ./temporal-server --config-file config/development-postgres12.yaml --allow-no-auth start start-sqlite: temporal-server - ./temporal-server --env development-sqlite --allow-no-auth start + ./temporal-server --config-file config/development-sqlite.yaml --allow-no-auth start + +start-sqlite-file: temporal-server + ./temporal-server --config-file config/development-sqlite-file.yaml --allow-no-auth start start-xdc-cluster-a: temporal-server - ./temporal-server --env development-cluster-a --allow-no-auth start + ./temporal-server --config-file config/development-cluster-a.yaml --allow-no-auth start 
start-xdc-cluster-b: temporal-server - ./temporal-server --env development-cluster-b --allow-no-auth start + ./temporal-server --config-file config/development-cluster-b.yaml --allow-no-auth start start-xdc-cluster-c: temporal-server - ./temporal-server --env development-cluster-c --allow-no-auth start + ./temporal-server --config-file config/development-cluster-c.yaml --allow-no-auth start + +start-jwt: temporal-server + @./config/jwt/setup-keys.sh + ./temporal-server --config-file config/development-jwt.yaml start --service frontend --service internal-frontend --service history --service matching --service worker ##### Grafana ##### update-dashboards: @@ -514,15 +718,24 @@ gomodtidy: @go mod tidy update-dependencies: - @printf $(COLOR) "Update dependencies..." + @printf $(COLOR) "Update dependencies (minor versions only) ..." @go get -u -t $(PINNED_DEPENDENCIES) ./... @go mod tidy -go-generate: +update-dependencies-major: $(GOMAJOR) + @printf $(COLOR) "Major version upgrades available:" + @$(GOMAJOR) list -major + @echo "" + @printf $(COLOR) "Update dependencies (major versions only) ..." + @$(GOMAJOR) get -major all + @go mod tidy + +go-generate: $(MOCKGEN) $(GOIMPORTS) $(STRINGER) $(GOWRAP) @printf $(COLOR) "Process go:generate directives..." - @go generate ./... + @PATH="$(ROOT)/$(LOCALBIN):$(PATH)" go generate ./... ensure-no-changes: @printf $(COLOR) "Check for local changes..." @printf $(COLOR) "========================================================================" - @git diff --name-status --exit-code || (printf $(COLOR) "========================================================================"; printf $(RED) "Above files are not regenerated properly. Regenerate them and try again."; exit 1) + @git status --porcelain + @test -z "`git status --porcelain`" || (printf $(COLOR) "========================================================================"; printf $(RED) "Above files are not regenerated properly. 
Regenerate them and try again."; git diff HEAD ; exit 1) diff --git a/README.md b/README.md index 8c21185b34e..d89e30ebb98 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,24 @@ -[![Build status](https://github.com/temporalio/temporal/actions/workflows/run-tests.yml/badge.svg?branch=main)](https://github.com/temporalio/temporal/commits/main/) -[![Coverage Status](https://coveralls.io/repos/github/temporalio/temporal/badge.svg?branch=main)](https://coveralls.io/github/temporalio/temporal?branch=main) -[![Discourse](https://img.shields.io/static/v1?label=Discourse&message=Get%20Help&color=informational)](https://community.temporal.io) -[![Go Report Card][go-report-image]][go-report-url] +
-[go-report-image]: https://goreportcard.com/badge/github.com/temporalio/temporal -[go-report-url]: https://goreportcard.com/report/github.com/temporalio/temporal +# Temporal—durable execution platform -# Temporal +

+ +[![GitHub Release](https://img.shields.io/github/v/release/temporalio/temporal)](https://github.com/temporalio/temporal/releases/latest) +[![GitHub License](https://img.shields.io/github/license/temporalio/temporal)](https://github.com/temporalio/temporal/blob/main/LICENSE) +[![Code Coverage](https://img.shields.io/badge/codecov-report-blue)](https://app.codecov.io/gh/temporalio/temporal) +[![Community](https://img.shields.io/static/v1?label=community&message=get%20help&color=informational)](https://community.temporal.io) +[![Go Report Card](https://goreportcard.com/badge/github.com/temporalio/temporal)](https://goreportcard.com/report/github.com/temporalio/temporal) + +**[Introduction](#introduction)   •  ** +**[Getting Started](#getting-started)   •  ** +**[Contributing](#contributing)   •  ** +**[Temporal Docs](https://docs.temporal.io/)   •  ** +**[Temporal 101](https://learn.temporal.io/courses/temporal_101/)** + +
+ +## Introduction Temporal is a durable execution platform that enables developers to build scalable applications without sacrificing productivity or reliability. The Temporal server executes units of application logic called Workflows in a resilient manner that automatically handles intermittent failures, and retries failed operations. @@ -16,12 +28,6 @@ It is developed by [Temporal Technologies](https://temporal.io/), a startup by t [![image](https://github.com/temporalio/temporal/assets/251288/693d18b5-01de-4a3b-b47b-96347b84f610)](https://youtu.be/wIpz4ioK0gI 'Getting to know Temporal') -Learn more: - -- [Courses](https://learn.temporal.io/courses/temporal_101/) -- [Docs](https://docs.temporal.io) -- Internal architecture: [docs/](./docs/architecture/README.md) - ## Getting Started ### Download and Start Temporal Server Locally @@ -59,12 +65,15 @@ This repository contains the source code of the Temporal server. To implement Wo ## Contributing -We'd love your help in making Temporal great. Please review the [internal architecture docs](./docs/architecture/README.md) and our [contribution guide](CONTRIBUTING.md). +We'd love your help in making Temporal great. -If you'd like to work on or propose a new feature, first peruse [feature requests](https://community.temporal.io/c/feature-requests/6) and our [proposals repo](https://github.com/temporalio/proposals) to discover existing active and accepted proposals. +Helpful links to get started: -Feel free to join the Temporal community [forum](https://community.temporal.io) or [Slack](https://t.mp/slack) to start a discussion or check if a feature has already been discussed. -Once you're sure the proposal is not covered elsewhere, please follow our [proposal instructions](https://github.com/temporalio/proposals#creating-a-new-proposal) or submit a [feature request](https://community.temporal.io/c/feature-requests/6). 
+- [work on or propose a new feature](https://github.com/temporalio/proposals) +- [learn about the Temporal Server architecture](./docs/architecture/README.md) +- [learn how to build and run the Temporal Server locally](./CONTRIBUTING.md) +- [learn about Temporal Server testing tools and best practices](./docs/development/testing.md) +- join the Temporal community [forum](https://community.temporal.io) and [Slack](https://t.mp/slack) ## License diff --git a/api/adminservice/v1/request_response.go-helpers.pb.go b/api/adminservice/v1/request_response.go-helpers.pb.go index 2e2d7c09257..1dcfb313dda 100644 --- a/api/adminservice/v1/request_response.go-helpers.pb.go +++ b/api/adminservice/v1/request_response.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package adminservice @@ -2914,3 +2890,632 @@ func (this *ListQueuesResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type DeepHealthCheckRequest to the protobuf v3 wire format +func (val *DeepHealthCheckRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeepHealthCheckRequest from the protobuf v3 wire format +func (val *DeepHealthCheckRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeepHealthCheckRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeepHealthCheckRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeepHealthCheckRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeepHealthCheckRequest + switch t := that.(type) { + case *DeepHealthCheckRequest: + that1 = t + case DeepHealthCheckRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeepHealthCheckResponse to the protobuf v3 wire format +func (val *DeepHealthCheckResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeepHealthCheckResponse from the protobuf v3 wire format +func (val *DeepHealthCheckResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeepHealthCheckResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeepHealthCheckResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeepHealthCheckResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeepHealthCheckResponse + switch t := that.(type) { + case *DeepHealthCheckResponse: + that1 = t + case DeepHealthCheckResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncWorkflowStateRequest to the protobuf v3 wire format +func (val *SyncWorkflowStateRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncWorkflowStateRequest from the protobuf v3 wire format +func (val *SyncWorkflowStateRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncWorkflowStateRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncWorkflowStateRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncWorkflowStateRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncWorkflowStateRequest + switch t := that.(type) { + case *SyncWorkflowStateRequest: + that1 = t + case SyncWorkflowStateRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncWorkflowStateResponse to the protobuf v3 wire format +func (val *SyncWorkflowStateResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncWorkflowStateResponse from the protobuf v3 wire format +func (val *SyncWorkflowStateResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncWorkflowStateResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncWorkflowStateResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncWorkflowStateResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncWorkflowStateResponse + switch t := that.(type) { + case *SyncWorkflowStateResponse: + that1 = t + case SyncWorkflowStateResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GenerateLastHistoryReplicationTasksRequest to the protobuf v3 wire format +func (val *GenerateLastHistoryReplicationTasksRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GenerateLastHistoryReplicationTasksRequest from the protobuf v3 wire format +func (val *GenerateLastHistoryReplicationTasksRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GenerateLastHistoryReplicationTasksRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GenerateLastHistoryReplicationTasksRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GenerateLastHistoryReplicationTasksRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GenerateLastHistoryReplicationTasksRequest + switch t := that.(type) { + case *GenerateLastHistoryReplicationTasksRequest: + that1 = t + case GenerateLastHistoryReplicationTasksRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GenerateLastHistoryReplicationTasksResponse to the protobuf v3 wire format +func (val *GenerateLastHistoryReplicationTasksResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GenerateLastHistoryReplicationTasksResponse from the protobuf v3 wire format +func (val *GenerateLastHistoryReplicationTasksResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GenerateLastHistoryReplicationTasksResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GenerateLastHistoryReplicationTasksResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GenerateLastHistoryReplicationTasksResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GenerateLastHistoryReplicationTasksResponse + switch t := that.(type) { + case *GenerateLastHistoryReplicationTasksResponse: + that1 = t + case GenerateLastHistoryReplicationTasksResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeTaskQueuePartitionRequest to the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeTaskQueuePartitionRequest from the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeTaskQueuePartitionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeTaskQueuePartitionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeTaskQueuePartitionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeTaskQueuePartitionRequest + switch t := that.(type) { + case *DescribeTaskQueuePartitionRequest: + that1 = t + case DescribeTaskQueuePartitionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeTaskQueuePartitionResponse to the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeTaskQueuePartitionResponse from the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeTaskQueuePartitionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeTaskQueuePartitionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeTaskQueuePartitionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeTaskQueuePartitionResponse + switch t := that.(type) { + case *DescribeTaskQueuePartitionResponse: + that1 = t + case DescribeTaskQueuePartitionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceUnloadTaskQueuePartitionRequest to the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceUnloadTaskQueuePartitionRequest from the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceUnloadTaskQueuePartitionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceUnloadTaskQueuePartitionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceUnloadTaskQueuePartitionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceUnloadTaskQueuePartitionRequest + switch t := that.(type) { + case *ForceUnloadTaskQueuePartitionRequest: + that1 = t + case ForceUnloadTaskQueuePartitionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceUnloadTaskQueuePartitionResponse to the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceUnloadTaskQueuePartitionResponse from the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceUnloadTaskQueuePartitionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceUnloadTaskQueuePartitionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceUnloadTaskQueuePartitionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceUnloadTaskQueuePartitionResponse + switch t := that.(type) { + case *ForceUnloadTaskQueuePartitionResponse: + that1 = t + case ForceUnloadTaskQueuePartitionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetTaskQueueUserDataRequest to the protobuf v3 wire format +func (val *GetTaskQueueUserDataRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetTaskQueueUserDataRequest from the protobuf v3 wire format +func (val *GetTaskQueueUserDataRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetTaskQueueUserDataRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetTaskQueueUserDataRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetTaskQueueUserDataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetTaskQueueUserDataRequest + switch t := that.(type) { + case *GetTaskQueueUserDataRequest: + that1 = t + case GetTaskQueueUserDataRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetTaskQueueUserDataResponse to the protobuf v3 wire format +func (val *GetTaskQueueUserDataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetTaskQueueUserDataResponse from the protobuf v3 wire format +func (val *GetTaskQueueUserDataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetTaskQueueUserDataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetTaskQueueUserDataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetTaskQueueUserDataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetTaskQueueUserDataResponse + switch t := that.(type) { + case *GetTaskQueueUserDataResponse: + that1 = t + case GetTaskQueueUserDataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartAdminBatchOperationRequest to the protobuf v3 wire format +func (val *StartAdminBatchOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartAdminBatchOperationRequest from the protobuf v3 wire format +func (val *StartAdminBatchOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartAdminBatchOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartAdminBatchOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartAdminBatchOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartAdminBatchOperationRequest + switch t := that.(type) { + case *StartAdminBatchOperationRequest: + that1 = t + case StartAdminBatchOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartAdminBatchOperationResponse to the protobuf v3 wire format +func (val *StartAdminBatchOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartAdminBatchOperationResponse from the protobuf v3 wire format +func (val *StartAdminBatchOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartAdminBatchOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartAdminBatchOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartAdminBatchOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartAdminBatchOperationResponse + switch t := that.(type) { + case *StartAdminBatchOperationResponse: + that1 = t + case StartAdminBatchOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type BatchOperationRefreshTasks to the protobuf v3 wire format +func (val *BatchOperationRefreshTasks) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BatchOperationRefreshTasks from the protobuf v3 wire format +func (val *BatchOperationRefreshTasks) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BatchOperationRefreshTasks) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BatchOperationRefreshTasks values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BatchOperationRefreshTasks) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BatchOperationRefreshTasks + switch t := that.(type) { + case *BatchOperationRefreshTasks: + that1 = t + case BatchOperationRefreshTasks: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MigrateScheduleRequest to the protobuf v3 wire format +func (val *MigrateScheduleRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MigrateScheduleRequest from the protobuf v3 wire format +func (val *MigrateScheduleRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MigrateScheduleRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MigrateScheduleRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MigrateScheduleRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MigrateScheduleRequest + switch t := that.(type) { + case *MigrateScheduleRequest: + that1 = t + case MigrateScheduleRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MigrateScheduleResponse to the protobuf v3 wire format +func (val *MigrateScheduleResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MigrateScheduleResponse from the protobuf v3 wire format +func (val *MigrateScheduleResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MigrateScheduleResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MigrateScheduleResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MigrateScheduleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MigrateScheduleResponse + switch t := that.(type) { + case *MigrateScheduleResponse: + that1 = t + case MigrateScheduleResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/adminservice/v1/request_response.pb.go b/api/adminservice/v1/request_response.pb.go index 7acf9747077..56fe71caeeb 100644 --- a/api/adminservice/v1/request_response.pb.go +++ b/api/adminservice/v1/request_response.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -30,21 +8,26 @@ package adminservice import ( reflect "reflect" + "strconv" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/common/v1" v16 "go.temporal.io/api/enums/v1" v110 "go.temporal.io/api/namespace/v1" v111 "go.temporal.io/api/replication/v1" + v115 "go.temporal.io/api/taskqueue/v1" v19 "go.temporal.io/api/version/v1" v17 "go.temporal.io/api/workflow/v1" v18 "go.temporal.io/server/api/cluster/v1" v112 "go.temporal.io/server/api/common/v1" v14 "go.temporal.io/server/api/enums/v1" + v113 "go.temporal.io/server/api/health/v1" v11 "go.temporal.io/server/api/history/v1" v13 "go.temporal.io/server/api/namespace/v1" v12 "go.temporal.io/server/api/persistence/v1" v15 "go.temporal.io/server/api/replication/v1" + v114 "go.temporal.io/server/api/taskqueue/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -58,22 +41,81 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Target scheduler implementation for migration. +type MigrateScheduleRequest_SchedulerTarget int32 + +const ( + MigrateScheduleRequest_SCHEDULER_TARGET_UNSPECIFIED MigrateScheduleRequest_SchedulerTarget = 0 + MigrateScheduleRequest_SCHEDULER_TARGET_CHASM MigrateScheduleRequest_SchedulerTarget = // Migrate to CHASM-backed scheduler (V2). + 1 + MigrateScheduleRequest_SCHEDULER_TARGET_WORKFLOW MigrateScheduleRequest_SchedulerTarget = // Migrate to workflow-backed scheduler (V1). + 2 +) + +// Enum value maps for MigrateScheduleRequest_SchedulerTarget. 
+var ( + MigrateScheduleRequest_SchedulerTarget_name = map[int32]string{ + 0: "SCHEDULER_TARGET_UNSPECIFIED", + 1: "SCHEDULER_TARGET_CHASM", + 2: "SCHEDULER_TARGET_WORKFLOW", + } + MigrateScheduleRequest_SchedulerTarget_value = map[string]int32{ + "SCHEDULER_TARGET_UNSPECIFIED": 0, + "SCHEDULER_TARGET_CHASM": 1, + "SCHEDULER_TARGET_WORKFLOW": 2, + } +) + +func (x MigrateScheduleRequest_SchedulerTarget) Enum() *MigrateScheduleRequest_SchedulerTarget { + p := new(MigrateScheduleRequest_SchedulerTarget) + *p = x + return p +} + +func (x MigrateScheduleRequest_SchedulerTarget) String() string { + switch x { + case MigrateScheduleRequest_SCHEDULER_TARGET_UNSPECIFIED: + return "MigrateScheduleRequestSchedulerTargetUnspecified" + case MigrateScheduleRequest_SCHEDULER_TARGET_CHASM: + return "MigrateScheduleRequestSchedulerTargetChasm" + case MigrateScheduleRequest_SCHEDULER_TARGET_WORKFLOW: + return "MigrateScheduleRequestSchedulerTargetWorkflow" + default: + return strconv.Itoa(int(x)) + } + +} + +func (MigrateScheduleRequest_SchedulerTarget) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_adminservice_v1_request_response_proto_enumTypes[0].Descriptor() +} + +func (MigrateScheduleRequest_SchedulerTarget) Type() protoreflect.EnumType { + return &file_temporal_server_api_adminservice_v1_request_response_proto_enumTypes[0] +} + +func (x MigrateScheduleRequest_SchedulerTarget) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MigrateScheduleRequest_SchedulerTarget.Descriptor instead. 
+func (MigrateScheduleRequest_SchedulerTarget) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{93, 0} +} + type RebuildMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RebuildMutableStateRequest) Reset() { *x = RebuildMutableStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RebuildMutableStateRequest) String() string { @@ -84,7 +126,7 @@ func (*RebuildMutableStateRequest) ProtoMessage() {} func (x *RebuildMutableStateRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -114,18 +156,16 @@ func (x *RebuildMutableStateRequest) GetExecution() *v1.WorkflowExecution { } type RebuildMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` 
unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RebuildMutableStateResponse) Reset() { *x = RebuildMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RebuildMutableStateResponse) String() string { @@ -136,7 +176,7 @@ func (*RebuildMutableStateResponse) ProtoMessage() {} func (x *RebuildMutableStateResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -152,24 +192,21 @@ func (*RebuildMutableStateResponse) Descriptor() ([]byte, []int) { } type ImportWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - HistoryBatches []*v1.DataBlob `protobuf:"bytes,3,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` - VersionHistory *v11.VersionHistory `protobuf:"bytes,4,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` - Token []byte `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Execution *v1.WorkflowExecution 
`protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + HistoryBatches []*v1.DataBlob `protobuf:"bytes,3,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` + VersionHistory *v11.VersionHistory `protobuf:"bytes,4,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + Token []byte `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ImportWorkflowExecutionRequest) Reset() { *x = ImportWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ImportWorkflowExecutionRequest) String() string { @@ -180,7 +217,7 @@ func (*ImportWorkflowExecutionRequest) ProtoMessage() {} func (x *ImportWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -231,20 +268,17 @@ func (x *ImportWorkflowExecutionRequest) GetToken() []byte { } type ImportWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Token []byte `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` unknownFields protoimpl.UnknownFields - - Token []byte `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ImportWorkflowExecutionResponse) Reset() { *x = 
ImportWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ImportWorkflowExecutionResponse) String() string { @@ -255,7 +289,7 @@ func (*ImportWorkflowExecutionResponse) ProtoMessage() {} func (x *ImportWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -278,21 +312,22 @@ func (x *ImportWorkflowExecutionResponse) GetToken() []byte { } type DescribeMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + SkipForceReload bool `protobuf:"varint,3,opt,name=skip_force_reload,json=skipForceReload,proto3" json:"skip_force_reload,omitempty"` + Archetype string `protobuf:"bytes,4,opt,name=archetype,proto3" json:"archetype,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,5,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache 
protoimpl.SizeCache } func (x *DescribeMutableStateRequest) Reset() { *x = DescribeMutableStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeMutableStateRequest) String() string { @@ -303,7 +338,7 @@ func (*DescribeMutableStateRequest) ProtoMessage() {} func (x *DescribeMutableStateRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -332,24 +367,45 @@ func (x *DescribeMutableStateRequest) GetExecution() *v1.WorkflowExecution { return nil } -type DescribeMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DescribeMutableStateRequest) GetSkipForceReload() bool { + if x != nil { + return x.SkipForceReload + } + return false +} + +func (x *DescribeMutableStateRequest) GetArchetype() string { + if x != nil { + return x.Archetype + } + return "" +} + +func (x *DescribeMutableStateRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} - ShardId string `protobuf:"bytes,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - HistoryAddr string `protobuf:"bytes,2,opt,name=history_addr,json=historyAddr,proto3" json:"history_addr,omitempty"` - CacheMutableState *v12.WorkflowMutableState `protobuf:"bytes,3,opt,name=cache_mutable_state,json=cacheMutableState,proto3" json:"cache_mutable_state,omitempty"` +type DescribeMutableStateResponse struct 
{ + state protoimpl.MessageState `protogen:"open.v1"` + ShardId string `protobuf:"bytes,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + HistoryAddr string `protobuf:"bytes,2,opt,name=history_addr,json=historyAddr,proto3" json:"history_addr,omitempty"` + // CacheMutableState is only available when mutable state is in cache. + CacheMutableState *v12.WorkflowMutableState `protobuf:"bytes,3,opt,name=cache_mutable_state,json=cacheMutableState,proto3" json:"cache_mutable_state,omitempty"` + // DatabaseMutableState is always available, + // but only loaded from database when mutable state is NOT in cache or skip_force_reload is false. DatabaseMutableState *v12.WorkflowMutableState `protobuf:"bytes,4,opt,name=database_mutable_state,json=databaseMutableState,proto3" json:"database_mutable_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeMutableStateResponse) Reset() { *x = DescribeMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeMutableStateResponse) String() string { @@ -360,7 +416,7 @@ func (*DescribeMutableStateResponse) ProtoMessage() {} func (x *DescribeMutableStateResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -405,24 +461,21 @@ func (x *DescribeMutableStateResponse) GetDatabaseMutableState() *v12.WorkflowMu // At least one of the parameters needs to be provided. 
type DescribeHistoryHostRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` //ip:port HostAddress string `protobuf:"bytes,1,opt,name=host_address,json=hostAddress,proto3" json:"host_address,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` WorkflowExecution *v1.WorkflowExecution `protobuf:"bytes,4,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeHistoryHostRequest) Reset() { *x = DescribeHistoryHostRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeHistoryHostRequest) String() string { @@ -433,7 +486,7 @@ func (*DescribeHistoryHostRequest) ProtoMessage() {} func (x *DescribeHistoryHostRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -477,23 +530,20 @@ func (x *DescribeHistoryHostRequest) GetWorkflowExecution() *v1.WorkflowExecutio } type DescribeHistoryHostResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ShardsNumber int32 
`protobuf:"varint,1,opt,name=shards_number,json=shardsNumber,proto3" json:"shards_number,omitempty"` ShardIds []int32 `protobuf:"varint,2,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` NamespaceCache *v13.NamespaceCacheInfo `protobuf:"bytes,3,opt,name=namespace_cache,json=namespaceCache,proto3" json:"namespace_cache,omitempty"` Address string `protobuf:"bytes,5,opt,name=address,proto3" json:"address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeHistoryHostResponse) Reset() { *x = DescribeHistoryHostResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeHistoryHostResponse) String() string { @@ -504,7 +554,7 @@ func (*DescribeHistoryHostResponse) ProtoMessage() {} func (x *DescribeHistoryHostResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -548,20 +598,17 @@ func (x *DescribeHistoryHostResponse) GetAddress() string { } type CloseShardRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CloseShardRequest) Reset() { *x = 
CloseShardRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CloseShardRequest) String() string { @@ -572,7 +619,7 @@ func (*CloseShardRequest) ProtoMessage() {} func (x *CloseShardRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -595,18 +642,16 @@ func (x *CloseShardRequest) GetShardId() int32 { } type CloseShardResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CloseShardResponse) Reset() { *x = CloseShardResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CloseShardResponse) String() string { @@ -617,7 +662,7 @@ func (*CloseShardResponse) ProtoMessage() {} func (x *CloseShardResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -633,20 
+678,17 @@ func (*CloseShardResponse) Descriptor() ([]byte, []int) { } type GetShardRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetShardRequest) Reset() { *x = GetShardRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetShardRequest) String() string { @@ -657,7 +699,7 @@ func (*GetShardRequest) ProtoMessage() {} func (x *GetShardRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -680,20 +722,17 @@ func (x *GetShardRequest) GetShardId() int32 { } type GetShardResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ShardInfo *v12.ShardInfo `protobuf:"bytes,1,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"` unknownFields protoimpl.UnknownFields - - ShardInfo *v12.ShardInfo `protobuf:"bytes,1,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetShardResponse) Reset() { *x = GetShardResponse{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetShardResponse) String() string { @@ -704,7 +743,7 @@ func (*GetShardResponse) ProtoMessage() {} func (x *GetShardResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -727,25 +766,22 @@ func (x *GetShardResponse) GetShardInfo() *v12.ShardInfo { } type ListHistoryTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` // The task category. See tasks.TaskCategoryRegistry for more. 
Category int32 `protobuf:"varint,2,opt,name=category,proto3" json:"category,omitempty"` TaskRange *v11.TaskRange `protobuf:"bytes,3,opt,name=task_range,json=taskRange,proto3" json:"task_range,omitempty"` BatchSize int32 `protobuf:"varint,4,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` NextPageToken []byte `protobuf:"bytes,5,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListHistoryTasksRequest) Reset() { *x = ListHistoryTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListHistoryTasksRequest) String() string { @@ -756,7 +792,7 @@ func (*ListHistoryTasksRequest) ProtoMessage() {} func (x *ListHistoryTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -807,21 +843,18 @@ func (x *ListHistoryTasksRequest) GetNextPageToken() []byte { } type ListHistoryTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Tasks []*Task `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` + NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - Tasks []*Task `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` - NextPageToken 
[]byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListHistoryTasksResponse) Reset() { *x = ListHistoryTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListHistoryTasksResponse) String() string { @@ -832,7 +865,7 @@ func (*ListHistoryTasksResponse) ProtoMessage() {} func (x *ListHistoryTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -862,26 +895,23 @@ func (x *ListHistoryTasksResponse) GetNextPageToken() []byte { } type Task struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskId int64 `protobuf:"varint,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + TaskType v14.TaskType `protobuf:"varint,5,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + FireTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=fire_time,json=fireTime,proto3" json:"fire_time,omitempty"` + Version int64 
`protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskId int64 `protobuf:"varint,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - TaskType v14.TaskType `protobuf:"varint,5,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - FireTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=fire_time,json=fireTime,proto3" json:"fire_time,omitempty"` - Version int64 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Task) Reset() { *x = Task{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Task) String() string { @@ -892,7 +922,7 @@ func (*Task) ProtoMessage() {} func (x *Task) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -957,24 +987,21 @@ func (x *Task) GetVersion() int64 { } type RemoveTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ShardId int32 
`protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` // The task category. See tasks.TaskCategoryRegistry for more. Category int32 `protobuf:"varint,2,opt,name=category,proto3" json:"category,omitempty"` TaskId int64 `protobuf:"varint,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RemoveTaskRequest) Reset() { *x = RemoveTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoveTaskRequest) String() string { @@ -985,7 +1012,7 @@ func (*RemoveTaskRequest) ProtoMessage() {} func (x *RemoveTaskRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1029,18 +1056,16 @@ func (x *RemoveTaskRequest) GetVisibilityTime() *timestamppb.Timestamp { } type RemoveTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RemoveTaskResponse) Reset() { *x = RemoveTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoveTaskResponse) String() string { @@ -1051,7 +1076,7 @@ func (*RemoveTaskResponse) ProtoMessage() {} func (x *RemoveTaskResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1070,27 +1095,24 @@ func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { // StartEventId defines the beginning of the event to fetch. The first event is exclusive. // EndEventId and EndEventVersion defines the end of the event to fetch. The end event is exclusive. 
type GetWorkflowExecutionRawHistoryV2Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,9,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - StartEventId int64 `protobuf:"varint,3,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` - StartEventVersion int64 `protobuf:"varint,4,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` - EndEventId int64 `protobuf:"varint,5,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` - EndEventVersion int64 `protobuf:"varint,6,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` - MaximumPageSize int32 `protobuf:"varint,7,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,8,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,9,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + StartEventId int64 `protobuf:"varint,3,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` + StartEventVersion int64 `protobuf:"varint,4,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` + EndEventId int64 `protobuf:"varint,5,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` + EndEventVersion int64 `protobuf:"varint,6,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` + MaximumPageSize int32 
`protobuf:"varint,7,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,8,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetWorkflowExecutionRawHistoryV2Request) Reset() { *x = GetWorkflowExecutionRawHistoryV2Request{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetWorkflowExecutionRawHistoryV2Request) String() string { @@ -1101,7 +1123,7 @@ func (*GetWorkflowExecutionRawHistoryV2Request) ProtoMessage() {} func (x *GetWorkflowExecutionRawHistoryV2Request) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1173,23 +1195,20 @@ func (x *GetWorkflowExecutionRawHistoryV2Request) GetNextPageToken() []byte { } type GetWorkflowExecutionRawHistoryV2Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - HistoryBatches []*v1.DataBlob `protobuf:"bytes,2,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` - VersionHistory *v11.VersionHistory `protobuf:"bytes,3,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` - HistoryNodeIds []int64 
`protobuf:"varint,4,rep,packed,name=history_node_ids,json=historyNodeIds,proto3" json:"history_node_ids,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + HistoryBatches []*v1.DataBlob `protobuf:"bytes,2,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` + VersionHistory *v11.VersionHistory `protobuf:"bytes,3,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + HistoryNodeIds []int64 `protobuf:"varint,4,rep,packed,name=history_node_ids,json=historyNodeIds,proto3" json:"history_node_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetWorkflowExecutionRawHistoryV2Response) Reset() { *x = GetWorkflowExecutionRawHistoryV2Response{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetWorkflowExecutionRawHistoryV2Response) String() string { @@ -1200,7 +1219,7 @@ func (*GetWorkflowExecutionRawHistoryV2Response) ProtoMessage() {} func (x *GetWorkflowExecutionRawHistoryV2Response) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1244,27 +1263,24 @@ func (x *GetWorkflowExecutionRawHistoryV2Response) GetHistoryNodeIds() []int64 { } type GetWorkflowExecutionRawHistoryRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache 
- unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - StartEventId int64 `protobuf:"varint,3,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` - StartEventVersion int64 `protobuf:"varint,4,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` - EndEventId int64 `protobuf:"varint,5,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` - EndEventVersion int64 `protobuf:"varint,6,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` - MaximumPageSize int32 `protobuf:"varint,7,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,8,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + StartEventId int64 `protobuf:"varint,3,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` + StartEventVersion int64 `protobuf:"varint,4,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` + EndEventId int64 `protobuf:"varint,5,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` + EndEventVersion int64 `protobuf:"varint,6,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` + MaximumPageSize int32 `protobuf:"varint,7,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` + 
NextPageToken []byte `protobuf:"bytes,8,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetWorkflowExecutionRawHistoryRequest) Reset() { *x = GetWorkflowExecutionRawHistoryRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetWorkflowExecutionRawHistoryRequest) String() string { @@ -1275,7 +1291,7 @@ func (*GetWorkflowExecutionRawHistoryRequest) ProtoMessage() {} func (x *GetWorkflowExecutionRawHistoryRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1347,23 +1363,20 @@ func (x *GetWorkflowExecutionRawHistoryRequest) GetNextPageToken() []byte { } type GetWorkflowExecutionRawHistoryResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - HistoryBatches []*v1.DataBlob `protobuf:"bytes,2,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` - VersionHistory *v11.VersionHistory `protobuf:"bytes,3,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` - HistoryNodeIds []int64 `protobuf:"varint,4,rep,packed,name=history_node_ids,json=historyNodeIds,proto3" json:"history_node_ids,omitempty"` + state 
protoimpl.MessageState `protogen:"open.v1"` + NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + HistoryBatches []*v1.DataBlob `protobuf:"bytes,2,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` + VersionHistory *v11.VersionHistory `protobuf:"bytes,3,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + HistoryNodeIds []int64 `protobuf:"varint,4,rep,packed,name=history_node_ids,json=historyNodeIds,proto3" json:"history_node_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetWorkflowExecutionRawHistoryResponse) Reset() { *x = GetWorkflowExecutionRawHistoryResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetWorkflowExecutionRawHistoryResponse) String() string { @@ -1374,7 +1387,7 @@ func (*GetWorkflowExecutionRawHistoryResponse) ProtoMessage() {} func (x *GetWorkflowExecutionRawHistoryResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1418,21 +1431,18 @@ func (x *GetWorkflowExecutionRawHistoryResponse) GetHistoryNodeIds() []int64 { } type GetReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Tokens []*v15.ReplicationToken `protobuf:"bytes,1,rep,name=tokens,proto3" 
json:"tokens,omitempty"` + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` unknownFields protoimpl.UnknownFields - - Tokens []*v15.ReplicationToken `protobuf:"bytes,1,rep,name=tokens,proto3" json:"tokens,omitempty"` - ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetReplicationMessagesRequest) Reset() { *x = GetReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetReplicationMessagesRequest) String() string { @@ -1443,7 +1453,7 @@ func (*GetReplicationMessagesRequest) ProtoMessage() {} func (x *GetReplicationMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1473,20 +1483,17 @@ func (x *GetReplicationMessagesRequest) GetClusterName() string { } type GetReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ShardMessages map[int32]*v15.ReplicationMessages `protobuf:"bytes,1,rep,name=shard_messages,json=shardMessages,proto3" json:"shard_messages,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - ShardMessages map[int32]*v15.ReplicationMessages 
`protobuf:"bytes,1,rep,name=shard_messages,json=shardMessages,proto3" json:"shard_messages,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *GetReplicationMessagesResponse) Reset() { *x = GetReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetReplicationMessagesResponse) String() string { @@ -1497,7 +1504,7 @@ func (*GetReplicationMessagesResponse) ProtoMessage() {} func (x *GetReplicationMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1520,26 +1527,23 @@ func (x *GetReplicationMessagesResponse) GetShardMessages() map[int32]*v15.Repli } type GetNamespaceReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // lastRetrievedMessageId is where the next fetch should begin with. LastRetrievedMessageId int64 `protobuf:"varint,1,opt,name=last_retrieved_message_id,json=lastRetrievedMessageId,proto3" json:"last_retrieved_message_id,omitempty"` // lastProcessedMessageId is the last messageId that is processed on the passive side. // This can be different than lastRetrievedMessageId if passive side supports prefetching messages. 
LastProcessedMessageId int64 `protobuf:"varint,2,opt,name=last_processed_message_id,json=lastProcessedMessageId,proto3" json:"last_processed_message_id,omitempty"` // clusterName is the name of the pulling cluster. - ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetNamespaceReplicationMessagesRequest) Reset() { *x = GetNamespaceReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNamespaceReplicationMessagesRequest) String() string { @@ -1550,7 +1554,7 @@ func (*GetNamespaceReplicationMessagesRequest) ProtoMessage() {} func (x *GetNamespaceReplicationMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1587,20 +1591,17 @@ func (x *GetNamespaceReplicationMessagesRequest) GetClusterName() string { } type GetNamespaceReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Messages *v15.ReplicationMessages `protobuf:"bytes,1,opt,name=messages,proto3" json:"messages,omitempty"` unknownFields protoimpl.UnknownFields - - Messages *v15.ReplicationMessages `protobuf:"bytes,1,opt,name=messages,proto3" 
json:"messages,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetNamespaceReplicationMessagesResponse) Reset() { *x = GetNamespaceReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNamespaceReplicationMessagesResponse) String() string { @@ -1611,7 +1612,7 @@ func (*GetNamespaceReplicationMessagesResponse) ProtoMessage() {} func (x *GetNamespaceReplicationMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1634,20 +1635,17 @@ func (x *GetNamespaceReplicationMessagesResponse) GetMessages() *v15.Replication } type GetDLQReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TaskInfos []*v15.ReplicationTaskInfo `protobuf:"bytes,1,rep,name=task_infos,json=taskInfos,proto3" json:"task_infos,omitempty"` unknownFields protoimpl.UnknownFields - - TaskInfos []*v15.ReplicationTaskInfo `protobuf:"bytes,1,rep,name=task_infos,json=taskInfos,proto3" json:"task_infos,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetDLQReplicationMessagesRequest) Reset() { *x = GetDLQReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDLQReplicationMessagesRequest) String() string { @@ -1658,7 +1656,7 @@ func (*GetDLQReplicationMessagesRequest) ProtoMessage() {} func (x *GetDLQReplicationMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1681,20 +1679,17 @@ func (x *GetDLQReplicationMessagesRequest) GetTaskInfos() []*v15.ReplicationTask } type GetDLQReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ReplicationTasks []*v15.ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetDLQReplicationMessagesResponse) Reset() { *x = GetDLQReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDLQReplicationMessagesResponse) String() string { @@ -1705,7 +1700,7 @@ func (*GetDLQReplicationMessagesResponse) ProtoMessage() {} func (x *GetDLQReplicationMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil 
{ + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1729,22 +1724,19 @@ func (x *GetDLQReplicationMessagesResponse) GetReplicationTasks() []*v15.Replica // ReapplyEventsRequest is the request for reapply events API. type ReapplyEventsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - Events *v1.DataBlob `protobuf:"bytes,3,opt,name=events,proto3" json:"events,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + Events *v1.DataBlob `protobuf:"bytes,3,opt,name=events,proto3" json:"events,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReapplyEventsRequest) Reset() { *x = ReapplyEventsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReapplyEventsRequest) String() string { @@ -1755,7 +1747,7 @@ func (*ReapplyEventsRequest) ProtoMessage() {} func (x *ReapplyEventsRequest) ProtoReflect() protoreflect.Message { mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1792,18 +1784,16 @@ func (x *ReapplyEventsRequest) GetEvents() *v1.DataBlob { } type ReapplyEventsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReapplyEventsResponse) Reset() { *x = ReapplyEventsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReapplyEventsResponse) String() string { @@ -1814,7 +1804,7 @@ func (*ReapplyEventsResponse) ProtoMessage() {} func (x *ReapplyEventsResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1830,23 +1820,20 @@ func (*ReapplyEventsResponse) Descriptor() ([]byte, []int) { } type AddSearchAttributesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SearchAttributes map[string]v16.IndexedValueType `protobuf:"bytes,1,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=temporal.api.enums.v1.IndexedValueType"` + state 
protoimpl.MessageState `protogen:"open.v1"` + SearchAttributes map[string]v16.IndexedValueType `protobuf:"bytes,1,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=temporal.api.enums.v1.IndexedValueType"` IndexName string `protobuf:"bytes,2,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` SkipSchemaUpdate bool `protobuf:"varint,3,opt,name=skip_schema_update,json=skipSchemaUpdate,proto3" json:"skip_schema_update,omitempty"` Namespace string `protobuf:"bytes,4,opt,name=namespace,proto3" json:"namespace,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddSearchAttributesRequest) Reset() { *x = AddSearchAttributesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddSearchAttributesRequest) String() string { @@ -1857,7 +1844,7 @@ func (*AddSearchAttributesRequest) ProtoMessage() {} func (x *AddSearchAttributesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1901,18 +1888,16 @@ func (x *AddSearchAttributesRequest) GetNamespace() string { } type AddSearchAttributesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x 
*AddSearchAttributesResponse) Reset() { *x = AddSearchAttributesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddSearchAttributesResponse) String() string { @@ -1923,7 +1908,7 @@ func (*AddSearchAttributesResponse) ProtoMessage() {} func (x *AddSearchAttributesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1939,22 +1924,19 @@ func (*AddSearchAttributesResponse) Descriptor() ([]byte, []int) { } type RemoveSearchAttributesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SearchAttributes []string `protobuf:"bytes,1,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty"` - IndexName string `protobuf:"bytes,2,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` - Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + SearchAttributes []string `protobuf:"bytes,1,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty"` + IndexName string `protobuf:"bytes,2,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x 
*RemoveSearchAttributesRequest) Reset() { *x = RemoveSearchAttributesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoveSearchAttributesRequest) String() string { @@ -1965,7 +1947,7 @@ func (*RemoveSearchAttributesRequest) ProtoMessage() {} func (x *RemoveSearchAttributesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2002,18 +1984,16 @@ func (x *RemoveSearchAttributesRequest) GetNamespace() string { } type RemoveSearchAttributesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RemoveSearchAttributesResponse) Reset() { *x = RemoveSearchAttributesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoveSearchAttributesResponse) String() string { @@ -2024,7 +2004,7 @@ func (*RemoveSearchAttributesResponse) ProtoMessage() {} func (x *RemoveSearchAttributesResponse) ProtoReflect() protoreflect.Message { mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2040,21 +2020,18 @@ func (*RemoveSearchAttributesResponse) Descriptor() ([]byte, []int) { } type GetSearchAttributesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` unknownFields protoimpl.UnknownFields - - IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetSearchAttributesRequest) Reset() { *x = GetSearchAttributesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSearchAttributesRequest) String() string { @@ -2065,7 +2042,7 @@ func (*GetSearchAttributesRequest) ProtoMessage() {} func (x *GetSearchAttributesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2095,24 +2072,21 @@ func (x *GetSearchAttributesRequest) GetNamespace() string { } type 
GetSearchAttributesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CustomAttributes map[string]v16.IndexedValueType `protobuf:"bytes,1,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=temporal.api.enums.v1.IndexedValueType"` - SystemAttributes map[string]v16.IndexedValueType `protobuf:"bytes,2,rep,name=system_attributes,json=systemAttributes,proto3" json:"system_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=temporal.api.enums.v1.IndexedValueType"` - Mapping map[string]string `protobuf:"bytes,3,rep,name=mapping,proto3" json:"mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + CustomAttributes map[string]v16.IndexedValueType `protobuf:"bytes,1,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=temporal.api.enums.v1.IndexedValueType"` + SystemAttributes map[string]v16.IndexedValueType `protobuf:"bytes,2,rep,name=system_attributes,json=systemAttributes,proto3" json:"system_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=temporal.api.enums.v1.IndexedValueType"` + Mapping map[string]string `protobuf:"bytes,3,rep,name=mapping,proto3" json:"mapping,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // State of the workflow that adds search attributes to the system. 
AddWorkflowExecutionInfo *v17.WorkflowExecutionInfo `protobuf:"bytes,4,opt,name=add_workflow_execution_info,json=addWorkflowExecutionInfo,proto3" json:"add_workflow_execution_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetSearchAttributesResponse) Reset() { *x = GetSearchAttributesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetSearchAttributesResponse) String() string { @@ -2123,7 +2097,7 @@ func (*GetSearchAttributesResponse) ProtoMessage() {} func (x *GetSearchAttributesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2167,20 +2141,17 @@ func (x *GetSearchAttributesResponse) GetAddWorkflowExecutionInfo() *v17.Workflo } type DescribeClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` unknownFields protoimpl.UnknownFields - - ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescribeClusterRequest) Reset() { *x = DescribeClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[35] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeClusterRequest) String() string { @@ -2191,7 +2162,7 @@ func (*DescribeClusterRequest) ProtoMessage() {} func (x *DescribeClusterRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2214,32 +2185,30 @@ func (x *DescribeClusterRequest) GetClusterName() string { } type DescribeClusterResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SupportedClients map[string]string `protobuf:"bytes,1,rep,name=supported_clients,json=supportedClients,proto3" json:"supported_clients,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ServerVersion string `protobuf:"bytes,2,opt,name=server_version,json=serverVersion,proto3" json:"server_version,omitempty"` - MembershipInfo *v18.MembershipInfo `protobuf:"bytes,3,opt,name=membership_info,json=membershipInfo,proto3" json:"membership_info,omitempty"` - ClusterId string `protobuf:"bytes,4,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - ClusterName string `protobuf:"bytes,5,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - HistoryShardCount int32 `protobuf:"varint,6,opt,name=history_shard_count,json=historyShardCount,proto3" json:"history_shard_count,omitempty"` - PersistenceStore string `protobuf:"bytes,7,opt,name=persistence_store,json=persistenceStore,proto3" json:"persistence_store,omitempty"` - VisibilityStore string 
`protobuf:"bytes,8,opt,name=visibility_store,json=visibilityStore,proto3" json:"visibility_store,omitempty"` - VersionInfo *v19.VersionInfo `protobuf:"bytes,9,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - FailoverVersionIncrement int64 `protobuf:"varint,10,opt,name=failover_version_increment,json=failoverVersionIncrement,proto3" json:"failover_version_increment,omitempty"` - InitialFailoverVersion int64 `protobuf:"varint,11,opt,name=initial_failover_version,json=initialFailoverVersion,proto3" json:"initial_failover_version,omitempty"` - IsGlobalNamespaceEnabled bool `protobuf:"varint,12,opt,name=is_global_namespace_enabled,json=isGlobalNamespaceEnabled,proto3" json:"is_global_namespace_enabled,omitempty"` - Tags map[string]string `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + SupportedClients map[string]string `protobuf:"bytes,1,rep,name=supported_clients,json=supportedClients,proto3" json:"supported_clients,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ServerVersion string `protobuf:"bytes,2,opt,name=server_version,json=serverVersion,proto3" json:"server_version,omitempty"` + MembershipInfo *v18.MembershipInfo `protobuf:"bytes,3,opt,name=membership_info,json=membershipInfo,proto3" json:"membership_info,omitempty"` + ClusterId string `protobuf:"bytes,4,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ClusterName string `protobuf:"bytes,5,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + HistoryShardCount int32 `protobuf:"varint,6,opt,name=history_shard_count,json=historyShardCount,proto3" json:"history_shard_count,omitempty"` + PersistenceStore string `protobuf:"bytes,7,opt,name=persistence_store,json=persistenceStore,proto3" json:"persistence_store,omitempty"` + VisibilityStore 
string `protobuf:"bytes,8,opt,name=visibility_store,json=visibilityStore,proto3" json:"visibility_store,omitempty"` + VersionInfo *v19.VersionInfo `protobuf:"bytes,9,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` + FailoverVersionIncrement int64 `protobuf:"varint,10,opt,name=failover_version_increment,json=failoverVersionIncrement,proto3" json:"failover_version_increment,omitempty"` + InitialFailoverVersion int64 `protobuf:"varint,11,opt,name=initial_failover_version,json=initialFailoverVersion,proto3" json:"initial_failover_version,omitempty"` + IsGlobalNamespaceEnabled bool `protobuf:"varint,12,opt,name=is_global_namespace_enabled,json=isGlobalNamespaceEnabled,proto3" json:"is_global_namespace_enabled,omitempty"` + Tags map[string]string `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + HttpAddress string `protobuf:"bytes,14,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeClusterResponse) Reset() { *x = DescribeClusterResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeClusterResponse) String() string { @@ -2250,7 +2219,7 @@ func (*DescribeClusterResponse) ProtoMessage() {} func (x *DescribeClusterResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -2356,22 +2325,26 @@ func (x *DescribeClusterResponse) GetTags() map[string]string { return nil } +func (x *DescribeClusterResponse) GetHttpAddress() string { + if x != nil { + return x.HttpAddress + } + return "" +} + type ListClustersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListClustersRequest) Reset() { *x = ListClustersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListClustersRequest) String() string { @@ -2382,7 +2355,7 @@ func (*ListClustersRequest) ProtoMessage() {} func (x *ListClustersRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2412,21 +2385,18 @@ func (x *ListClustersRequest) GetNextPageToken() []byte { } type ListClustersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache 
- unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Clusters []*v12.ClusterMetadata `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"` NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListClustersResponse) Reset() { *x = ListClustersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListClustersResponse) String() string { @@ -2437,7 +2407,7 @@ func (*ListClustersResponse) ProtoMessage() {} func (x *ListClustersResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2467,21 +2437,22 @@ func (x *ListClustersResponse) GetNextPageToken() []byte { } type AddOrUpdateRemoteClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - FrontendAddress string `protobuf:"bytes,1,opt,name=frontend_address,json=frontendAddress,proto3" json:"frontend_address,omitempty"` - EnableRemoteClusterConnection bool `protobuf:"varint,2,opt,name=enable_remote_cluster_connection,json=enableRemoteClusterConnection,proto3" json:"enable_remote_cluster_connection,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + FrontendAddress string `protobuf:"bytes,1,opt,name=frontend_address,json=frontendAddress,proto3" 
json:"frontend_address,omitempty"` + EnableRemoteClusterConnection bool `protobuf:"varint,2,opt,name=enable_remote_cluster_connection,json=enableRemoteClusterConnection,proto3" json:"enable_remote_cluster_connection,omitempty"` + // Deprecated: Marked as deprecated in temporal/server/api/adminservice/v1/request_response.proto. + FrontendHttpAddress string `protobuf:"bytes,3,opt,name=frontend_http_address,json=frontendHttpAddress,proto3" json:"frontend_http_address,omitempty"` + // enable_replication controls whether replication streams are active. + EnableReplication bool `protobuf:"varint,4,opt,name=enable_replication,json=enableReplication,proto3" json:"enable_replication,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddOrUpdateRemoteClusterRequest) Reset() { *x = AddOrUpdateRemoteClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddOrUpdateRemoteClusterRequest) String() string { @@ -2492,7 +2463,7 @@ func (*AddOrUpdateRemoteClusterRequest) ProtoMessage() {} func (x *AddOrUpdateRemoteClusterRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2521,19 +2492,32 @@ func (x *AddOrUpdateRemoteClusterRequest) GetEnableRemoteClusterConnection() boo return false } +// Deprecated: Marked as deprecated in temporal/server/api/adminservice/v1/request_response.proto. 
+func (x *AddOrUpdateRemoteClusterRequest) GetFrontendHttpAddress() string { + if x != nil { + return x.FrontendHttpAddress + } + return "" +} + +func (x *AddOrUpdateRemoteClusterRequest) GetEnableReplication() bool { + if x != nil { + return x.EnableReplication + } + return false +} + type AddOrUpdateRemoteClusterResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddOrUpdateRemoteClusterResponse) Reset() { *x = AddOrUpdateRemoteClusterResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddOrUpdateRemoteClusterResponse) String() string { @@ -2544,7 +2528,7 @@ func (*AddOrUpdateRemoteClusterResponse) ProtoMessage() {} func (x *AddOrUpdateRemoteClusterResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2560,20 +2544,17 @@ func (*AddOrUpdateRemoteClusterResponse) Descriptor() ([]byte, []int) { } type RemoveRemoteClusterRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` unknownFields protoimpl.UnknownFields - - ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + 
sizeCache protoimpl.SizeCache } func (x *RemoveRemoteClusterRequest) Reset() { *x = RemoveRemoteClusterRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoveRemoteClusterRequest) String() string { @@ -2584,7 +2565,7 @@ func (*RemoveRemoteClusterRequest) ProtoMessage() {} func (x *RemoveRemoteClusterRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2607,18 +2588,16 @@ func (x *RemoveRemoteClusterRequest) GetClusterName() string { } type RemoveRemoteClusterResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RemoveRemoteClusterResponse) Reset() { *x = RemoveRemoteClusterResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RemoveRemoteClusterResponse) String() string { @@ -2629,7 +2608,7 @@ func (*RemoveRemoteClusterResponse) ProtoMessage() {} func (x *RemoveRemoteClusterResponse) ProtoReflect() protoreflect.Message { mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2645,10 +2624,7 @@ func (*RemoveRemoteClusterResponse) Descriptor() ([]byte, []int) { } type ListClusterMembersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "within" is used to indicate a time range. --) @@ -2662,15 +2638,15 @@ type ListClusterMembersRequest struct { SessionStartedAfterTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=session_started_after_time,json=sessionStartedAfterTime,proto3" json:"session_started_after_time,omitempty"` PageSize int32 `protobuf:"varint,6,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` NextPageToken []byte `protobuf:"bytes,7,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListClusterMembersRequest) Reset() { *x = ListClusterMembersRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListClusterMembersRequest) String() string { @@ -2681,7 +2657,7 @@ func (*ListClusterMembersRequest) ProtoMessage() {} func (x *ListClusterMembersRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil 
{ + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2746,21 +2722,18 @@ func (x *ListClusterMembersRequest) GetNextPageToken() []byte { } type ListClusterMembersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ActiveMembers []*v18.ClusterMember `protobuf:"bytes,1,rep,name=active_members,json=activeMembers,proto3" json:"active_members,omitempty"` + NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - ActiveMembers []*v18.ClusterMember `protobuf:"bytes,1,rep,name=active_members,json=activeMembers,proto3" json:"active_members,omitempty"` - NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListClusterMembersResponse) Reset() { *x = ListClusterMembersResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListClusterMembersResponse) String() string { @@ -2771,7 +2744,7 @@ func (*ListClusterMembersResponse) ProtoMessage() {} func (x *ListClusterMembersResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2801,25 +2774,22 @@ func (x *ListClusterMembersResponse) GetNextPageToken() []byte { } type 
GetDLQMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Type v14.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` MaximumPageSize int32 `protobuf:"varint,5,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetDLQMessagesRequest) Reset() { *x = GetDLQMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDLQMessagesRequest) String() string { @@ -2830,7 +2800,7 @@ func (*GetDLQMessagesRequest) ProtoMessage() {} func (x *GetDLQMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2888,23 +2858,20 @@ func (x 
*GetDLQMessagesRequest) GetNextPageToken() []byte { } type GetDLQMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Type v14.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ReplicationTasks []*v15.ReplicationTask `protobuf:"bytes,2,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` ReplicationTasksInfo []*v15.ReplicationTaskInfo `protobuf:"bytes,4,rep,name=replication_tasks_info,json=replicationTasksInfo,proto3" json:"replication_tasks_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetDLQMessagesResponse) Reset() { *x = GetDLQMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDLQMessagesResponse) String() string { @@ -2915,7 +2882,7 @@ func (*GetDLQMessagesResponse) ProtoMessage() {} func (x *GetDLQMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2959,23 +2926,20 @@ func (x *GetDLQMessagesResponse) GetReplicationTasksInfo() []*v15.ReplicationTas } type PurgeDLQMessagesRequest struct { - state protoimpl.MessageState - 
sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Type v14.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PurgeDLQMessagesRequest) Reset() { *x = PurgeDLQMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PurgeDLQMessagesRequest) String() string { @@ -2986,7 +2950,7 @@ func (*PurgeDLQMessagesRequest) ProtoMessage() {} func (x *PurgeDLQMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3030,18 +2994,16 @@ func (x *PurgeDLQMessagesRequest) GetInclusiveEndMessageId() int64 { } type PurgeDLQMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PurgeDLQMessagesResponse) Reset() { *x 
= PurgeDLQMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PurgeDLQMessagesResponse) String() string { @@ -3052,7 +3014,7 @@ func (*PurgeDLQMessagesResponse) ProtoMessage() {} func (x *PurgeDLQMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3068,25 +3030,22 @@ func (*PurgeDLQMessagesResponse) Descriptor() ([]byte, []int) { } type MergeDLQMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Type v14.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` MaximumPageSize int32 `protobuf:"varint,5,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache } func (x *MergeDLQMessagesRequest) Reset() { *x = MergeDLQMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MergeDLQMessagesRequest) String() string { @@ -3097,7 +3056,7 @@ func (*MergeDLQMessagesRequest) ProtoMessage() {} func (x *MergeDLQMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3155,20 +3114,17 @@ func (x *MergeDLQMessagesRequest) GetNextPageToken() []byte { } type MergeDLQMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MergeDLQMessagesResponse) Reset() { *x = MergeDLQMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MergeDLQMessagesResponse) String() string { 
@@ -3179,7 +3135,7 @@ func (*MergeDLQMessagesResponse) ProtoMessage() {} func (x *MergeDLQMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3202,21 +3158,21 @@ func (x *MergeDLQMessagesResponse) GetNextPageToken() []byte { } type RefreshWorkflowTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + Archetype string `protobuf:"bytes,4,opt,name=archetype,proto3" json:"archetype,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,5,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RefreshWorkflowTasksRequest) Reset() { *x = RefreshWorkflowTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RefreshWorkflowTasksRequest) String() string { @@ -3227,7 +3183,7 @@ func 
(*RefreshWorkflowTasksRequest) ProtoMessage() {} func (x *RefreshWorkflowTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3256,19 +3212,31 @@ func (x *RefreshWorkflowTasksRequest) GetExecution() *v1.WorkflowExecution { return nil } +func (x *RefreshWorkflowTasksRequest) GetArchetype() string { + if x != nil { + return x.Archetype + } + return "" +} + +func (x *RefreshWorkflowTasksRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + type RefreshWorkflowTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RefreshWorkflowTasksResponse) Reset() { *x = RefreshWorkflowTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RefreshWorkflowTasksResponse) String() string { @@ -3279,7 +3247,7 @@ func (*RefreshWorkflowTasksResponse) ProtoMessage() {} func (x *RefreshWorkflowTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3295,27 +3263,24 @@ func (*RefreshWorkflowTasksResponse) Descriptor() ([]byte, []int) { } type 
ResendReplicationTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + RemoteCluster string `protobuf:"bytes,4,opt,name=remote_cluster,json=remoteCluster,proto3" json:"remote_cluster,omitempty"` + StartEventId int64 `protobuf:"varint,5,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` + StartVersion int64 `protobuf:"varint,6,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + EndEventId int64 `protobuf:"varint,7,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` + EndVersion int64 `protobuf:"varint,8,opt,name=end_version,json=endVersion,proto3" json:"end_version,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - RemoteCluster string `protobuf:"bytes,4,opt,name=remote_cluster,json=remoteCluster,proto3" json:"remote_cluster,omitempty"` - StartEventId int64 `protobuf:"varint,5,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` - StartVersion int64 `protobuf:"varint,6,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` - EndEventId int64 `protobuf:"varint,7,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` - EndVersion int64 
`protobuf:"varint,8,opt,name=end_version,json=endVersion,proto3" json:"end_version,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ResendReplicationTasksRequest) Reset() { *x = ResendReplicationTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResendReplicationTasksRequest) String() string { @@ -3326,7 +3291,7 @@ func (*ResendReplicationTasksRequest) ProtoMessage() {} func (x *ResendReplicationTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3398,18 +3363,16 @@ func (x *ResendReplicationTasksRequest) GetEndVersion() int64 { } type ResendReplicationTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ResendReplicationTasksResponse) Reset() { *x = ResendReplicationTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResendReplicationTasksResponse) String() string { @@ -3420,7 +3383,7 @@ func (*ResendReplicationTasksResponse) ProtoMessage() {} func (x 
*ResendReplicationTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3436,26 +3399,25 @@ func (*ResendReplicationTasksResponse) Descriptor() ([]byte, []int) { } type GetTaskQueueTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v16.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + MinPass int64 `protobuf:"varint,9,opt,name=min_pass,json=minPass,proto3" json:"min_pass,omitempty"` + MinTaskId int64 `protobuf:"varint,4,opt,name=min_task_id,json=minTaskId,proto3" json:"min_task_id,omitempty"` + MaxTaskId int64 `protobuf:"varint,5,opt,name=max_task_id,json=maxTaskId,proto3" json:"max_task_id,omitempty"` + BatchSize int32 `protobuf:"varint,6,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,7,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + Subqueue int32 `protobuf:"varint,8,opt,name=subqueue,proto3" json:"subqueue,omitempty"` unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - TaskQueueType v16.TaskQueueType 
`protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` - MinTaskId int64 `protobuf:"varint,4,opt,name=min_task_id,json=minTaskId,proto3" json:"min_task_id,omitempty"` - MaxTaskId int64 `protobuf:"varint,5,opt,name=max_task_id,json=maxTaskId,proto3" json:"max_task_id,omitempty"` - BatchSize int32 `protobuf:"varint,6,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,7,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetTaskQueueTasksRequest) Reset() { *x = GetTaskQueueTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTaskQueueTasksRequest) String() string { @@ -3466,7 +3428,7 @@ func (*GetTaskQueueTasksRequest) ProtoMessage() {} func (x *GetTaskQueueTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3502,6 +3464,13 @@ func (x *GetTaskQueueTasksRequest) GetTaskQueueType() v16.TaskQueueType { return v16.TaskQueueType(0) } +func (x *GetTaskQueueTasksRequest) GetMinPass() int64 { + if x != nil { + return x.MinPass + } + return 0 +} + func (x *GetTaskQueueTasksRequest) GetMinTaskId() int64 { if x != nil { return x.MinTaskId @@ -3530,22 +3499,26 @@ func (x *GetTaskQueueTasksRequest) GetNextPageToken() []byte { return nil } -type 
GetTaskQueueTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *GetTaskQueueTasksRequest) GetSubqueue() int32 { + if x != nil { + return x.Subqueue + } + return 0 +} +type GetTaskQueueTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` Tasks []*v12.AllocatedTaskInfo `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetTaskQueueTasksResponse) Reset() { *x = GetTaskQueueTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetTaskQueueTasksResponse) String() string { @@ -3556,7 +3529,7 @@ func (*GetTaskQueueTasksResponse) ProtoMessage() {} func (x *GetTaskQueueTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3586,21 +3559,21 @@ func (x *GetTaskQueueTasksResponse) GetNextPageToken() []byte { } type DeleteWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" 
json:"execution,omitempty"` + Archetype string `protobuf:"bytes,3,opt,name=archetype,proto3" json:"archetype,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,4,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DeleteWorkflowExecutionRequest) Reset() { *x = DeleteWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeleteWorkflowExecutionRequest) String() string { @@ -3611,7 +3584,7 @@ func (*DeleteWorkflowExecutionRequest) ProtoMessage() {} func (x *DeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3640,21 +3613,32 @@ func (x *DeleteWorkflowExecutionRequest) GetExecution() *v1.WorkflowExecution { return nil } +func (x *DeleteWorkflowExecutionRequest) GetArchetype() string { + if x != nil { + return x.Archetype + } + return "" +} + +func (x *DeleteWorkflowExecutionRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + type DeleteWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state 
protoimpl.MessageState `protogen:"open.v1"` + Warnings []string `protobuf:"bytes,1,rep,name=warnings,proto3" json:"warnings,omitempty"` unknownFields protoimpl.UnknownFields - - Warnings []string `protobuf:"bytes,1,rep,name=warnings,proto3" json:"warnings,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DeleteWorkflowExecutionResponse) Reset() { *x = DeleteWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DeleteWorkflowExecutionResponse) String() string { @@ -3665,7 +3649,7 @@ func (*DeleteWorkflowExecutionResponse) ProtoMessage() {} func (x *DeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3688,23 +3672,20 @@ func (x *DeleteWorkflowExecutionResponse) GetWarnings() []string { } type StreamWorkflowReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Attributes: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Attributes: // // *StreamWorkflowReplicationMessagesRequest_SyncReplicationState - Attributes isStreamWorkflowReplicationMessagesRequest_Attributes `protobuf_oneof:"attributes"` + Attributes isStreamWorkflowReplicationMessagesRequest_Attributes `protobuf_oneof:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x 
*StreamWorkflowReplicationMessagesRequest) Reset() { *x = StreamWorkflowReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StreamWorkflowReplicationMessagesRequest) String() string { @@ -3715,7 +3696,7 @@ func (*StreamWorkflowReplicationMessagesRequest) ProtoMessage() {} func (x *StreamWorkflowReplicationMessagesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3730,16 +3711,18 @@ func (*StreamWorkflowReplicationMessagesRequest) Descriptor() ([]byte, []int) { return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{59} } -func (m *StreamWorkflowReplicationMessagesRequest) GetAttributes() isStreamWorkflowReplicationMessagesRequest_Attributes { - if m != nil { - return m.Attributes +func (x *StreamWorkflowReplicationMessagesRequest) GetAttributes() isStreamWorkflowReplicationMessagesRequest_Attributes { + if x != nil { + return x.Attributes } return nil } func (x *StreamWorkflowReplicationMessagesRequest) GetSyncReplicationState() *v15.SyncReplicationState { - if x, ok := x.GetAttributes().(*StreamWorkflowReplicationMessagesRequest_SyncReplicationState); ok { - return x.SyncReplicationState + if x != nil { + if x, ok := x.Attributes.(*StreamWorkflowReplicationMessagesRequest_SyncReplicationState); ok { + return x.SyncReplicationState + } } return nil } @@ -3756,23 +3739,20 @@ func 
(*StreamWorkflowReplicationMessagesRequest_SyncReplicationState) isStreamWo } type StreamWorkflowReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Attributes: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Attributes: // // *StreamWorkflowReplicationMessagesResponse_Messages - Attributes isStreamWorkflowReplicationMessagesResponse_Attributes `protobuf_oneof:"attributes"` + Attributes isStreamWorkflowReplicationMessagesResponse_Attributes `protobuf_oneof:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StreamWorkflowReplicationMessagesResponse) Reset() { *x = StreamWorkflowReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StreamWorkflowReplicationMessagesResponse) String() string { @@ -3783,7 +3763,7 @@ func (*StreamWorkflowReplicationMessagesResponse) ProtoMessage() {} func (x *StreamWorkflowReplicationMessagesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3798,16 +3778,18 @@ func (*StreamWorkflowReplicationMessagesResponse) Descriptor() ([]byte, []int) { return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{60} } -func (m *StreamWorkflowReplicationMessagesResponse) GetAttributes() 
isStreamWorkflowReplicationMessagesResponse_Attributes { - if m != nil { - return m.Attributes +func (x *StreamWorkflowReplicationMessagesResponse) GetAttributes() isStreamWorkflowReplicationMessagesResponse_Attributes { + if x != nil { + return x.Attributes } return nil } func (x *StreamWorkflowReplicationMessagesResponse) GetMessages() *v15.WorkflowReplicationMessages { - if x, ok := x.GetAttributes().(*StreamWorkflowReplicationMessagesResponse_Messages); ok { - return x.Messages + if x != nil { + if x, ok := x.Attributes.(*StreamWorkflowReplicationMessagesResponse_Messages); ok { + return x.Messages + } } return nil } @@ -3824,24 +3806,21 @@ func (*StreamWorkflowReplicationMessagesResponse_Messages) isStreamWorkflowRepli } type GetNamespaceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Attributes: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Attributes: // // *GetNamespaceRequest_Namespace // *GetNamespaceRequest_Id - Attributes isGetNamespaceRequest_Attributes `protobuf_oneof:"attributes"` + Attributes isGetNamespaceRequest_Attributes `protobuf_oneof:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetNamespaceRequest) Reset() { *x = GetNamespaceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNamespaceRequest) String() string { @@ -3852,7 +3831,7 @@ func (*GetNamespaceRequest) ProtoMessage() {} func (x *GetNamespaceRequest) ProtoReflect() protoreflect.Message { mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3867,23 +3846,27 @@ func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{61} } -func (m *GetNamespaceRequest) GetAttributes() isGetNamespaceRequest_Attributes { - if m != nil { - return m.Attributes +func (x *GetNamespaceRequest) GetAttributes() isGetNamespaceRequest_Attributes { + if x != nil { + return x.Attributes } return nil } func (x *GetNamespaceRequest) GetNamespace() string { - if x, ok := x.GetAttributes().(*GetNamespaceRequest_Namespace); ok { - return x.Namespace + if x != nil { + if x, ok := x.Attributes.(*GetNamespaceRequest_Namespace); ok { + return x.Namespace + } } return "" } func (x *GetNamespaceRequest) GetId() string { - if x, ok := x.GetAttributes().(*GetNamespaceRequest_Id); ok { - return x.Id + if x != nil { + if x, ok := x.Attributes.(*GetNamespaceRequest_Id); ok { + return x.Id + } } return "" } @@ -3905,10 +3888,7 @@ func (*GetNamespaceRequest_Namespace) isGetNamespaceRequest_Attributes() {} func (*GetNamespaceRequest_Id) isGetNamespaceRequest_Attributes() {} type GetNamespaceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Info *v110.NamespaceInfo `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` Config *v110.NamespaceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` ReplicationConfig *v111.NamespaceReplicationConfig `protobuf:"bytes,5,opt,name=replication_config,json=replicationConfig,proto3" json:"replication_config,omitempty"` @@ -3916,15 +3896,15 @@ type GetNamespaceResponse struct { FailoverVersion int64 
`protobuf:"varint,7,opt,name=failover_version,json=failoverVersion,proto3" json:"failover_version,omitempty"` FailoverHistory []*v111.FailoverStatus `protobuf:"bytes,8,rep,name=failover_history,json=failoverHistory,proto3" json:"failover_history,omitempty"` IsGlobalNamespace bool `protobuf:"varint,9,opt,name=is_global_namespace,json=isGlobalNamespace,proto3" json:"is_global_namespace,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetNamespaceResponse) Reset() { *x = GetNamespaceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetNamespaceResponse) String() string { @@ -3935,7 +3915,7 @@ func (*GetNamespaceResponse) ProtoMessage() {} func (x *GetNamespaceResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4000,23 +3980,20 @@ func (x *GetNamespaceResponse) GetIsGlobalNamespace() bool { } type GetDLQTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - DlqKey *v112.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + DlqKey *v112.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` // page_size must be positive. Up to this many tasks will be returned. 
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetDLQTasksRequest) Reset() { *x = GetDLQTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDLQTasksRequest) String() string { @@ -4027,7 +4004,7 @@ func (*GetDLQTasksRequest) ProtoMessage() {} func (x *GetDLQTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4064,24 +4041,21 @@ func (x *GetDLQTasksRequest) GetNextPageToken() []byte { } type GetDLQTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` DlqTasks []*v112.HistoryDLQTask `protobuf:"bytes,1,rep,name=dlq_tasks,json=dlqTasks,proto3" json:"dlq_tasks,omitempty"` // next_page_token is empty if there are no more results. However, the converse is not true. If there are no more // results, this field may still be non-empty. This is to avoid having to do a count query to determine whether // there are more results. 
NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetDLQTasksResponse) Reset() { *x = GetDLQTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetDLQTasksResponse) String() string { @@ -4092,7 +4066,7 @@ func (*GetDLQTasksResponse) ProtoMessage() {} func (x *GetDLQTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4122,21 +4096,18 @@ func (x *GetDLQTasksResponse) GetNextPageToken() []byte { } type PurgeDLQTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` DlqKey *v112.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` InclusiveMaxTaskMetadata *v112.HistoryDLQTaskMetadata `protobuf:"bytes,2,opt,name=inclusive_max_task_metadata,json=inclusiveMaxTaskMetadata,proto3" json:"inclusive_max_task_metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PurgeDLQTasksRequest) Reset() { *x = PurgeDLQTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PurgeDLQTasksRequest) String() string { @@ -4147,7 +4118,7 @@ func (*PurgeDLQTasksRequest) ProtoMessage() {} func (x *PurgeDLQTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4177,21 +4148,18 @@ func (x *PurgeDLQTasksRequest) GetInclusiveMaxTaskMetadata() *v112.HistoryDLQTas } type PurgeDLQTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // job_token is a token that can be used to query the status of the purge operation. 
- JobToken []byte `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` + JobToken []byte `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PurgeDLQTasksResponse) Reset() { *x = PurgeDLQTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PurgeDLQTasksResponse) String() string { @@ -4202,7 +4170,7 @@ func (*PurgeDLQTasksResponse) ProtoMessage() {} func (x *PurgeDLQTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4226,21 +4194,18 @@ func (x *PurgeDLQTasksResponse) GetJobToken() []byte { // DLQJobToken identifies a DLQ job. This proto is for internal use only and clients should not use it. 
type DLQJobToken struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` unknownFields protoimpl.UnknownFields - - WorkflowId string `protobuf:"bytes,1,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DLQJobToken) Reset() { *x = DLQJobToken{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DLQJobToken) String() string { @@ -4251,7 +4216,7 @@ func (*DLQJobToken) ProtoMessage() {} func (x *DLQJobToken) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4281,10 +4246,7 @@ func (x *DLQJobToken) GetRunId() string { } type MergeDLQTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` DlqKey *v112.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` InclusiveMaxTaskMetadata *v112.HistoryDLQTaskMetadata `protobuf:"bytes,2,opt,name=inclusive_max_task_metadata,json=inclusiveMaxTaskMetadata,proto3" 
json:"inclusive_max_task_metadata,omitempty"` // batch_size controls how many tasks to merge at a time. The default can be found in the dlq package of the server. @@ -4292,16 +4254,16 @@ type MergeDLQTasksRequest struct { // - If this is 0, the default will be used. // - If this is greater than the maximum allowed batch size, an error will be returned. // - Otherwise, the specified batch size will be used. - BatchSize int32 `protobuf:"varint,3,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` + BatchSize int32 `protobuf:"varint,3,opt,name=batch_size,json=batchSize,proto3" json:"batch_size,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *MergeDLQTasksRequest) Reset() { *x = MergeDLQTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MergeDLQTasksRequest) String() string { @@ -4312,7 +4274,7 @@ func (*MergeDLQTasksRequest) ProtoMessage() {} func (x *MergeDLQTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4349,20 +4311,17 @@ func (x *MergeDLQTasksRequest) GetBatchSize() int32 { } type MergeDLQTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + JobToken []byte `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` unknownFields protoimpl.UnknownFields - - JobToken []byte 
`protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MergeDLQTasksResponse) Reset() { *x = MergeDLQTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MergeDLQTasksResponse) String() string { @@ -4373,7 +4332,7 @@ func (*MergeDLQTasksResponse) ProtoMessage() {} func (x *MergeDLQTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4396,21 +4355,18 @@ func (x *MergeDLQTasksResponse) GetJobToken() []byte { } type DescribeDLQJobRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Job token of MergeDLQTasks or PurgeDLQTasks job. 
- JobToken string `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` + JobToken []byte `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeDLQJobRequest) Reset() { *x = DescribeDLQJobRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeDLQJobRequest) String() string { @@ -4421,7 +4377,7 @@ func (*DescribeDLQJobRequest) ProtoMessage() {} func (x *DescribeDLQJobRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4436,18 +4392,15 @@ func (*DescribeDLQJobRequest) Descriptor() ([]byte, []int) { return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{70} } -func (x *DescribeDLQJobRequest) GetJobToken() string { +func (x *DescribeDLQJobRequest) GetJobToken() []byte { if x != nil { return x.JobToken } - return "" + return nil } type DescribeDLQJobResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` DlqKey *v112.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` OperationType v14.DLQOperationType `protobuf:"varint,2,opt,name=operation_type,json=operationType,proto3,enum=temporal.server.api.enums.v1.DLQOperationType" 
json:"operation_type,omitempty"` OperationState v14.DLQOperationState `protobuf:"varint,3,opt,name=operation_state,json=operationState,proto3,enum=temporal.server.api.enums.v1.DLQOperationState" json:"operation_state,omitempty"` @@ -4461,15 +4414,15 @@ type DescribeDLQJobResponse struct { LastProcessedMessageId int64 `protobuf:"varint,7,opt,name=last_processed_message_id,json=lastProcessedMessageId,proto3" json:"last_processed_message_id,omitempty"` // messages_processed is the total number of messages that are re-enqueued and deleted from the DLQ so far by the DLQ job. MessagesProcessed int64 `protobuf:"varint,8,opt,name=messages_processed,json=messagesProcessed,proto3" json:"messages_processed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeDLQJobResponse) Reset() { *x = DescribeDLQJobResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeDLQJobResponse) String() string { @@ -4480,7 +4433,7 @@ func (*DescribeDLQJobResponse) ProtoMessage() {} func (x *DescribeDLQJobResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4552,23 +4505,20 @@ func (x *DescribeDLQJobResponse) GetMessagesProcessed() int64 { } type CancelDLQJobRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Job token of MergeDLQTasks 
or PurgeDLQTasks job to cancel. - JobToken string `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` + JobToken []byte `protobuf:"bytes,1,opt,name=job_token,json=jobToken,proto3" json:"job_token,omitempty"` // The reason for cancellation. - Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CancelDLQJobRequest) Reset() { *x = CancelDLQJobRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CancelDLQJobRequest) String() string { @@ -4579,7 +4529,7 @@ func (*CancelDLQJobRequest) ProtoMessage() {} func (x *CancelDLQJobRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4594,11 +4544,11 @@ func (*CancelDLQJobRequest) Descriptor() ([]byte, []int) { return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{72} } -func (x *CancelDLQJobRequest) GetJobToken() string { +func (x *CancelDLQJobRequest) GetJobToken() []byte { if x != nil { return x.JobToken } - return "" + return nil } func (x *CancelDLQJobRequest) GetReason() string { @@ -4609,22 +4559,19 @@ func (x *CancelDLQJobRequest) GetReason() string { } type CancelDLQJobResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields 
protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This is true if the workflow was successfully terminated by this request and false if // the workflow was already completed or terminated. - Canceled bool `protobuf:"varint,1,opt,name=canceled,proto3" json:"canceled,omitempty"` + Canceled bool `protobuf:"varint,1,opt,name=canceled,proto3" json:"canceled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CancelDLQJobResponse) Reset() { *x = CancelDLQJobResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CancelDLQJobResponse) String() string { @@ -4635,7 +4582,7 @@ func (*CancelDLQJobResponse) ProtoMessage() {} func (x *CancelDLQJobResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4661,21 +4608,18 @@ func (x *CancelDLQJobResponse) GetCanceled() bool { // dependency. In addition, we can't extract a common request proto because the shard_id needs to be present in the top // proto layer, so we duplicate it. It shouldn't be a big deal because this proto is not very big. 
type AddTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Tasks []*AddTasksRequest_Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty"` unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Tasks []*AddTasksRequest_Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AddTasksRequest) Reset() { *x = AddTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddTasksRequest) String() string { @@ -4686,7 +4630,7 @@ func (*AddTasksRequest) ProtoMessage() {} func (x *AddTasksRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4716,18 +4660,16 @@ func (x *AddTasksRequest) GetTasks() []*AddTasksRequest_Task { } type AddTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddTasksResponse) Reset() { *x = AddTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[75] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddTasksResponse) String() string { @@ -4738,7 +4680,7 @@ func (*AddTasksResponse) ProtoMessage() {} func (x *AddTasksResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4754,22 +4696,19 @@ func (*AddTasksResponse) Descriptor() ([]byte, []int) { } type ListQueuesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + QueueType int32 `protobuf:"varint,1,opt,name=queue_type,json=queueType,proto3" json:"queue_type,omitempty"` + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - QueueType int32 `protobuf:"varint,1,opt,name=queue_type,json=queueType,proto3" json:"queue_type,omitempty"` - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListQueuesRequest) Reset() { *x = ListQueuesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListQueuesRequest) String() string { @@ -4780,7 +4719,7 @@ func (*ListQueuesRequest) ProtoMessage() {} func (x *ListQueuesRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4817,21 +4756,18 @@ func (x *ListQueuesRequest) GetNextPageToken() []byte { } type ListQueuesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Queues []*ListQueuesResponse_QueueInfo `protobuf:"bytes,1,rep,name=queues,proto3" json:"queues,omitempty"` NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListQueuesResponse) Reset() { *x = ListQueuesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListQueuesResponse) String() string { @@ -4842,7 +4778,7 @@ func (*ListQueuesResponse) ProtoMessage() {} func (x *ListQueuesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4871,33 +4807,67 @@ func (x *ListQueuesResponse) GetNextPageToken() []byte { return nil } -type AddTasksRequest_Task struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DeepHealthCheckRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - CategoryId int32 `protobuf:"varint,1,opt,name=category_id,json=categoryId,proto3" json:"category_id,omitempty"` - Blob *v1.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` +func (x *DeepHealthCheckRequest) Reset() { + *x = DeepHealthCheckRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddTasksRequest_Task) Reset() { - *x = AddTasksRequest_Task{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[85] +func (x *DeepHealthCheckRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeepHealthCheckRequest) ProtoMessage() {} + +func (x *DeepHealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[78] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } + return mi.MessageOf(x) } -func (x *AddTasksRequest_Task) String() string { +// Deprecated: Use DeepHealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*DeepHealthCheckRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{78} +} + +type DeepHealthCheckResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + State v14.HealthState `protobuf:"varint,1,opt,name=state,proto3,enum=temporal.server.api.enums.v1.HealthState" json:"state,omitempty"` + // Per-service diagnostic details including per-host breakdown. + Services []*v113.ServiceHealthDetail `protobuf:"bytes,2,rep,name=services,proto3" json:"services,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeepHealthCheckResponse) Reset() { + *x = DeepHealthCheckResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeepHealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddTasksRequest_Task) ProtoMessage() {} +func (*DeepHealthCheckResponse) ProtoMessage() {} -func (x *AddTasksRequest_Task) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[85] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeepHealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[79] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4907,52 +4877,54 @@ func (x *AddTasksRequest_Task) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddTasksRequest_Task.ProtoReflect.Descriptor instead. 
-func (*AddTasksRequest_Task) Descriptor() ([]byte, []int) { - return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{74, 0} +// Deprecated: Use DeepHealthCheckResponse.ProtoReflect.Descriptor instead. +func (*DeepHealthCheckResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{79} } -func (x *AddTasksRequest_Task) GetCategoryId() int32 { +func (x *DeepHealthCheckResponse) GetState() v14.HealthState { if x != nil { - return x.CategoryId + return x.State } - return 0 + return v14.HealthState(0) } -func (x *AddTasksRequest_Task) GetBlob() *v1.DataBlob { +func (x *DeepHealthCheckResponse) GetServices() []*v113.ServiceHealthDetail { if x != nil { - return x.Blob + return x.Services } return nil } -type ListQueuesResponse_QueueInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SyncWorkflowStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + VersionedTransition *v12.VersionedTransition `protobuf:"bytes,3,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + VersionHistories *v11.VersionHistories `protobuf:"bytes,4,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + TargetClusterId int32 `protobuf:"varint,5,opt,name=target_cluster_id,json=targetClusterId,proto3" json:"target_cluster_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,6,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` unknownFields protoimpl.UnknownFields - - QueueName string 
`protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"` - MessageCount int64 `protobuf:"varint,2,opt,name=message_count,json=messageCount,proto3" json:"message_count,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ListQueuesResponse_QueueInfo) Reset() { - *x = ListQueuesResponse_QueueInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[86] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SyncWorkflowStateRequest) Reset() { + *x = SyncWorkflowStateRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ListQueuesResponse_QueueInfo) String() string { +func (x *SyncWorkflowStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListQueuesResponse_QueueInfo) ProtoMessage() {} +func (*SyncWorkflowStateRequest) ProtoMessage() {} -func (x *ListQueuesResponse_QueueInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[86] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SyncWorkflowStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[80] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4962,2207 +4934,1716 @@ func (x *ListQueuesResponse_QueueInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListQueuesResponse_QueueInfo.ProtoReflect.Descriptor instead. 
-func (*ListQueuesResponse_QueueInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{77, 0} +// Deprecated: Use SyncWorkflowStateRequest.ProtoReflect.Descriptor instead. +func (*SyncWorkflowStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{80} } -func (x *ListQueuesResponse_QueueInfo) GetQueueName() string { +func (x *SyncWorkflowStateRequest) GetNamespaceId() string { if x != nil { - return x.QueueName + return x.NamespaceId } return "" } -func (x *ListQueuesResponse_QueueInfo) GetMessageCount() int64 { +func (x *SyncWorkflowStateRequest) GetExecution() *v1.WorkflowExecution { if x != nil { - return x.MessageCount + return x.Execution } - return 0 + return nil } -var File_temporal_server_api_adminservice_v1_request_response_proto protoreflect.FileDescriptor +func (x *SyncWorkflowStateRequest) GetVersionedTransition() *v12.VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} -var file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc = []byte{ - 0x0a, 0x3a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 
0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, - 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 
0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6c, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x29, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, - 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6c, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x1a, 0x30, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x3f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x75, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8b, 0x01, 0x0a, 0x1a, - 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x09, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, - 0x00, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd5, - 0x02, 0x0a, 0x1e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0f, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 
- 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x3b, 0x0a, 0x1f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x8c, 0x01, - 0x0a, 0x1b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x22, 0xc6, 0x02, 0x0a, 0x1c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x25, 0x0a, 0x0c, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x41, 0x64, 0x64, - 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6c, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x6d, 0x75, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x75, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x72, 0x0a, - 0x16, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x14, 0x64, 0x61, 0x74, 0x61, 0x62, 
0x61, 0x73, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xe2, 0x01, 0x0a, 0x1a, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x5f, - 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x6f, - 0x73, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x12, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xee, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x27, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x4e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x73, 0x68, 
0x61, 0x72, 0x64, - 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, 0x0a, 0x0f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x32, 0x0a, 0x11, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x14, 0x0a, 0x12, 0x43, 0x6c, 0x6f, 0x73, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x0a, 0x0f, - 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, - 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x64, 0x0a, 0x10, 0x47, - 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, - 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 
0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x22, 0xf5, 0x01, 0x0a, 0x17, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x4c, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x74, - 0x61, 0x73, 0x6b, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x62, - 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, - 0x62, 0x61, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x8b, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 
0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x43, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xae, 0x02, 0x0a, 0x04, - 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, - 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 
0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x66, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x08, 0x66, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb8, 0x01, 0x0a, 0x11, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1e, 0x0a, - 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, - 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x14, 0x0a, 0x12, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xb3, 0x03, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x32, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x24, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, - 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 
0x2e, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x50, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xb0, - 0x02, 0x0a, 0x28, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x56, - 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x74, - 0x63, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, - 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 
0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xab, 0x03, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, - 0x13, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 
0x68, 0x00, 0x12, 0x24, - 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x65, 0x6e, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x2e, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x50, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xae, 0x02, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0f, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, - 
0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x03, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x49, - 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x98, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x06, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa8, 0x02, 0x0a, 0x1e, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x0e, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x56, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x81, 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x51, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xcd, 0x01, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x12, 0x3d, 0x0a, 0x19, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6c, 0x61, - 0x73, 0x74, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x19, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, - 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x82, 0x01, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x57, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x7e, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, - 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x09, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x89, 0x01, - 0x0a, 0x21, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x64, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x10, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xdf, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, - 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 
0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, - 0x00, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, - 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x92, - 0x03, 0x0a, 0x1a, 0x41, 0x64, 0x64, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x86, 0x01, 0x0a, 0x11, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x55, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 
0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, - 0x0a, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, - 0x0a, 0x12, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, - 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x74, 0x0a, - 0x15, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x1d, 0x0a, - 0x1b, 0x41, 0x64, 0x64, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x95, 0x01, 0x0a, 0x1d, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x11, 0x73, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 
0x74, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x69, 0x6e, 0x64, 0x65, - 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x20, 0x0a, 0x1e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x61, 0x0a, 0x1a, 0x47, 0x65, 0x74, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4e, - 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc2, 0x06, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x56, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 
- 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x87, 0x01, 0x0a, 0x11, 0x73, 0x79, 0x73, 0x74, - 0x65, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x56, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6b, 0x0a, 0x07, 0x6d, 0x61, - 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x72, 0x0a, 
0x1b, 0x61, 0x64, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x18, 0x61, 0x64, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x74, - 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x74, - 0x0a, 0x15, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x56, 0x61, 
0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, - 0x0c, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, - 0x16, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x83, 0x08, 0x0a, 0x17, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x83, 0x01, 0x0a, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x52, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 
0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, - 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, 0x6e, 0x66, 0x6f, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x11, 0x70, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x76, 0x69, 0x73, - 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 
0x09, 0x52, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x74, 0x6f, - 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x40, - 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x18, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x18, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x1b, 0x69, 0x73, 0x5f, 0x67, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x73, 0x47, 0x6c, - 0x6f, 0x62, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5e, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0d, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 
0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, - 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x42, 0x02, 0x68, - 0x00, 0x1a, 0x4b, 0x0a, 0x15, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x09, 0x54, 0x61, 0x67, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, - 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, - 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x9d, 0x01, 0x0a, 0x1f, 0x41, 0x64, 0x64, 0x4f, 0x72, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x22, - 0x0a, 0x20, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, - 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
0x65, - 0x22, 0x43, 0x0a, 0x1a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x22, 0x1d, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa3, - 0x03, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x6c, 0x61, - 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x77, 0x69, 0x74, 0x68, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x57, - 0x69, 0x74, 0x68, 0x69, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x70, 0x63, - 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, - 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, - 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 
0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, - 0x6f, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x1a, - 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, - 0x66, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x17, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, - 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa2, 0x01, 0x0a, - 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, - 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x0e, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x0d, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 
0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc5, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, - 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, - 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, - 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x45, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x50, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 
0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xe8, - 0x02, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, - 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x71, 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 
0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x14, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x49, 0x6e, 0x66, - 0x6f, 0x42, 0x02, 0x68, 0x00, 0x22, 0xeb, 0x01, 0x0a, 0x17, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, - 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, - 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x45, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x1a, 0x0a, 0x18, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, - 0x51, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xc7, 0x02, 0x0a, 0x17, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, - 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, - 0x69, 0x76, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x45, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, - 0x6d, 0x75, 0x6d, 0x50, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 
0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x46, 0x0a, 0x18, 0x4d, 0x65, 0x72, 0x67, 0x65, - 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x97, 0x01, 0x0a, 0x1b, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, - 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x1e, 0x0a, - 0x1c, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xcf, 0x02, 0x0a, 0x1d, - 0x52, 0x65, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, - 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, 0x0d, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x24, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x6e, 0x64, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x20, 0x0a, 0x1e, 0x52, 0x65, - 0x73, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc8, 0x02, 0x0a, 0x18, 0x47, - 0x65, 0x74, 
0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, - 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x50, 0x0a, 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x22, 0x0a, 0x0b, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x54, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x22, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x54, - 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, - 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x62, 0x61, 0x74, - 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x98, 0x01, 0x0a, 
0x19, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4f, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, - 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x8f, 0x01, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x41, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1e, 0x0a, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xae, - 0x01, 0x0a, 0x28, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x74, 0x0a, 0x16, 0x73, 0x79, 0x6e, 0x63, - 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, - 0x14, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x29, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x61, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 
- 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x48, - 0x00, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0c, - 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x5d, 0x0a, 0x13, 0x47, - 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x22, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x14, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, - 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0xf6, 0x03, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x04, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x46, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x6a, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, - 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, - 0x10, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5a, 0x0a, 0x10, 0x66, 0x61, 0x69, - 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x66, - 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x32, 0x0a, 0x13, 0x69, 0x73, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, 0x73, - 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xac, 0x01, 0x0a, 0x12, 0x47, 0x65, 
0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x07, 0x64, 0x6c, 0x71, 0x5f, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, - 0x4b, 0x65, 0x79, 0x52, 0x06, 0x64, 0x6c, 0x71, 0x4b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, - 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x91, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x44, 0x4c, - 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, - 0x09, 0x64, 0x6c, 0x71, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x08, 0x64, 0x6c, - 0x71, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xdb, 0x01, 0x0a, 0x14, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 
0x4c, 0x51, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x07, 0x64, 0x6c, - 0x71, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x44, 0x4c, 0x51, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x64, 0x6c, 0x71, 0x4b, 0x65, 0x79, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x78, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, - 0x78, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x4d, 0x61, 0x78, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x42, 0x02, 0x68, 0x00, 0x22, 0x38, 0x0a, 0x15, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x09, - 0x6a, 0x6f, 0x62, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, - 0x6a, 0x6f, 0x62, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x4d, 0x0a, 0x0b, 0x44, - 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, - 0x72, 
0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xfe, 0x01, 0x0a, 0x14, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, - 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, - 0x07, 0x64, 0x6c, 0x71, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x64, 0x6c, 0x71, 0x4b, 0x65, 0x79, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x78, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x54, - 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x62, 0x61, 0x74, 0x63, 0x68, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x62, 0x61, 0x74, 0x63, - 0x68, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x38, 0x0a, 0x15, 0x4d, 0x65, 0x72, 0x67, - 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1f, 0x0a, 0x09, 0x6a, 0x6f, 0x62, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x6a, 0x6f, 
0x62, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x38, 0x0a, 0x15, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x6a, 0x6f, 0x62, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb2, 0x04, 0x0a, 0x16, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x62, 0x65, 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x49, 0x0a, 0x07, 0x64, 0x6c, 0x71, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x64, 0x6c, 0x71, 0x4b, - 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x59, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x4c, 0x51, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x0f, - 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x4c, 0x51, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x39, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, - 0x6d, 0x61, 0x78, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x3d, 0x0a, 0x19, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, - 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x52, 0x0a, 0x13, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, - 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x6a, 0x6f, 0x62, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x6f, 0x62, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x1a, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x36, 0x0a, 0x14, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, - 0xec, 0x01, 0x0a, 0x0f, 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x53, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x1a, 0x65, 0x0a, 0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x23, 0x0a, 0x0b, 0x63, 0x61, 0x74, 0x65, 0x67, - 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x63, 0x61, 0x74, - 0x65, 0x67, 0x6f, 0x72, 0x79, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6c, - 0x6f, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 
0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x68, - 0x00, 0x22, 0x12, 0x0a, 0x10, 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x83, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x75, - 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x71, 0x75, 0x65, - 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, - 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xf8, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x57, 0x0a, 0x09, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x71, 0x75, 0x65, 0x75, - 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x42, - 0x38, 0x5a, 0x36, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *SyncWorkflowStateRequest) GetVersionHistories() *v11.VersionHistories { + if x != nil { + return x.VersionHistories + } + return nil } -var ( - file_temporal_server_api_adminservice_v1_request_response_proto_rawDescOnce sync.Once - file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData = file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc -) +func (x *SyncWorkflowStateRequest) GetTargetClusterId() int32 { + if x != nil { + return x.TargetClusterId + } + return 0 +} -func file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP() []byte { - file_temporal_server_api_adminservice_v1_request_response_proto_rawDescOnce.Do(func() { - file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData) - }) - return 
file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData +func (x *SyncWorkflowStateRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 } -var file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 87) -var file_temporal_server_api_adminservice_v1_request_response_proto_goTypes = []interface{}{ - (*RebuildMutableStateRequest)(nil), // 0: temporal.server.api.adminservice.v1.RebuildMutableStateRequest - (*RebuildMutableStateResponse)(nil), // 1: temporal.server.api.adminservice.v1.RebuildMutableStateResponse - (*ImportWorkflowExecutionRequest)(nil), // 2: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest - (*ImportWorkflowExecutionResponse)(nil), // 3: temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse - (*DescribeMutableStateRequest)(nil), // 4: temporal.server.api.adminservice.v1.DescribeMutableStateRequest - (*DescribeMutableStateResponse)(nil), // 5: temporal.server.api.adminservice.v1.DescribeMutableStateResponse - (*DescribeHistoryHostRequest)(nil), // 6: temporal.server.api.adminservice.v1.DescribeHistoryHostRequest - (*DescribeHistoryHostResponse)(nil), // 7: temporal.server.api.adminservice.v1.DescribeHistoryHostResponse - (*CloseShardRequest)(nil), // 8: temporal.server.api.adminservice.v1.CloseShardRequest - (*CloseShardResponse)(nil), // 9: temporal.server.api.adminservice.v1.CloseShardResponse - (*GetShardRequest)(nil), // 10: temporal.server.api.adminservice.v1.GetShardRequest - (*GetShardResponse)(nil), // 11: temporal.server.api.adminservice.v1.GetShardResponse - (*ListHistoryTasksRequest)(nil), // 12: temporal.server.api.adminservice.v1.ListHistoryTasksRequest - (*ListHistoryTasksResponse)(nil), // 13: temporal.server.api.adminservice.v1.ListHistoryTasksResponse - (*Task)(nil), // 14: temporal.server.api.adminservice.v1.Task - (*RemoveTaskRequest)(nil), // 15: temporal.server.api.adminservice.v1.RemoveTaskRequest 
- (*RemoveTaskResponse)(nil), // 16: temporal.server.api.adminservice.v1.RemoveTaskResponse - (*GetWorkflowExecutionRawHistoryV2Request)(nil), // 17: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request - (*GetWorkflowExecutionRawHistoryV2Response)(nil), // 18: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response - (*GetWorkflowExecutionRawHistoryRequest)(nil), // 19: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest - (*GetWorkflowExecutionRawHistoryResponse)(nil), // 20: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse - (*GetReplicationMessagesRequest)(nil), // 21: temporal.server.api.adminservice.v1.GetReplicationMessagesRequest - (*GetReplicationMessagesResponse)(nil), // 22: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse - (*GetNamespaceReplicationMessagesRequest)(nil), // 23: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesRequest - (*GetNamespaceReplicationMessagesResponse)(nil), // 24: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse - (*GetDLQReplicationMessagesRequest)(nil), // 25: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest - (*GetDLQReplicationMessagesResponse)(nil), // 26: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse - (*ReapplyEventsRequest)(nil), // 27: temporal.server.api.adminservice.v1.ReapplyEventsRequest - (*ReapplyEventsResponse)(nil), // 28: temporal.server.api.adminservice.v1.ReapplyEventsResponse - (*AddSearchAttributesRequest)(nil), // 29: temporal.server.api.adminservice.v1.AddSearchAttributesRequest - (*AddSearchAttributesResponse)(nil), // 30: temporal.server.api.adminservice.v1.AddSearchAttributesResponse - (*RemoveSearchAttributesRequest)(nil), // 31: temporal.server.api.adminservice.v1.RemoveSearchAttributesRequest - (*RemoveSearchAttributesResponse)(nil), // 32: 
temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse - (*GetSearchAttributesRequest)(nil), // 33: temporal.server.api.adminservice.v1.GetSearchAttributesRequest - (*GetSearchAttributesResponse)(nil), // 34: temporal.server.api.adminservice.v1.GetSearchAttributesResponse - (*DescribeClusterRequest)(nil), // 35: temporal.server.api.adminservice.v1.DescribeClusterRequest - (*DescribeClusterResponse)(nil), // 36: temporal.server.api.adminservice.v1.DescribeClusterResponse - (*ListClustersRequest)(nil), // 37: temporal.server.api.adminservice.v1.ListClustersRequest - (*ListClustersResponse)(nil), // 38: temporal.server.api.adminservice.v1.ListClustersResponse - (*AddOrUpdateRemoteClusterRequest)(nil), // 39: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterRequest - (*AddOrUpdateRemoteClusterResponse)(nil), // 40: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse - (*RemoveRemoteClusterRequest)(nil), // 41: temporal.server.api.adminservice.v1.RemoveRemoteClusterRequest - (*RemoveRemoteClusterResponse)(nil), // 42: temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse - (*ListClusterMembersRequest)(nil), // 43: temporal.server.api.adminservice.v1.ListClusterMembersRequest - (*ListClusterMembersResponse)(nil), // 44: temporal.server.api.adminservice.v1.ListClusterMembersResponse - (*GetDLQMessagesRequest)(nil), // 45: temporal.server.api.adminservice.v1.GetDLQMessagesRequest - (*GetDLQMessagesResponse)(nil), // 46: temporal.server.api.adminservice.v1.GetDLQMessagesResponse - (*PurgeDLQMessagesRequest)(nil), // 47: temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest - (*PurgeDLQMessagesResponse)(nil), // 48: temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse - (*MergeDLQMessagesRequest)(nil), // 49: temporal.server.api.adminservice.v1.MergeDLQMessagesRequest - (*MergeDLQMessagesResponse)(nil), // 50: temporal.server.api.adminservice.v1.MergeDLQMessagesResponse - (*RefreshWorkflowTasksRequest)(nil), // 
51: temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest - (*RefreshWorkflowTasksResponse)(nil), // 52: temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse - (*ResendReplicationTasksRequest)(nil), // 53: temporal.server.api.adminservice.v1.ResendReplicationTasksRequest - (*ResendReplicationTasksResponse)(nil), // 54: temporal.server.api.adminservice.v1.ResendReplicationTasksResponse - (*GetTaskQueueTasksRequest)(nil), // 55: temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest - (*GetTaskQueueTasksResponse)(nil), // 56: temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse - (*DeleteWorkflowExecutionRequest)(nil), // 57: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest - (*DeleteWorkflowExecutionResponse)(nil), // 58: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse - (*StreamWorkflowReplicationMessagesRequest)(nil), // 59: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest - (*StreamWorkflowReplicationMessagesResponse)(nil), // 60: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse - (*GetNamespaceRequest)(nil), // 61: temporal.server.api.adminservice.v1.GetNamespaceRequest - (*GetNamespaceResponse)(nil), // 62: temporal.server.api.adminservice.v1.GetNamespaceResponse - (*GetDLQTasksRequest)(nil), // 63: temporal.server.api.adminservice.v1.GetDLQTasksRequest - (*GetDLQTasksResponse)(nil), // 64: temporal.server.api.adminservice.v1.GetDLQTasksResponse - (*PurgeDLQTasksRequest)(nil), // 65: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest - (*PurgeDLQTasksResponse)(nil), // 66: temporal.server.api.adminservice.v1.PurgeDLQTasksResponse - (*DLQJobToken)(nil), // 67: temporal.server.api.adminservice.v1.DLQJobToken - (*MergeDLQTasksRequest)(nil), // 68: temporal.server.api.adminservice.v1.MergeDLQTasksRequest - (*MergeDLQTasksResponse)(nil), // 69: temporal.server.api.adminservice.v1.MergeDLQTasksResponse - 
(*DescribeDLQJobRequest)(nil), // 70: temporal.server.api.adminservice.v1.DescribeDLQJobRequest - (*DescribeDLQJobResponse)(nil), // 71: temporal.server.api.adminservice.v1.DescribeDLQJobResponse - (*CancelDLQJobRequest)(nil), // 72: temporal.server.api.adminservice.v1.CancelDLQJobRequest - (*CancelDLQJobResponse)(nil), // 73: temporal.server.api.adminservice.v1.CancelDLQJobResponse - (*AddTasksRequest)(nil), // 74: temporal.server.api.adminservice.v1.AddTasksRequest - (*AddTasksResponse)(nil), // 75: temporal.server.api.adminservice.v1.AddTasksResponse - (*ListQueuesRequest)(nil), // 76: temporal.server.api.adminservice.v1.ListQueuesRequest - (*ListQueuesResponse)(nil), // 77: temporal.server.api.adminservice.v1.ListQueuesResponse - nil, // 78: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry - nil, // 79: temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntry - nil, // 80: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntry - nil, // 81: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntry - nil, // 82: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.MappingEntry - nil, // 83: temporal.server.api.adminservice.v1.DescribeClusterResponse.SupportedClientsEntry - nil, // 84: temporal.server.api.adminservice.v1.DescribeClusterResponse.TagsEntry - (*AddTasksRequest_Task)(nil), // 85: temporal.server.api.adminservice.v1.AddTasksRequest.Task - (*ListQueuesResponse_QueueInfo)(nil), // 86: temporal.server.api.adminservice.v1.ListQueuesResponse.QueueInfo - (*v1.WorkflowExecution)(nil), // 87: temporal.api.common.v1.WorkflowExecution - (*v1.DataBlob)(nil), // 88: temporal.api.common.v1.DataBlob - (*v11.VersionHistory)(nil), // 89: temporal.server.api.history.v1.VersionHistory - (*v12.WorkflowMutableState)(nil), // 90: temporal.server.api.persistence.v1.WorkflowMutableState - (*v13.NamespaceCacheInfo)(nil), // 91: 
temporal.server.api.namespace.v1.NamespaceCacheInfo - (*v12.ShardInfo)(nil), // 92: temporal.server.api.persistence.v1.ShardInfo - (*v11.TaskRange)(nil), // 93: temporal.server.api.history.v1.TaskRange - (v14.TaskType)(0), // 94: temporal.server.api.enums.v1.TaskType - (*timestamppb.Timestamp)(nil), // 95: google.protobuf.Timestamp - (*v15.ReplicationToken)(nil), // 96: temporal.server.api.replication.v1.ReplicationToken - (*v15.ReplicationMessages)(nil), // 97: temporal.server.api.replication.v1.ReplicationMessages - (*v15.ReplicationTaskInfo)(nil), // 98: temporal.server.api.replication.v1.ReplicationTaskInfo - (*v15.ReplicationTask)(nil), // 99: temporal.server.api.replication.v1.ReplicationTask - (*v17.WorkflowExecutionInfo)(nil), // 100: temporal.api.workflow.v1.WorkflowExecutionInfo - (*v18.MembershipInfo)(nil), // 101: temporal.server.api.cluster.v1.MembershipInfo - (*v19.VersionInfo)(nil), // 102: temporal.api.version.v1.VersionInfo - (*v12.ClusterMetadata)(nil), // 103: temporal.server.api.persistence.v1.ClusterMetadata - (*durationpb.Duration)(nil), // 104: google.protobuf.Duration - (v14.ClusterMemberRole)(0), // 105: temporal.server.api.enums.v1.ClusterMemberRole - (*v18.ClusterMember)(nil), // 106: temporal.server.api.cluster.v1.ClusterMember - (v14.DeadLetterQueueType)(0), // 107: temporal.server.api.enums.v1.DeadLetterQueueType - (v16.TaskQueueType)(0), // 108: temporal.api.enums.v1.TaskQueueType - (*v12.AllocatedTaskInfo)(nil), // 109: temporal.server.api.persistence.v1.AllocatedTaskInfo - (*v15.SyncReplicationState)(nil), // 110: temporal.server.api.replication.v1.SyncReplicationState - (*v15.WorkflowReplicationMessages)(nil), // 111: temporal.server.api.replication.v1.WorkflowReplicationMessages - (*v110.NamespaceInfo)(nil), // 112: temporal.api.namespace.v1.NamespaceInfo - (*v110.NamespaceConfig)(nil), // 113: temporal.api.namespace.v1.NamespaceConfig - (*v111.NamespaceReplicationConfig)(nil), // 114: 
temporal.api.replication.v1.NamespaceReplicationConfig - (*v111.FailoverStatus)(nil), // 115: temporal.api.replication.v1.FailoverStatus - (*v112.HistoryDLQKey)(nil), // 116: temporal.server.api.common.v1.HistoryDLQKey - (*v112.HistoryDLQTask)(nil), // 117: temporal.server.api.common.v1.HistoryDLQTask - (*v112.HistoryDLQTaskMetadata)(nil), // 118: temporal.server.api.common.v1.HistoryDLQTaskMetadata - (v14.DLQOperationType)(0), // 119: temporal.server.api.enums.v1.DLQOperationType - (v14.DLQOperationState)(0), // 120: temporal.server.api.enums.v1.DLQOperationState - (v16.IndexedValueType)(0), // 121: temporal.api.enums.v1.IndexedValueType +type SyncWorkflowStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + VersionedTransitionArtifact *v15.VersionedTransitionArtifact `protobuf:"bytes,5,opt,name=versioned_transition_artifact,json=versionedTransitionArtifact,proto3" json:"versioned_transition_artifact,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -var file_temporal_server_api_adminservice_v1_request_response_proto_depIdxs = []int32{ - 87, // 0: temporal.server.api.adminservice.v1.RebuildMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 87, // 1: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 88, // 2: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest.history_batches:type_name -> temporal.api.common.v1.DataBlob - 89, // 3: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 87, // 4: temporal.server.api.adminservice.v1.DescribeMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 90, // 5: temporal.server.api.adminservice.v1.DescribeMutableStateResponse.cache_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState - 90, 
// 6: temporal.server.api.adminservice.v1.DescribeMutableStateResponse.database_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState - 87, // 7: temporal.server.api.adminservice.v1.DescribeHistoryHostRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 91, // 8: temporal.server.api.adminservice.v1.DescribeHistoryHostResponse.namespace_cache:type_name -> temporal.server.api.namespace.v1.NamespaceCacheInfo - 92, // 9: temporal.server.api.adminservice.v1.GetShardResponse.shard_info:type_name -> temporal.server.api.persistence.v1.ShardInfo - 93, // 10: temporal.server.api.adminservice.v1.ListHistoryTasksRequest.task_range:type_name -> temporal.server.api.history.v1.TaskRange - 14, // 11: temporal.server.api.adminservice.v1.ListHistoryTasksResponse.tasks:type_name -> temporal.server.api.adminservice.v1.Task - 94, // 12: temporal.server.api.adminservice.v1.Task.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 95, // 13: temporal.server.api.adminservice.v1.Task.fire_time:type_name -> google.protobuf.Timestamp - 95, // 14: temporal.server.api.adminservice.v1.RemoveTaskRequest.visibility_time:type_name -> google.protobuf.Timestamp - 87, // 15: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 88, // 16: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response.history_batches:type_name -> temporal.api.common.v1.DataBlob - 89, // 17: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 87, // 18: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 88, // 19: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse.history_batches:type_name -> temporal.api.common.v1.DataBlob - 89, // 20: 
temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 96, // 21: temporal.server.api.adminservice.v1.GetReplicationMessagesRequest.tokens:type_name -> temporal.server.api.replication.v1.ReplicationToken - 78, // 22: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.shard_messages:type_name -> temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry - 97, // 23: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse.messages:type_name -> temporal.server.api.replication.v1.ReplicationMessages - 98, // 24: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest.task_infos:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo - 99, // 25: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask - 87, // 26: temporal.server.api.adminservice.v1.ReapplyEventsRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 88, // 27: temporal.server.api.adminservice.v1.ReapplyEventsRequest.events:type_name -> temporal.api.common.v1.DataBlob - 79, // 28: temporal.server.api.adminservice.v1.AddSearchAttributesRequest.search_attributes:type_name -> temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntry - 80, // 29: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.custom_attributes:type_name -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntry - 81, // 30: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.system_attributes:type_name -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntry - 82, // 31: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.mapping:type_name -> 
temporal.server.api.adminservice.v1.GetSearchAttributesResponse.MappingEntry - 100, // 32: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.add_workflow_execution_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionInfo - 83, // 33: temporal.server.api.adminservice.v1.DescribeClusterResponse.supported_clients:type_name -> temporal.server.api.adminservice.v1.DescribeClusterResponse.SupportedClientsEntry - 101, // 34: temporal.server.api.adminservice.v1.DescribeClusterResponse.membership_info:type_name -> temporal.server.api.cluster.v1.MembershipInfo - 102, // 35: temporal.server.api.adminservice.v1.DescribeClusterResponse.version_info:type_name -> temporal.api.version.v1.VersionInfo - 84, // 36: temporal.server.api.adminservice.v1.DescribeClusterResponse.tags:type_name -> temporal.server.api.adminservice.v1.DescribeClusterResponse.TagsEntry - 103, // 37: temporal.server.api.adminservice.v1.ListClustersResponse.clusters:type_name -> temporal.server.api.persistence.v1.ClusterMetadata - 104, // 38: temporal.server.api.adminservice.v1.ListClusterMembersRequest.last_heartbeat_within:type_name -> google.protobuf.Duration - 105, // 39: temporal.server.api.adminservice.v1.ListClusterMembersRequest.role:type_name -> temporal.server.api.enums.v1.ClusterMemberRole - 95, // 40: temporal.server.api.adminservice.v1.ListClusterMembersRequest.session_started_after_time:type_name -> google.protobuf.Timestamp - 106, // 41: temporal.server.api.adminservice.v1.ListClusterMembersResponse.active_members:type_name -> temporal.server.api.cluster.v1.ClusterMember - 107, // 42: temporal.server.api.adminservice.v1.GetDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 107, // 43: temporal.server.api.adminservice.v1.GetDLQMessagesResponse.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 99, // 44: temporal.server.api.adminservice.v1.GetDLQMessagesResponse.replication_tasks:type_name -> 
temporal.server.api.replication.v1.ReplicationTask - 98, // 45: temporal.server.api.adminservice.v1.GetDLQMessagesResponse.replication_tasks_info:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo - 107, // 46: temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 107, // 47: temporal.server.api.adminservice.v1.MergeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 87, // 48: temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 108, // 49: temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType - 109, // 50: temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse.tasks:type_name -> temporal.server.api.persistence.v1.AllocatedTaskInfo - 87, // 51: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 110, // 52: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest.sync_replication_state:type_name -> temporal.server.api.replication.v1.SyncReplicationState - 111, // 53: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse.messages:type_name -> temporal.server.api.replication.v1.WorkflowReplicationMessages - 112, // 54: temporal.server.api.adminservice.v1.GetNamespaceResponse.info:type_name -> temporal.api.namespace.v1.NamespaceInfo - 113, // 55: temporal.server.api.adminservice.v1.GetNamespaceResponse.config:type_name -> temporal.api.namespace.v1.NamespaceConfig - 114, // 56: temporal.server.api.adminservice.v1.GetNamespaceResponse.replication_config:type_name -> temporal.api.replication.v1.NamespaceReplicationConfig - 115, // 57: temporal.server.api.adminservice.v1.GetNamespaceResponse.failover_history:type_name -> temporal.api.replication.v1.FailoverStatus - 116, 
// 58: temporal.server.api.adminservice.v1.GetDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey - 117, // 59: temporal.server.api.adminservice.v1.GetDLQTasksResponse.dlq_tasks:type_name -> temporal.server.api.common.v1.HistoryDLQTask - 116, // 60: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey - 118, // 61: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest.inclusive_max_task_metadata:type_name -> temporal.server.api.common.v1.HistoryDLQTaskMetadata - 116, // 62: temporal.server.api.adminservice.v1.MergeDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey - 118, // 63: temporal.server.api.adminservice.v1.MergeDLQTasksRequest.inclusive_max_task_metadata:type_name -> temporal.server.api.common.v1.HistoryDLQTaskMetadata - 116, // 64: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey - 119, // 65: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.operation_type:type_name -> temporal.server.api.enums.v1.DLQOperationType - 120, // 66: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.operation_state:type_name -> temporal.server.api.enums.v1.DLQOperationState - 95, // 67: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.start_time:type_name -> google.protobuf.Timestamp - 95, // 68: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.end_time:type_name -> google.protobuf.Timestamp - 85, // 69: temporal.server.api.adminservice.v1.AddTasksRequest.tasks:type_name -> temporal.server.api.adminservice.v1.AddTasksRequest.Task - 86, // 70: temporal.server.api.adminservice.v1.ListQueuesResponse.queues:type_name -> temporal.server.api.adminservice.v1.ListQueuesResponse.QueueInfo - 97, // 71: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry.value:type_name -> 
temporal.server.api.replication.v1.ReplicationMessages - 121, // 72: temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntry.value:type_name -> temporal.api.enums.v1.IndexedValueType - 121, // 73: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntry.value:type_name -> temporal.api.enums.v1.IndexedValueType - 121, // 74: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntry.value:type_name -> temporal.api.enums.v1.IndexedValueType - 88, // 75: temporal.server.api.adminservice.v1.AddTasksRequest.Task.blob:type_name -> temporal.api.common.v1.DataBlob - 76, // [76:76] is the sub-list for method output_type - 76, // [76:76] is the sub-list for method input_type - 76, // [76:76] is the sub-list for extension type_name - 76, // [76:76] is the sub-list for extension extendee - 0, // [0:76] is the sub-list for field type_name + +func (x *SyncWorkflowStateResponse) Reset() { + *x = SyncWorkflowStateResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func init() { file_temporal_server_api_adminservice_v1_request_response_proto_init() } -func file_temporal_server_api_adminservice_v1_request_response_proto_init() { - if File_temporal_server_api_adminservice_v1_request_response_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildMutableStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildMutableStateResponse); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeMutableStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeMutableStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeHistoryHostRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeHistoryHostResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloseShardRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloseShardResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHistoryTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHistoryTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Task); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryV2Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryV2Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - 
} - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNamespaceReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNamespaceReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[27].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*ReapplyEventsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReapplyEventsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddSearchAttributesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddSearchAttributesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveSearchAttributesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveSearchAttributesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSearchAttributesRequest); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSearchAttributesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeClusterResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClustersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClustersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddOrUpdateRemoteClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddOrUpdateRemoteClusterResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveRemoteClusterRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveRemoteClusterResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClusterMembersRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListClusterMembersResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch 
v := v.(*GetDLQMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeDLQMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeDLQMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MergeDLQMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MergeDLQMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshWorkflowTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshWorkflowTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResendReplicationTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResendReplicationTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTaskQueueTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTaskQueueTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[59].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*StreamWorkflowReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamWorkflowReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNamespaceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetNamespaceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } +func (x *SyncWorkflowStateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncWorkflowStateResponse) ProtoMessage() {} + +func (x *SyncWorkflowStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[81] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[64].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncWorkflowStateResponse.ProtoReflect.Descriptor instead. +func (*SyncWorkflowStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{81} +} + +func (x *SyncWorkflowStateResponse) GetVersionedTransitionArtifact() *v15.VersionedTransitionArtifact { + if x != nil { + return x.VersionedTransitionArtifact + } + return nil +} + +type GenerateLastHistoryReplicationTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + TargetClusters []string `protobuf:"bytes,3,rep,name=target_clusters,json=targetClusters,proto3" json:"target_clusters,omitempty"` + Archetype string `protobuf:"bytes,4,opt,name=archetype,proto3" json:"archetype,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,5,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GenerateLastHistoryReplicationTasksRequest) Reset() { + *x = GenerateLastHistoryReplicationTasksRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GenerateLastHistoryReplicationTasksRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateLastHistoryReplicationTasksRequest) ProtoMessage() {} + +func (x 
*GenerateLastHistoryReplicationTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[82] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeDLQTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateLastHistoryReplicationTasksRequest.ProtoReflect.Descriptor instead. +func (*GenerateLastHistoryReplicationTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{82} +} + +func (x *GenerateLastHistoryReplicationTasksRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GenerateLastHistoryReplicationTasksRequest) GetExecution() *v1.WorkflowExecution { + if x != nil { + return x.Execution + } + return nil +} + +func (x *GenerateLastHistoryReplicationTasksRequest) GetTargetClusters() []string { + if x != nil { + return x.TargetClusters + } + return nil +} + +func (x *GenerateLastHistoryReplicationTasksRequest) GetArchetype() string { + if x != nil { + return x.Archetype + } + return "" +} + +func (x *GenerateLastHistoryReplicationTasksRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +type GenerateLastHistoryReplicationTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + StateTransitionCount int64 `protobuf:"varint,1,opt,name=state_transition_count,json=stateTransitionCount,proto3" json:"state_transition_count,omitempty"` + HistoryLength int64 
`protobuf:"varint,2,opt,name=history_length,json=historyLength,proto3" json:"history_length,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GenerateLastHistoryReplicationTasksResponse) Reset() { + *x = GenerateLastHistoryReplicationTasksResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GenerateLastHistoryReplicationTasksResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GenerateLastHistoryReplicationTasksResponse) ProtoMessage() {} + +func (x *GenerateLastHistoryReplicationTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[83] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeDLQTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GenerateLastHistoryReplicationTasksResponse.ProtoReflect.Descriptor instead. 
+func (*GenerateLastHistoryReplicationTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{83} +} + +func (x *GenerateLastHistoryReplicationTasksResponse) GetStateTransitionCount() int64 { + if x != nil { + return x.StateTransitionCount + } + return 0 +} + +func (x *GenerateLastHistoryReplicationTasksResponse) GetHistoryLength() int64 { + if x != nil { + return x.HistoryLength + } + return 0 +} + +type DescribeTaskQueuePartitionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + TaskQueuePartition *v114.TaskQueuePartition `protobuf:"bytes,2,opt,name=task_queue_partition,json=taskQueuePartition,proto3" json:"task_queue_partition,omitempty"` + // Absent means unversioned queue. Ignored for sticky partitions. + BuildIds *v115.TaskQueueVersionSelection `protobuf:"bytes,3,opt,name=build_ids,json=buildIds,proto3" json:"build_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeTaskQueuePartitionRequest) Reset() { + *x = DescribeTaskQueuePartitionRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeTaskQueuePartitionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeTaskQueuePartitionRequest) ProtoMessage() {} + +func (x *DescribeTaskQueuePartitionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[84] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} 
{ - switch v := v.(*DLQJobToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeTaskQueuePartitionRequest.ProtoReflect.Descriptor instead. +func (*DescribeTaskQueuePartitionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{84} +} + +func (x *DescribeTaskQueuePartitionRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *DescribeTaskQueuePartitionRequest) GetTaskQueuePartition() *v114.TaskQueuePartition { + if x != nil { + return x.TaskQueuePartition + } + return nil +} + +func (x *DescribeTaskQueuePartitionRequest) GetBuildIds() *v115.TaskQueueVersionSelection { + if x != nil { + return x.BuildIds + } + return nil +} + +type DescribeTaskQueuePartitionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // contains k-v pairs of the type: buildID -> TaskQueueVersionInfoInternal + VersionsInfoInternal map[string]*v114.TaskQueueVersionInfoInternal `protobuf:"bytes,1,rep,name=versions_info_internal,json=versionsInfoInternal,proto3" json:"versions_info_internal,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeTaskQueuePartitionResponse) Reset() { + *x = DescribeTaskQueuePartitionResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeTaskQueuePartitionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeTaskQueuePartitionResponse) ProtoMessage() {} + +func (x *DescribeTaskQueuePartitionResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[85] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MergeDLQTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeTaskQueuePartitionResponse.ProtoReflect.Descriptor instead. +func (*DescribeTaskQueuePartitionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{85} +} + +func (x *DescribeTaskQueuePartitionResponse) GetVersionsInfoInternal() map[string]*v114.TaskQueueVersionInfoInternal { + if x != nil { + return x.VersionsInfoInternal + } + return nil +} + +type ForceUnloadTaskQueuePartitionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + TaskQueuePartition *v114.TaskQueuePartition `protobuf:"bytes,2,opt,name=task_queue_partition,json=taskQueuePartition,proto3" json:"task_queue_partition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceUnloadTaskQueuePartitionRequest) Reset() { + *x = ForceUnloadTaskQueuePartitionRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceUnloadTaskQueuePartitionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceUnloadTaskQueuePartitionRequest) ProtoMessage() {} + +func (x *ForceUnloadTaskQueuePartitionRequest) ProtoReflect() protoreflect.Message { + 
mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[86] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MergeDLQTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceUnloadTaskQueuePartitionRequest.ProtoReflect.Descriptor instead. +func (*ForceUnloadTaskQueuePartitionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{86} +} + +func (x *ForceUnloadTaskQueuePartitionRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ForceUnloadTaskQueuePartitionRequest) GetTaskQueuePartition() *v114.TaskQueuePartition { + if x != nil { + return x.TaskQueuePartition + } + return nil +} + +type ForceUnloadTaskQueuePartitionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + WasLoaded bool `protobuf:"varint,1,opt,name=was_loaded,json=wasLoaded,proto3" json:"was_loaded,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceUnloadTaskQueuePartitionResponse) Reset() { + *x = ForceUnloadTaskQueuePartitionResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceUnloadTaskQueuePartitionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceUnloadTaskQueuePartitionResponse) ProtoMessage() {} + +func (x *ForceUnloadTaskQueuePartitionResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[87] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeDLQJobRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceUnloadTaskQueuePartitionResponse.ProtoReflect.Descriptor instead. +func (*ForceUnloadTaskQueuePartitionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{87} +} + +func (x *ForceUnloadTaskQueuePartitionResponse) GetWasLoaded() bool { + if x != nil { + return x.WasLoaded + } + return false +} + +type GetTaskQueueUserDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v16.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + // If non-zero, fetch the user data loaded by this partition instead of the root. 
+ PartitionId int32 `protobuf:"varint,4,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetTaskQueueUserDataRequest) Reset() { + *x = GetTaskQueueUserDataRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetTaskQueueUserDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTaskQueueUserDataRequest) ProtoMessage() {} + +func (x *GetTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[88] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeDLQJobResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. 
+func (*GetTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{88} +} + +func (x *GetTaskQueueUserDataRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *GetTaskQueueUserDataRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *GetTaskQueueUserDataRequest) GetTaskQueueType() v16.TaskQueueType { + if x != nil { + return x.TaskQueueType + } + return v16.TaskQueueType(0) +} + +func (x *GetTaskQueueUserDataRequest) GetPartitionId() int32 { + if x != nil { + return x.PartitionId + } + return 0 +} + +type GetTaskQueueUserDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + UserData *v12.TaskQueueTypeUserData `protobuf:"bytes,1,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetTaskQueueUserDataResponse) Reset() { + *x = GetTaskQueueUserDataResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetTaskQueueUserDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTaskQueueUserDataResponse) ProtoMessage() {} + +func (x *GetTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[89] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelDLQJobRequest); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. +func (*GetTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{89} +} + +func (x *GetTaskQueueUserDataResponse) GetUserData() *v12.TaskQueueTypeUserData { + if x != nil { + return x.UserData + } + return nil +} + +func (x *GetTaskQueueUserDataResponse) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +// StartAdminBatchOperationRequest starts an admin batch operation. +// WARNING: Batch Operations are exposed to all users of the namespace. Admin Batch Operations should be exercised with caution. +type StartAdminBatchOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace that contains the batch operation. + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // Visibility query defines the group of workflows to apply the batch operation. + // This field and `executions` are mutually exclusive. + VisibilityQuery string `protobuf:"bytes,2,opt,name=visibility_query,json=visibilityQuery,proto3" json:"visibility_query,omitempty"` + // A unique job identifier for this batch operation. + JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + // Reason for the operation. + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + // List of workflow executions to apply the batch operation to. + // This field and `visibility_query` are mutually exclusive. + Executions []*v1.WorkflowExecution `protobuf:"bytes,5,rep,name=executions,proto3" json:"executions,omitempty"` + // The identity of the worker/client. 
+ Identity string `protobuf:"bytes,6,opt,name=identity,proto3" json:"identity,omitempty"` + // The admin batch operation to perform. + // + // Types that are valid to be assigned to Operation: + // + // *StartAdminBatchOperationRequest_RefreshTasksOperation + Operation isStartAdminBatchOperationRequest_Operation `protobuf_oneof:"operation"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartAdminBatchOperationRequest) Reset() { + *x = StartAdminBatchOperationRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[90] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartAdminBatchOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartAdminBatchOperationRequest) ProtoMessage() {} + +func (x *StartAdminBatchOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[90] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelDLQJobResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartAdminBatchOperationRequest.ProtoReflect.Descriptor instead. 
+func (*StartAdminBatchOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{90} +} + +func (x *StartAdminBatchOperationRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *StartAdminBatchOperationRequest) GetVisibilityQuery() string { + if x != nil { + return x.VisibilityQuery + } + return "" +} + +func (x *StartAdminBatchOperationRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *StartAdminBatchOperationRequest) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *StartAdminBatchOperationRequest) GetExecutions() []*v1.WorkflowExecution { + if x != nil { + return x.Executions + } + return nil +} + +func (x *StartAdminBatchOperationRequest) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *StartAdminBatchOperationRequest) GetOperation() isStartAdminBatchOperationRequest_Operation { + if x != nil { + return x.Operation + } + return nil +} + +func (x *StartAdminBatchOperationRequest) GetRefreshTasksOperation() *BatchOperationRefreshTasks { + if x != nil { + if x, ok := x.Operation.(*StartAdminBatchOperationRequest_RefreshTasksOperation); ok { + return x.RefreshTasksOperation } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + } + return nil +} + +type isStartAdminBatchOperationRequest_Operation interface { + isStartAdminBatchOperationRequest_Operation() +} + +type StartAdminBatchOperationRequest_RefreshTasksOperation struct { + RefreshTasksOperation *BatchOperationRefreshTasks `protobuf:"bytes,10,opt,name=refresh_tasks_operation,json=refreshTasksOperation,proto3,oneof"` +} + 
+func (*StartAdminBatchOperationRequest_RefreshTasksOperation) isStartAdminBatchOperationRequest_Operation() { +} + +type StartAdminBatchOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartAdminBatchOperationResponse) Reset() { + *x = StartAdminBatchOperationResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[91] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartAdminBatchOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartAdminBatchOperationResponse) ProtoMessage() {} + +func (x *StartAdminBatchOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[91] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartAdminBatchOperationResponse.ProtoReflect.Descriptor instead. +func (*StartAdminBatchOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{91} +} + +// BatchOperationRefreshTasks refreshes tasks for batch executions. +// This regenerates all pending tasks for each execution. 
+type BatchOperationRefreshTasks struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatchOperationRefreshTasks) Reset() { + *x = BatchOperationRefreshTasks{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[92] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatchOperationRefreshTasks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchOperationRefreshTasks) ProtoMessage() {} + +func (x *BatchOperationRefreshTasks) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[92] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchOperationRefreshTasks.ProtoReflect.Descriptor instead. +func (*BatchOperationRefreshTasks) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{92} +} + +type MigrateScheduleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace name. + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // Schedule ID. + ScheduleId string `protobuf:"bytes,2,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` + // Target scheduler implementation. 
+ Target MigrateScheduleRequest_SchedulerTarget `protobuf:"varint,3,opt,name=target,proto3,enum=temporal.server.api.adminservice.v1.MigrateScheduleRequest_SchedulerTarget" json:"target,omitempty"` + // Identity of the caller. + Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` + // Used for request deduplication. + RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrateScheduleRequest) Reset() { + *x = MigrateScheduleRequest{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[93] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrateScheduleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateScheduleRequest) ProtoMessage() {} + +func (x *MigrateScheduleRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[93] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateScheduleRequest.ProtoReflect.Descriptor instead. 
+func (*MigrateScheduleRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{93} +} + +func (x *MigrateScheduleRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *MigrateScheduleRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +func (x *MigrateScheduleRequest) GetTarget() MigrateScheduleRequest_SchedulerTarget { + if x != nil { + return x.Target + } + return MigrateScheduleRequest_SCHEDULER_TARGET_UNSPECIFIED +} + +func (x *MigrateScheduleRequest) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *MigrateScheduleRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type MigrateScheduleResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrateScheduleResponse) Reset() { + *x = MigrateScheduleResponse{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[94] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrateScheduleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateScheduleResponse) ProtoMessage() {} + +func (x *MigrateScheduleResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[94] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddTasksRequest_Task); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms 
+ } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateScheduleResponse.ProtoReflect.Descriptor instead. +func (*MigrateScheduleResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{94} +} + +type AddTasksRequest_Task struct { + state protoimpl.MessageState `protogen:"open.v1"` + CategoryId int32 `protobuf:"varint,1,opt,name=category_id,json=categoryId,proto3" json:"category_id,omitempty"` + Blob *v1.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AddTasksRequest_Task) Reset() { + *x = AddTasksRequest_Task{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[102] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddTasksRequest_Task) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddTasksRequest_Task) ProtoMessage() {} + +func (x *AddTasksRequest_Task) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[102] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesResponse_QueueInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddTasksRequest_Task.ProtoReflect.Descriptor instead. 
+func (*AddTasksRequest_Task) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{74, 0} +} + +func (x *AddTasksRequest_Task) GetCategoryId() int32 { + if x != nil { + return x.CategoryId + } + return 0 +} + +func (x *AddTasksRequest_Task) GetBlob() *v1.DataBlob { + if x != nil { + return x.Blob + } + return nil +} + +type ListQueuesResponse_QueueInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"` + MessageCount int64 `protobuf:"varint,2,opt,name=message_count,json=messageCount,proto3" json:"message_count,omitempty"` + LastMessageId int64 `protobuf:"varint,3,opt,name=last_message_id,json=lastMessageId,proto3" json:"last_message_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListQueuesResponse_QueueInfo) Reset() { + *x = ListQueuesResponse_QueueInfo{} + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListQueuesResponse_QueueInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListQueuesResponse_QueueInfo) ProtoMessage() {} + +func (x *ListQueuesResponse_QueueInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[103] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListQueuesResponse_QueueInfo.ProtoReflect.Descriptor instead. 
+func (*ListQueuesResponse_QueueInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP(), []int{77, 0} +} + +func (x *ListQueuesResponse_QueueInfo) GetQueueName() string { + if x != nil { + return x.QueueName + } + return "" +} + +func (x *ListQueuesResponse_QueueInfo) GetMessageCount() int64 { + if x != nil { + return x.MessageCount + } + return 0 +} + +func (x *ListQueuesResponse_QueueInfo) GetLastMessageId() int64 { + if x != nil { + return x.LastMessageId + } + return 0 +} + +var File_temporal_server_api_adminservice_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc = "" + + "\n" + + ":temporal/server/api/adminservice/v1/request_response.proto\x12#temporal.server.api.adminservice.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a\"temporal/api/enums/v1/common.proto\x1a&temporal/api/enums/v1/task_queue.proto\x1a'temporal/api/namespace/v1/message.proto\x1a)temporal/api/replication/v1/message.proto\x1a'temporal/api/taskqueue/v1/message.proto\x1a%temporal/api/version/v1/message.proto\x1a&temporal/api/workflow/v1/message.proto\x1a,temporal/server/api/cluster/v1/message.proto\x1a'temporal/server/api/common/v1/dlq.proto\x1a*temporal/server/api/enums/v1/cluster.proto\x1a)temporal/server/api/enums/v1/common.proto\x1a&temporal/server/api/enums/v1/dlq.proto\x1a'temporal/server/api/enums/v1/task.proto\x1a+temporal/server/api/health/v1/message.proto\x1a,temporal/server/api/history/v1/message.proto\x1a.temporal/server/api/namespace/v1/message.proto\x1a9temporal/server/api/persistence/v1/cluster_metadata.proto\x1a3temporal/server/api/persistence/v1/executions.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\x1a4temporal/server/api/persistence/v1/task_queues.proto\x1a.temporal/server/api/persistence/v1/tasks.proto\x1a?temporal/server/api/persistence/
v1/workflow_mutable_state.proto\x1a0temporal/server/api/replication/v1/message.proto\x1a.temporal/server/api/taskqueue/v1/message.proto\"\x83\x01\n" + + "\x1aRebuildMutableStateRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\"\x1d\n" + + "\x1bRebuildMutableStateResponse\"\xc1\x02\n" + + "\x1eImportWorkflowExecutionRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12I\n" + + "\x0fhistory_batches\x18\x03 \x03(\v2 .temporal.api.common.v1.DataBlobR\x0ehistoryBatches\x12W\n" + + "\x0fversion_history\x18\x04 \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12\x14\n" + + "\x05token\x18\x05 \x01(\fR\x05token\"7\n" + + "\x1fImportWorkflowExecutionResponse\x12\x14\n" + + "\x05token\x18\x01 \x01(\fR\x05token\"\xf1\x01\n" + + "\x1bDescribeMutableStateRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12*\n" + + "\x11skip_force_reload\x18\x03 \x01(\bR\x0fskipForceReload\x12\x1c\n" + + "\tarchetype\x18\x04 \x01(\tR\tarchetype\x12!\n" + + "\farchetype_id\x18\x05 \x01(\rR\varchetypeId\"\xb6\x02\n" + + "\x1cDescribeMutableStateResponse\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\tR\ashardId\x12!\n" + + "\fhistory_addr\x18\x02 \x01(\tR\vhistoryAddr\x12h\n" + + "\x13cache_mutable_state\x18\x03 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\x11cacheMutableState\x12n\n" + + "\x16database_mutable_state\x18\x04 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\x14databaseMutableState\"\xd2\x01\n" + + "\x1aDescribeHistoryHostRequest\x12!\n" + + "\fhost_address\x18\x01 \x01(\tR\vhostAddress\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12\x1c\n" + + "\tnamespace\x18\x03 
\x01(\tR\tnamespace\x12X\n" + + "\x12workflow_execution\x18\x04 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\"\xde\x01\n" + + "\x1bDescribeHistoryHostResponse\x12#\n" + + "\rshards_number\x18\x01 \x01(\x05R\fshardsNumber\x12\x1b\n" + + "\tshard_ids\x18\x02 \x03(\x05R\bshardIds\x12]\n" + + "\x0fnamespace_cache\x18\x03 \x01(\v24.temporal.server.api.namespace.v1.NamespaceCacheInfoR\x0enamespaceCache\x12\x18\n" + + "\aaddress\x18\x05 \x01(\tR\aaddressJ\x04\b\x04\x10\x05\".\n" + + "\x11CloseShardRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\"\x14\n" + + "\x12CloseShardResponse\",\n" + + "\x0fGetShardRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\"`\n" + + "\x10GetShardResponse\x12L\n" + + "\n" + + "shard_info\x18\x01 \x01(\v2-.temporal.server.api.persistence.v1.ShardInfoR\tshardInfo\"\xe1\x01\n" + + "\x17ListHistoryTasksRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x12\x1a\n" + + "\bcategory\x18\x02 \x01(\x05R\bcategory\x12H\n" + + "\n" + + "task_range\x18\x03 \x01(\v2).temporal.server.api.history.v1.TaskRangeR\ttaskRange\x12\x1d\n" + + "\n" + + "batch_size\x18\x04 \x01(\x05R\tbatchSize\x12&\n" + + "\x0fnext_page_token\x18\x05 \x01(\fR\rnextPageToken\"\x83\x01\n" + + "\x18ListHistoryTasksResponse\x12?\n" + + "\x05tasks\x18\x01 \x03(\v2).temporal.server.api.adminservice.v1.TaskR\x05tasks\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\x92\x02\n" + + "\x04Task\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12\x17\n" + + "\atask_id\x18\x04 \x01(\x03R\x06taskId\x12C\n" + + "\ttask_type\x18\x05 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x127\n" + + "\tfire_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\bfireTime\x12\x18\n" + + "\aversion\x18\a \x01(\x03R\aversion\"\xa8\x01\n" + + "\x11RemoveTaskRequest\x12\x19\n" + + 
"\bshard_id\x18\x01 \x01(\x05R\ashardId\x12\x1a\n" + + "\bcategory\x18\x02 \x01(\x05R\bcategory\x12\x17\n" + + "\atask_id\x18\x03 \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\"\x14\n" + + "\x12RemoveTaskResponse\"\x93\x03\n" + + "'GetWorkflowExecutionRawHistoryV2Request\x12!\n" + + "\fnamespace_id\x18\t \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12$\n" + + "\x0estart_event_id\x18\x03 \x01(\x03R\fstartEventId\x12.\n" + + "\x13start_event_version\x18\x04 \x01(\x03R\x11startEventVersion\x12 \n" + + "\fend_event_id\x18\x05 \x01(\x03R\n" + + "endEventId\x12*\n" + + "\x11end_event_version\x18\x06 \x01(\x03R\x0fendEventVersion\x12*\n" + + "\x11maximum_page_size\x18\a \x01(\x05R\x0fmaximumPageSize\x12&\n" + + "\x0fnext_page_token\x18\b \x01(\fR\rnextPageTokenJ\x04\b\x01\x10\x02\"\xa0\x02\n" + + "(GetWorkflowExecutionRawHistoryV2Response\x12&\n" + + "\x0fnext_page_token\x18\x01 \x01(\fR\rnextPageToken\x12I\n" + + "\x0fhistory_batches\x18\x02 \x03(\v2 .temporal.api.common.v1.DataBlobR\x0ehistoryBatches\x12W\n" + + "\x0fversion_history\x18\x03 \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12(\n" + + "\x10history_node_ids\x18\x04 \x03(\x03R\x0ehistoryNodeIds\"\x8b\x03\n" + + "%GetWorkflowExecutionRawHistoryRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12$\n" + + "\x0estart_event_id\x18\x03 \x01(\x03R\fstartEventId\x12.\n" + + "\x13start_event_version\x18\x04 \x01(\x03R\x11startEventVersion\x12 \n" + + "\fend_event_id\x18\x05 \x01(\x03R\n" + + "endEventId\x12*\n" + + "\x11end_event_version\x18\x06 \x01(\x03R\x0fendEventVersion\x12*\n" + + "\x11maximum_page_size\x18\a \x01(\x05R\x0fmaximumPageSize\x12&\n" + + "\x0fnext_page_token\x18\b \x01(\fR\rnextPageToken\"\x9e\x02\n" + + 
"&GetWorkflowExecutionRawHistoryResponse\x12&\n" + + "\x0fnext_page_token\x18\x01 \x01(\fR\rnextPageToken\x12I\n" + + "\x0fhistory_batches\x18\x02 \x03(\v2 .temporal.api.common.v1.DataBlobR\x0ehistoryBatches\x12W\n" + + "\x0fversion_history\x18\x03 \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12(\n" + + "\x10history_node_ids\x18\x04 \x03(\x03R\x0ehistoryNodeIds\"\x90\x01\n" + + "\x1dGetReplicationMessagesRequest\x12L\n" + + "\x06tokens\x18\x01 \x03(\v24.temporal.server.api.replication.v1.ReplicationTokenR\x06tokens\x12!\n" + + "\fcluster_name\x18\x02 \x01(\tR\vclusterName\"\x9a\x02\n" + + "\x1eGetReplicationMessagesResponse\x12}\n" + + "\x0eshard_messages\x18\x01 \x03(\v2V.temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntryR\rshardMessages\x1ay\n" + + "\x12ShardMessagesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12M\n" + + "\x05value\x18\x02 \x01(\v27.temporal.server.api.replication.v1.ReplicationMessagesR\x05value:\x028\x01\"\xc1\x01\n" + + "&GetNamespaceReplicationMessagesRequest\x129\n" + + "\x19last_retrieved_message_id\x18\x01 \x01(\x03R\x16lastRetrievedMessageId\x129\n" + + "\x19last_processed_message_id\x18\x02 \x01(\x03R\x16lastProcessedMessageId\x12!\n" + + "\fcluster_name\x18\x03 \x01(\tR\vclusterName\"~\n" + + "'GetNamespaceReplicationMessagesResponse\x12S\n" + + "\bmessages\x18\x01 \x01(\v27.temporal.server.api.replication.v1.ReplicationMessagesR\bmessages\"z\n" + + " GetDLQReplicationMessagesRequest\x12V\n" + + "\n" + + "task_infos\x18\x01 \x03(\v27.temporal.server.api.replication.v1.ReplicationTaskInfoR\ttaskInfos\"\x85\x01\n" + + "!GetDLQReplicationMessagesResponse\x12`\n" + + "\x11replication_tasks\x18\x01 \x03(\v23.temporal.server.api.replication.v1.ReplicationTaskR\x10replicationTasks\"\xd3\x01\n" + + "\x14ReapplyEventsRequest\x12!\n" + + "\fnamespace_id\x18\x04 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 
\x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x128\n" + + "\x06events\x18\x03 \x01(\v2 .temporal.api.common.v1.DataBlobR\x06eventsJ\x04\b\x01\x10\x02\"\x17\n" + + "\x15ReapplyEventsResponse\"\xfa\x02\n" + + "\x1aAddSearchAttributesRequest\x12\x82\x01\n" + + "\x11search_attributes\x18\x01 \x03(\v2U.temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntryR\x10searchAttributes\x12\x1d\n" + + "\n" + + "index_name\x18\x02 \x01(\tR\tindexName\x12,\n" + + "\x12skip_schema_update\x18\x03 \x01(\bR\x10skipSchemaUpdate\x12\x1c\n" + + "\tnamespace\x18\x04 \x01(\tR\tnamespace\x1al\n" + + "\x15SearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12=\n" + + "\x05value\x18\x02 \x01(\x0e2'.temporal.api.enums.v1.IndexedValueTypeR\x05value:\x028\x01\"\x1d\n" + + "\x1bAddSearchAttributesResponse\"\x89\x01\n" + + "\x1dRemoveSearchAttributesRequest\x12+\n" + + "\x11search_attributes\x18\x01 \x03(\tR\x10searchAttributes\x12\x1d\n" + + "\n" + + "index_name\x18\x02 \x01(\tR\tindexName\x12\x1c\n" + + "\tnamespace\x18\x03 \x01(\tR\tnamespace\" \n" + + "\x1eRemoveSearchAttributesResponse\"Y\n" + + "\x1aGetSearchAttributesRequest\x12\x1d\n" + + "\n" + + "index_name\x18\x01 \x01(\tR\tindexName\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\tR\tnamespace\"\x9a\x06\n" + + "\x1bGetSearchAttributesResponse\x12\x83\x01\n" + + "\x11custom_attributes\x18\x01 \x03(\v2V.temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntryR\x10customAttributes\x12\x83\x01\n" + + "\x11system_attributes\x18\x02 \x03(\v2V.temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntryR\x10systemAttributes\x12g\n" + + "\amapping\x18\x03 \x03(\v2M.temporal.server.api.adminservice.v1.GetSearchAttributesResponse.MappingEntryR\amapping\x12n\n" + + "\x1badd_workflow_execution_info\x18\x04 \x01(\v2/.temporal.api.workflow.v1.WorkflowExecutionInfoR\x18addWorkflowExecutionInfo\x1al\n" + + 
"\x15CustomAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12=\n" + + "\x05value\x18\x02 \x01(\x0e2'.temporal.api.enums.v1.IndexedValueTypeR\x05value:\x028\x01\x1al\n" + + "\x15SystemAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12=\n" + + "\x05value\x18\x02 \x01(\x0e2'.temporal.api.enums.v1.IndexedValueTypeR\x05value:\x028\x01\x1a:\n" + + "\fMappingEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\";\n" + + "\x16DescribeClusterRequest\x12!\n" + + "\fcluster_name\x18\x01 \x01(\tR\vclusterName\"\xe1\a\n" + + "\x17DescribeClusterResponse\x12\x7f\n" + + "\x11supported_clients\x18\x01 \x03(\v2R.temporal.server.api.adminservice.v1.DescribeClusterResponse.SupportedClientsEntryR\x10supportedClients\x12%\n" + + "\x0eserver_version\x18\x02 \x01(\tR\rserverVersion\x12W\n" + + "\x0fmembership_info\x18\x03 \x01(\v2..temporal.server.api.cluster.v1.MembershipInfoR\x0emembershipInfo\x12\x1d\n" + + "\n" + + "cluster_id\x18\x04 \x01(\tR\tclusterId\x12!\n" + + "\fcluster_name\x18\x05 \x01(\tR\vclusterName\x12.\n" + + "\x13history_shard_count\x18\x06 \x01(\x05R\x11historyShardCount\x12+\n" + + "\x11persistence_store\x18\a \x01(\tR\x10persistenceStore\x12)\n" + + "\x10visibility_store\x18\b \x01(\tR\x0fvisibilityStore\x12G\n" + + "\fversion_info\x18\t \x01(\v2$.temporal.api.version.v1.VersionInfoR\vversionInfo\x12<\n" + + "\x1afailover_version_increment\x18\n" + + " \x01(\x03R\x18failoverVersionIncrement\x128\n" + + "\x18initial_failover_version\x18\v \x01(\x03R\x16initialFailoverVersion\x12=\n" + + "\x1bis_global_namespace_enabled\x18\f \x01(\bR\x18isGlobalNamespaceEnabled\x12Z\n" + + "\x04tags\x18\r \x03(\v2F.temporal.server.api.adminservice.v1.DescribeClusterResponse.TagsEntryR\x04tags\x12!\n" + + "\fhttp_address\x18\x0e \x01(\tR\vhttpAddress\x1aC\n" + + "\x15SupportedClientsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 
\x01(\tR\x05value:\x028\x01\x1a7\n" + + "\tTagsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"Z\n" + + "\x13ListClustersRequest\x12\x1b\n" + + "\tpage_size\x18\x01 \x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\x8f\x01\n" + + "\x14ListClustersResponse\x12O\n" + + "\bclusters\x18\x01 \x03(\v23.temporal.server.api.persistence.v1.ClusterMetadataR\bclusters\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\xfc\x01\n" + + "\x1fAddOrUpdateRemoteClusterRequest\x12)\n" + + "\x10frontend_address\x18\x01 \x01(\tR\x0ffrontendAddress\x12G\n" + + " enable_remote_cluster_connection\x18\x02 \x01(\bR\x1denableRemoteClusterConnection\x126\n" + + "\x15frontend_http_address\x18\x03 \x01(\tB\x02\x18\x01R\x13frontendHttpAddress\x12-\n" + + "\x12enable_replication\x18\x04 \x01(\bR\x11enableReplication\"\"\n" + + " AddOrUpdateRemoteClusterResponse\"?\n" + + "\x1aRemoveRemoteClusterRequest\x12!\n" + + "\fcluster_name\x18\x01 \x01(\tR\vclusterName\"\x1d\n" + + "\x1bRemoveRemoteClusterResponse\"\x87\x03\n" + + "\x19ListClusterMembersRequest\x12M\n" + + "\x15last_heartbeat_within\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\x13lastHeartbeatWithin\x12\x1f\n" + + "\vrpc_address\x18\x02 \x01(\tR\n" + + "rpcAddress\x12\x17\n" + + "\ahost_id\x18\x03 \x01(\tR\x06hostId\x12C\n" + + "\x04role\x18\x04 \x01(\x0e2/.temporal.server.api.enums.v1.ClusterMemberRoleR\x04role\x12W\n" + + "\x1asession_started_after_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x17sessionStartedAfterTime\x12\x1b\n" + + "\tpage_size\x18\x06 \x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\a \x01(\fR\rnextPageToken\"\x9a\x01\n" + + "\x1aListClusterMembersResponse\x12T\n" + + "\x0eactive_members\x18\x01 \x03(\v2-.temporal.server.api.cluster.v1.ClusterMemberR\ractiveMembers\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\xad\x02\n" + + 
"\x15GetDLQMessagesRequest\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12%\n" + + "\x0esource_cluster\x18\x03 \x01(\tR\rsourceCluster\x127\n" + + "\x18inclusive_end_message_id\x18\x04 \x01(\x03R\x15inclusiveEndMessageId\x12*\n" + + "\x11maximum_page_size\x18\x05 \x01(\x05R\x0fmaximumPageSize\x12&\n" + + "\x0fnext_page_token\x18\x06 \x01(\fR\rnextPageToken\"\xd8\x02\n" + + "\x16GetDLQMessagesResponse\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12`\n" + + "\x11replication_tasks\x18\x02 \x03(\v23.temporal.server.api.replication.v1.ReplicationTaskR\x10replicationTasks\x12&\n" + + "\x0fnext_page_token\x18\x03 \x01(\fR\rnextPageToken\x12m\n" + + "\x16replication_tasks_info\x18\x04 \x03(\v27.temporal.server.api.replication.v1.ReplicationTaskInfoR\x14replicationTasksInfo\"\xdb\x01\n" + + "\x17PurgeDLQMessagesRequest\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12%\n" + + "\x0esource_cluster\x18\x03 \x01(\tR\rsourceCluster\x127\n" + + "\x18inclusive_end_message_id\x18\x04 \x01(\x03R\x15inclusiveEndMessageId\"\x1a\n" + + "\x18PurgeDLQMessagesResponse\"\xaf\x02\n" + + "\x17MergeDLQMessagesRequest\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12%\n" + + "\x0esource_cluster\x18\x03 \x01(\tR\rsourceCluster\x127\n" + + "\x18inclusive_end_message_id\x18\x04 \x01(\x03R\x15inclusiveEndMessageId\x12*\n" + + "\x11maximum_page_size\x18\x05 \x01(\x05R\x0fmaximumPageSize\x12&\n" + + "\x0fnext_page_token\x18\x06 \x01(\fR\rnextPageToken\"B\n" + + "\x18MergeDLQMessagesResponse\x12&\n" + + "\x0fnext_page_token\x18\x01 \x01(\fR\rnextPageToken\"\xd0\x01\n" + + "\x1bRefreshWorkflowTasksRequest\x12!\n" + + 
"\fnamespace_id\x18\x03 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12\x1c\n" + + "\tarchetype\x18\x04 \x01(\tR\tarchetype\x12!\n" + + "\farchetype_id\x18\x05 \x01(\rR\varchetypeIdJ\x04\b\x01\x10\x02\"\x1e\n" + + "\x1cRefreshWorkflowTasksResponse\"\xaf\x02\n" + + "\x1dResendReplicationTasksRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12%\n" + + "\x0eremote_cluster\x18\x04 \x01(\tR\rremoteCluster\x12$\n" + + "\x0estart_event_id\x18\x05 \x01(\x03R\fstartEventId\x12#\n" + + "\rstart_version\x18\x06 \x01(\x03R\fstartVersion\x12 \n" + + "\fend_event_id\x18\a \x01(\x03R\n" + + "endEventId\x12\x1f\n" + + "\vend_version\x18\b \x01(\x03R\n" + + "endVersion\" \n" + + "\x1eResendReplicationTasksResponse\"\xe3\x02\n" + + "\x18GetTaskQueueTasksRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12\x19\n" + + "\bmin_pass\x18\t \x01(\x03R\aminPass\x12\x1e\n" + + "\vmin_task_id\x18\x04 \x01(\x03R\tminTaskId\x12\x1e\n" + + "\vmax_task_id\x18\x05 \x01(\x03R\tmaxTaskId\x12\x1d\n" + + "\n" + + "batch_size\x18\x06 \x01(\x05R\tbatchSize\x12&\n" + + "\x0fnext_page_token\x18\a \x01(\fR\rnextPageToken\x12\x1a\n" + + "\bsubqueue\x18\b \x01(\x05R\bsubqueue\"\x90\x01\n" + + "\x19GetTaskQueueTasksResponse\x12K\n" + + "\x05tasks\x18\x01 \x03(\v25.temporal.server.api.persistence.v1.AllocatedTaskInfoR\x05tasks\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\xc8\x01\n" + + "\x1eDeleteWorkflowExecutionRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12\x1c\n" + + 
"\tarchetype\x18\x03 \x01(\tR\tarchetype\x12!\n" + + "\farchetype_id\x18\x04 \x01(\rR\varchetypeId\"=\n" + + "\x1fDeleteWorkflowExecutionResponse\x12\x1a\n" + + "\bwarnings\x18\x01 \x03(\tR\bwarnings\"\xaa\x01\n" + + "(StreamWorkflowReplicationMessagesRequest\x12p\n" + + "\x16sync_replication_state\x18\x01 \x01(\v28.temporal.server.api.replication.v1.SyncReplicationStateH\x00R\x14syncReplicationStateB\f\n" + + "\n" + + "attributes\"\x98\x01\n" + + ")StreamWorkflowReplicationMessagesResponse\x12]\n" + + "\bmessages\x18\x01 \x01(\v2?.temporal.server.api.replication.v1.WorkflowReplicationMessagesH\x00R\bmessagesB\f\n" + + "\n" + + "attributes\"U\n" + + "\x13GetNamespaceRequest\x12\x1e\n" + + "\tnamespace\x18\x01 \x01(\tH\x00R\tnamespace\x12\x10\n" + + "\x02id\x18\x02 \x01(\tH\x00R\x02idB\f\n" + + "\n" + + "attributes\"\xda\x03\n" + + "\x14GetNamespaceResponse\x12<\n" + + "\x04info\x18\x03 \x01(\v2(.temporal.api.namespace.v1.NamespaceInfoR\x04info\x12B\n" + + "\x06config\x18\x04 \x01(\v2*.temporal.api.namespace.v1.NamespaceConfigR\x06config\x12f\n" + + "\x12replication_config\x18\x05 \x01(\v27.temporal.api.replication.v1.NamespaceReplicationConfigR\x11replicationConfig\x12%\n" + + "\x0econfig_version\x18\x06 \x01(\x03R\rconfigVersion\x12)\n" + + "\x10failover_version\x18\a \x01(\x03R\x0ffailoverVersion\x12V\n" + + "\x10failover_history\x18\b \x03(\v2+.temporal.api.replication.v1.FailoverStatusR\x0ffailoverHistory\x12.\n" + + "\x13is_global_namespace\x18\t \x01(\bR\x11isGlobalNamespace\"\xa0\x01\n" + + "\x12GetDLQTasksRequest\x12E\n" + + "\adlq_key\x18\x01 \x01(\v2,.temporal.server.api.common.v1.HistoryDLQKeyR\x06dlqKey\x12\x1b\n" + + "\tpage_size\x18\x02 \x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\x03 \x01(\fR\rnextPageToken\"\x89\x01\n" + + "\x13GetDLQTasksResponse\x12J\n" + + "\tdlq_tasks\x18\x01 \x03(\v2-.temporal.server.api.common.v1.HistoryDLQTaskR\bdlqTasks\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\xd3\x01\n" + + 
"\x14PurgeDLQTasksRequest\x12E\n" + + "\adlq_key\x18\x01 \x01(\v2,.temporal.server.api.common.v1.HistoryDLQKeyR\x06dlqKey\x12t\n" + + "\x1binclusive_max_task_metadata\x18\x02 \x01(\v25.temporal.server.api.common.v1.HistoryDLQTaskMetadataR\x18inclusiveMaxTaskMetadata\"4\n" + + "\x15PurgeDLQTasksResponse\x12\x1b\n" + + "\tjob_token\x18\x01 \x01(\fR\bjobToken\"E\n" + + "\vDLQJobToken\x12\x1f\n" + + "\vworkflow_id\x18\x01 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x02 \x01(\tR\x05runId\"\xf2\x01\n" + + "\x14MergeDLQTasksRequest\x12E\n" + + "\adlq_key\x18\x01 \x01(\v2,.temporal.server.api.common.v1.HistoryDLQKeyR\x06dlqKey\x12t\n" + + "\x1binclusive_max_task_metadata\x18\x02 \x01(\v25.temporal.server.api.common.v1.HistoryDLQTaskMetadataR\x18inclusiveMaxTaskMetadata\x12\x1d\n" + + "\n" + + "batch_size\x18\x03 \x01(\x05R\tbatchSize\"4\n" + + "\x15MergeDLQTasksResponse\x12\x1b\n" + + "\tjob_token\x18\x01 \x01(\fR\bjobToken\"4\n" + + "\x15DescribeDLQJobRequest\x12\x1b\n" + + "\tjob_token\x18\x01 \x01(\fR\bjobToken\"\x92\x04\n" + + "\x16DescribeDLQJobResponse\x12E\n" + + "\adlq_key\x18\x01 \x01(\v2,.temporal.server.api.common.v1.HistoryDLQKeyR\x06dlqKey\x12U\n" + + "\x0eoperation_type\x18\x02 \x01(\x0e2..temporal.server.api.enums.v1.DLQOperationTypeR\roperationType\x12X\n" + + "\x0foperation_state\x18\x03 \x01(\x0e2/.temporal.server.api.enums.v1.DLQOperationStateR\x0eoperationState\x129\n" + + "\n" + + "start_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x125\n" + + "\bend_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\aendTime\x12$\n" + + "\x0emax_message_id\x18\x06 \x01(\x03R\fmaxMessageId\x129\n" + + "\x19last_processed_message_id\x18\a \x01(\x03R\x16lastProcessedMessageId\x12-\n" + + "\x12messages_processed\x18\b \x01(\x03R\x11messagesProcessed\"J\n" + + "\x13CancelDLQJobRequest\x12\x1b\n" + + "\tjob_token\x18\x01 \x01(\fR\bjobToken\x12\x16\n" + + "\x06reason\x18\x02 \x01(\tR\x06reason\"2\n" + + "\x14CancelDLQJobResponse\x12\x1a\n" 
+ + "\bcanceled\x18\x01 \x01(\bR\bcanceled\"\xdc\x01\n" + + "\x0fAddTasksRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x12O\n" + + "\x05tasks\x18\x02 \x03(\v29.temporal.server.api.adminservice.v1.AddTasksRequest.TaskR\x05tasks\x1a]\n" + + "\x04Task\x12\x1f\n" + + "\vcategory_id\x18\x01 \x01(\x05R\n" + + "categoryId\x124\n" + + "\x04blob\x18\x02 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04blob\"\x12\n" + + "\x10AddTasksResponse\"w\n" + + "\x11ListQueuesRequest\x12\x1d\n" + + "\n" + + "queue_type\x18\x01 \x01(\x05R\tqueueType\x12\x1b\n" + + "\tpage_size\x18\x02 \x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\x03 \x01(\fR\rnextPageToken\"\x90\x02\n" + + "\x12ListQueuesResponse\x12Y\n" + + "\x06queues\x18\x01 \x03(\v2A.temporal.server.api.adminservice.v1.ListQueuesResponse.QueueInfoR\x06queues\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\x1aw\n" + + "\tQueueInfo\x12\x1d\n" + + "\n" + + "queue_name\x18\x01 \x01(\tR\tqueueName\x12#\n" + + "\rmessage_count\x18\x02 \x01(\x03R\fmessageCount\x12&\n" + + "\x0flast_message_id\x18\x03 \x01(\x03R\rlastMessageId\"\x18\n" + + "\x16DeepHealthCheckRequest\"\xaa\x01\n" + + "\x17DeepHealthCheckResponse\x12?\n" + + "\x05state\x18\x01 \x01(\x0e2).temporal.server.api.enums.v1.HealthStateR\x05state\x12N\n" + + "\bservices\x18\x02 \x03(\v22.temporal.server.api.health.v1.ServiceHealthDetailR\bservices\"\xa0\x03\n" + + "\x18SyncWorkflowStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12j\n" + + "\x14versioned_transition\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12]\n" + + "\x11version_histories\x18\x04 \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistories\x12*\n" + + "\x11target_cluster_id\x18\x05 \x01(\x05R\x0ftargetClusterId\x12!\n" + + "\farchetype_id\x18\x06 \x01(\rR\varchetypeId\"\xb9\x01\n" 
+ + "\x19SyncWorkflowStateResponse\x12\x83\x01\n" + + "\x1dversioned_transition_artifact\x18\x05 \x01(\v2?.temporal.server.api.replication.v1.VersionedTransitionArtifactR\x1bversionedTransitionArtifactJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03J\x04\b\x03\x10\x04J\x04\b\x04\x10\x05\"\xfd\x01\n" + + "*GenerateLastHistoryReplicationTasksRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12'\n" + + "\x0ftarget_clusters\x18\x03 \x03(\tR\x0etargetClusters\x12\x1c\n" + + "\tarchetype\x18\x04 \x01(\tR\tarchetype\x12!\n" + + "\farchetype_id\x18\x05 \x01(\rR\varchetypeId\"\x8a\x01\n" + + "+GenerateLastHistoryReplicationTasksResponse\x124\n" + + "\x16state_transition_count\x18\x01 \x01(\x03R\x14stateTransitionCount\x12%\n" + + "\x0ehistory_length\x18\x02 \x01(\x03R\rhistoryLength\"\xfc\x01\n" + + "!DescribeTaskQueuePartitionRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12f\n" + + "\x14task_queue_partition\x18\x02 \x01(\v24.temporal.server.api.taskqueue.v1.TaskQueuePartitionR\x12taskQueuePartition\x12Q\n" + + "\tbuild_ids\x18\x03 \x01(\v24.temporal.api.taskqueue.v1.TaskQueueVersionSelectionR\bbuildIds\"\xc8\x02\n" + + "\"DescribeTaskQueuePartitionResponse\x12\x97\x01\n" + + "\x16versions_info_internal\x18\x01 \x03(\v2a.temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntryR\x14versionsInfoInternal\x1a\x87\x01\n" + + "\x19VersionsInfoInternalEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12T\n" + + "\x05value\x18\x02 \x01(\v2>.temporal.server.api.taskqueue.v1.TaskQueueVersionInfoInternalR\x05value:\x028\x01\"\xac\x01\n" + + "$ForceUnloadTaskQueuePartitionRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12f\n" + + "\x14task_queue_partition\x18\x02 \x01(\v24.temporal.server.api.taskqueue.v1.TaskQueuePartitionR\x12taskQueuePartition\"F\n" + + "%ForceUnloadTaskQueuePartitionResponse\x12\x1d\n" 
+ + "\n" + + "was_loaded\x18\x01 \x01(\bR\twasLoaded\"\xcb\x01\n" + + "\x1bGetTaskQueueUserDataRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12!\n" + + "\fpartition_id\x18\x04 \x01(\x05R\vpartitionId\"\x90\x01\n" + + "\x1cGetTaskQueueUserDataResponse\x12V\n" + + "\tuser_data\x18\x01 \x01(\v29.temporal.server.api.persistence.v1.TaskQueueTypeUserDataR\buserData\x12\x18\n" + + "\aversion\x18\x02 \x01(\x03R\aversion\"\x88\x03\n" + + "\x1fStartAdminBatchOperationRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12)\n" + + "\x10visibility_query\x18\x02 \x01(\tR\x0fvisibilityQuery\x12\x15\n" + + "\x06job_id\x18\x03 \x01(\tR\x05jobId\x12\x16\n" + + "\x06reason\x18\x04 \x01(\tR\x06reason\x12I\n" + + "\n" + + "executions\x18\x05 \x03(\v2).temporal.api.common.v1.WorkflowExecutionR\n" + + "executions\x12\x1a\n" + + "\bidentity\x18\x06 \x01(\tR\bidentity\x12y\n" + + "\x17refresh_tasks_operation\x18\n" + + " \x01(\v2?.temporal.server.api.adminservice.v1.BatchOperationRefreshTasksH\x00R\x15refreshTasksOperationB\v\n" + + "\toperation\"\"\n" + + " StartAdminBatchOperationResponse\"\x1c\n" + + "\x1aBatchOperationRefreshTasks\"\xe7\x02\n" + + "\x16MigrateScheduleRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x1f\n" + + "\vschedule_id\x18\x02 \x01(\tR\n" + + "scheduleId\x12c\n" + + "\x06target\x18\x03 \x01(\x0e2K.temporal.server.api.adminservice.v1.MigrateScheduleRequest.SchedulerTargetR\x06target\x12\x1a\n" + + "\bidentity\x18\x04 \x01(\tR\bidentity\x12\x1d\n" + + "\n" + + "request_id\x18\x05 \x01(\tR\trequestId\"n\n" + + "\x0fSchedulerTarget\x12 \n" + + "\x1cSCHEDULER_TARGET_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16SCHEDULER_TARGET_CHASM\x10\x01\x12\x1d\n" + + "\x19SCHEDULER_TARGET_WORKFLOW\x10\x02\"\x19\n" + + 
"\x17MigrateScheduleResponseB8Z6go.temporal.io/server/api/adminservice/v1;adminserviceb\x06proto3" + +var ( + file_temporal_server_api_adminservice_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_api_adminservice_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_api_adminservice_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_api_adminservice_v1_request_response_proto_rawDescData +} + +var file_temporal_server_api_adminservice_v1_request_response_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 105) +var file_temporal_server_api_adminservice_v1_request_response_proto_goTypes = []any{ + (MigrateScheduleRequest_SchedulerTarget)(0), // 0: temporal.server.api.adminservice.v1.MigrateScheduleRequest.SchedulerTarget + (*RebuildMutableStateRequest)(nil), // 1: temporal.server.api.adminservice.v1.RebuildMutableStateRequest + (*RebuildMutableStateResponse)(nil), // 2: temporal.server.api.adminservice.v1.RebuildMutableStateResponse + (*ImportWorkflowExecutionRequest)(nil), // 3: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest + (*ImportWorkflowExecutionResponse)(nil), // 4: temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse + (*DescribeMutableStateRequest)(nil), // 5: temporal.server.api.adminservice.v1.DescribeMutableStateRequest + (*DescribeMutableStateResponse)(nil), // 6: temporal.server.api.adminservice.v1.DescribeMutableStateResponse + (*DescribeHistoryHostRequest)(nil), 
// 7: temporal.server.api.adminservice.v1.DescribeHistoryHostRequest + (*DescribeHistoryHostResponse)(nil), // 8: temporal.server.api.adminservice.v1.DescribeHistoryHostResponse + (*CloseShardRequest)(nil), // 9: temporal.server.api.adminservice.v1.CloseShardRequest + (*CloseShardResponse)(nil), // 10: temporal.server.api.adminservice.v1.CloseShardResponse + (*GetShardRequest)(nil), // 11: temporal.server.api.adminservice.v1.GetShardRequest + (*GetShardResponse)(nil), // 12: temporal.server.api.adminservice.v1.GetShardResponse + (*ListHistoryTasksRequest)(nil), // 13: temporal.server.api.adminservice.v1.ListHistoryTasksRequest + (*ListHistoryTasksResponse)(nil), // 14: temporal.server.api.adminservice.v1.ListHistoryTasksResponse + (*Task)(nil), // 15: temporal.server.api.adminservice.v1.Task + (*RemoveTaskRequest)(nil), // 16: temporal.server.api.adminservice.v1.RemoveTaskRequest + (*RemoveTaskResponse)(nil), // 17: temporal.server.api.adminservice.v1.RemoveTaskResponse + (*GetWorkflowExecutionRawHistoryV2Request)(nil), // 18: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request + (*GetWorkflowExecutionRawHistoryV2Response)(nil), // 19: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response + (*GetWorkflowExecutionRawHistoryRequest)(nil), // 20: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest + (*GetWorkflowExecutionRawHistoryResponse)(nil), // 21: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse + (*GetReplicationMessagesRequest)(nil), // 22: temporal.server.api.adminservice.v1.GetReplicationMessagesRequest + (*GetReplicationMessagesResponse)(nil), // 23: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse + (*GetNamespaceReplicationMessagesRequest)(nil), // 24: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesRequest + (*GetNamespaceReplicationMessagesResponse)(nil), // 25: 
temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse + (*GetDLQReplicationMessagesRequest)(nil), // 26: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest + (*GetDLQReplicationMessagesResponse)(nil), // 27: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse + (*ReapplyEventsRequest)(nil), // 28: temporal.server.api.adminservice.v1.ReapplyEventsRequest + (*ReapplyEventsResponse)(nil), // 29: temporal.server.api.adminservice.v1.ReapplyEventsResponse + (*AddSearchAttributesRequest)(nil), // 30: temporal.server.api.adminservice.v1.AddSearchAttributesRequest + (*AddSearchAttributesResponse)(nil), // 31: temporal.server.api.adminservice.v1.AddSearchAttributesResponse + (*RemoveSearchAttributesRequest)(nil), // 32: temporal.server.api.adminservice.v1.RemoveSearchAttributesRequest + (*RemoveSearchAttributesResponse)(nil), // 33: temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse + (*GetSearchAttributesRequest)(nil), // 34: temporal.server.api.adminservice.v1.GetSearchAttributesRequest + (*GetSearchAttributesResponse)(nil), // 35: temporal.server.api.adminservice.v1.GetSearchAttributesResponse + (*DescribeClusterRequest)(nil), // 36: temporal.server.api.adminservice.v1.DescribeClusterRequest + (*DescribeClusterResponse)(nil), // 37: temporal.server.api.adminservice.v1.DescribeClusterResponse + (*ListClustersRequest)(nil), // 38: temporal.server.api.adminservice.v1.ListClustersRequest + (*ListClustersResponse)(nil), // 39: temporal.server.api.adminservice.v1.ListClustersResponse + (*AddOrUpdateRemoteClusterRequest)(nil), // 40: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterRequest + (*AddOrUpdateRemoteClusterResponse)(nil), // 41: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse + (*RemoveRemoteClusterRequest)(nil), // 42: temporal.server.api.adminservice.v1.RemoveRemoteClusterRequest + (*RemoveRemoteClusterResponse)(nil), // 43: 
temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse + (*ListClusterMembersRequest)(nil), // 44: temporal.server.api.adminservice.v1.ListClusterMembersRequest + (*ListClusterMembersResponse)(nil), // 45: temporal.server.api.adminservice.v1.ListClusterMembersResponse + (*GetDLQMessagesRequest)(nil), // 46: temporal.server.api.adminservice.v1.GetDLQMessagesRequest + (*GetDLQMessagesResponse)(nil), // 47: temporal.server.api.adminservice.v1.GetDLQMessagesResponse + (*PurgeDLQMessagesRequest)(nil), // 48: temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest + (*PurgeDLQMessagesResponse)(nil), // 49: temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse + (*MergeDLQMessagesRequest)(nil), // 50: temporal.server.api.adminservice.v1.MergeDLQMessagesRequest + (*MergeDLQMessagesResponse)(nil), // 51: temporal.server.api.adminservice.v1.MergeDLQMessagesResponse + (*RefreshWorkflowTasksRequest)(nil), // 52: temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest + (*RefreshWorkflowTasksResponse)(nil), // 53: temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse + (*ResendReplicationTasksRequest)(nil), // 54: temporal.server.api.adminservice.v1.ResendReplicationTasksRequest + (*ResendReplicationTasksResponse)(nil), // 55: temporal.server.api.adminservice.v1.ResendReplicationTasksResponse + (*GetTaskQueueTasksRequest)(nil), // 56: temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest + (*GetTaskQueueTasksResponse)(nil), // 57: temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse + (*DeleteWorkflowExecutionRequest)(nil), // 58: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest + (*DeleteWorkflowExecutionResponse)(nil), // 59: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse + (*StreamWorkflowReplicationMessagesRequest)(nil), // 60: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest + (*StreamWorkflowReplicationMessagesResponse)(nil), // 61: 
temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse + (*GetNamespaceRequest)(nil), // 62: temporal.server.api.adminservice.v1.GetNamespaceRequest + (*GetNamespaceResponse)(nil), // 63: temporal.server.api.adminservice.v1.GetNamespaceResponse + (*GetDLQTasksRequest)(nil), // 64: temporal.server.api.adminservice.v1.GetDLQTasksRequest + (*GetDLQTasksResponse)(nil), // 65: temporal.server.api.adminservice.v1.GetDLQTasksResponse + (*PurgeDLQTasksRequest)(nil), // 66: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest + (*PurgeDLQTasksResponse)(nil), // 67: temporal.server.api.adminservice.v1.PurgeDLQTasksResponse + (*DLQJobToken)(nil), // 68: temporal.server.api.adminservice.v1.DLQJobToken + (*MergeDLQTasksRequest)(nil), // 69: temporal.server.api.adminservice.v1.MergeDLQTasksRequest + (*MergeDLQTasksResponse)(nil), // 70: temporal.server.api.adminservice.v1.MergeDLQTasksResponse + (*DescribeDLQJobRequest)(nil), // 71: temporal.server.api.adminservice.v1.DescribeDLQJobRequest + (*DescribeDLQJobResponse)(nil), // 72: temporal.server.api.adminservice.v1.DescribeDLQJobResponse + (*CancelDLQJobRequest)(nil), // 73: temporal.server.api.adminservice.v1.CancelDLQJobRequest + (*CancelDLQJobResponse)(nil), // 74: temporal.server.api.adminservice.v1.CancelDLQJobResponse + (*AddTasksRequest)(nil), // 75: temporal.server.api.adminservice.v1.AddTasksRequest + (*AddTasksResponse)(nil), // 76: temporal.server.api.adminservice.v1.AddTasksResponse + (*ListQueuesRequest)(nil), // 77: temporal.server.api.adminservice.v1.ListQueuesRequest + (*ListQueuesResponse)(nil), // 78: temporal.server.api.adminservice.v1.ListQueuesResponse + (*DeepHealthCheckRequest)(nil), // 79: temporal.server.api.adminservice.v1.DeepHealthCheckRequest + (*DeepHealthCheckResponse)(nil), // 80: temporal.server.api.adminservice.v1.DeepHealthCheckResponse + (*SyncWorkflowStateRequest)(nil), // 81: temporal.server.api.adminservice.v1.SyncWorkflowStateRequest + 
(*SyncWorkflowStateResponse)(nil), // 82: temporal.server.api.adminservice.v1.SyncWorkflowStateResponse + (*GenerateLastHistoryReplicationTasksRequest)(nil), // 83: temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksRequest + (*GenerateLastHistoryReplicationTasksResponse)(nil), // 84: temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksResponse + (*DescribeTaskQueuePartitionRequest)(nil), // 85: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionRequest + (*DescribeTaskQueuePartitionResponse)(nil), // 86: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse + (*ForceUnloadTaskQueuePartitionRequest)(nil), // 87: temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionRequest + (*ForceUnloadTaskQueuePartitionResponse)(nil), // 88: temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionResponse + (*GetTaskQueueUserDataRequest)(nil), // 89: temporal.server.api.adminservice.v1.GetTaskQueueUserDataRequest + (*GetTaskQueueUserDataResponse)(nil), // 90: temporal.server.api.adminservice.v1.GetTaskQueueUserDataResponse + (*StartAdminBatchOperationRequest)(nil), // 91: temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest + (*StartAdminBatchOperationResponse)(nil), // 92: temporal.server.api.adminservice.v1.StartAdminBatchOperationResponse + (*BatchOperationRefreshTasks)(nil), // 93: temporal.server.api.adminservice.v1.BatchOperationRefreshTasks + (*MigrateScheduleRequest)(nil), // 94: temporal.server.api.adminservice.v1.MigrateScheduleRequest + (*MigrateScheduleResponse)(nil), // 95: temporal.server.api.adminservice.v1.MigrateScheduleResponse + nil, // 96: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry + nil, // 97: temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntry + nil, // 98: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntry + nil, // 99: 
temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntry + nil, // 100: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.MappingEntry + nil, // 101: temporal.server.api.adminservice.v1.DescribeClusterResponse.SupportedClientsEntry + nil, // 102: temporal.server.api.adminservice.v1.DescribeClusterResponse.TagsEntry + (*AddTasksRequest_Task)(nil), // 103: temporal.server.api.adminservice.v1.AddTasksRequest.Task + (*ListQueuesResponse_QueueInfo)(nil), // 104: temporal.server.api.adminservice.v1.ListQueuesResponse.QueueInfo + nil, // 105: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntry + (*v1.WorkflowExecution)(nil), // 106: temporal.api.common.v1.WorkflowExecution + (*v1.DataBlob)(nil), // 107: temporal.api.common.v1.DataBlob + (*v11.VersionHistory)(nil), // 108: temporal.server.api.history.v1.VersionHistory + (*v12.WorkflowMutableState)(nil), // 109: temporal.server.api.persistence.v1.WorkflowMutableState + (*v13.NamespaceCacheInfo)(nil), // 110: temporal.server.api.namespace.v1.NamespaceCacheInfo + (*v12.ShardInfo)(nil), // 111: temporal.server.api.persistence.v1.ShardInfo + (*v11.TaskRange)(nil), // 112: temporal.server.api.history.v1.TaskRange + (v14.TaskType)(0), // 113: temporal.server.api.enums.v1.TaskType + (*timestamppb.Timestamp)(nil), // 114: google.protobuf.Timestamp + (*v15.ReplicationToken)(nil), // 115: temporal.server.api.replication.v1.ReplicationToken + (*v15.ReplicationMessages)(nil), // 116: temporal.server.api.replication.v1.ReplicationMessages + (*v15.ReplicationTaskInfo)(nil), // 117: temporal.server.api.replication.v1.ReplicationTaskInfo + (*v15.ReplicationTask)(nil), // 118: temporal.server.api.replication.v1.ReplicationTask + (*v17.WorkflowExecutionInfo)(nil), // 119: temporal.api.workflow.v1.WorkflowExecutionInfo + (*v18.MembershipInfo)(nil), // 120: temporal.server.api.cluster.v1.MembershipInfo + (*v19.VersionInfo)(nil), // 121: 
temporal.api.version.v1.VersionInfo + (*v12.ClusterMetadata)(nil), // 122: temporal.server.api.persistence.v1.ClusterMetadata + (*durationpb.Duration)(nil), // 123: google.protobuf.Duration + (v14.ClusterMemberRole)(0), // 124: temporal.server.api.enums.v1.ClusterMemberRole + (*v18.ClusterMember)(nil), // 125: temporal.server.api.cluster.v1.ClusterMember + (v14.DeadLetterQueueType)(0), // 126: temporal.server.api.enums.v1.DeadLetterQueueType + (v16.TaskQueueType)(0), // 127: temporal.api.enums.v1.TaskQueueType + (*v12.AllocatedTaskInfo)(nil), // 128: temporal.server.api.persistence.v1.AllocatedTaskInfo + (*v15.SyncReplicationState)(nil), // 129: temporal.server.api.replication.v1.SyncReplicationState + (*v15.WorkflowReplicationMessages)(nil), // 130: temporal.server.api.replication.v1.WorkflowReplicationMessages + (*v110.NamespaceInfo)(nil), // 131: temporal.api.namespace.v1.NamespaceInfo + (*v110.NamespaceConfig)(nil), // 132: temporal.api.namespace.v1.NamespaceConfig + (*v111.NamespaceReplicationConfig)(nil), // 133: temporal.api.replication.v1.NamespaceReplicationConfig + (*v111.FailoverStatus)(nil), // 134: temporal.api.replication.v1.FailoverStatus + (*v112.HistoryDLQKey)(nil), // 135: temporal.server.api.common.v1.HistoryDLQKey + (*v112.HistoryDLQTask)(nil), // 136: temporal.server.api.common.v1.HistoryDLQTask + (*v112.HistoryDLQTaskMetadata)(nil), // 137: temporal.server.api.common.v1.HistoryDLQTaskMetadata + (v14.DLQOperationType)(0), // 138: temporal.server.api.enums.v1.DLQOperationType + (v14.DLQOperationState)(0), // 139: temporal.server.api.enums.v1.DLQOperationState + (v14.HealthState)(0), // 140: temporal.server.api.enums.v1.HealthState + (*v113.ServiceHealthDetail)(nil), // 141: temporal.server.api.health.v1.ServiceHealthDetail + (*v12.VersionedTransition)(nil), // 142: temporal.server.api.persistence.v1.VersionedTransition + (*v11.VersionHistories)(nil), // 143: temporal.server.api.history.v1.VersionHistories + 
(*v15.VersionedTransitionArtifact)(nil), // 144: temporal.server.api.replication.v1.VersionedTransitionArtifact + (*v114.TaskQueuePartition)(nil), // 145: temporal.server.api.taskqueue.v1.TaskQueuePartition + (*v115.TaskQueueVersionSelection)(nil), // 146: temporal.api.taskqueue.v1.TaskQueueVersionSelection + (*v12.TaskQueueTypeUserData)(nil), // 147: temporal.server.api.persistence.v1.TaskQueueTypeUserData + (v16.IndexedValueType)(0), // 148: temporal.api.enums.v1.IndexedValueType + (*v114.TaskQueueVersionInfoInternal)(nil), // 149: temporal.server.api.taskqueue.v1.TaskQueueVersionInfoInternal +} +var file_temporal_server_api_adminservice_v1_request_response_proto_depIdxs = []int32{ + 106, // 0: temporal.server.api.adminservice.v1.RebuildMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 106, // 1: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 107, // 2: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest.history_batches:type_name -> temporal.api.common.v1.DataBlob + 108, // 3: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 106, // 4: temporal.server.api.adminservice.v1.DescribeMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 109, // 5: temporal.server.api.adminservice.v1.DescribeMutableStateResponse.cache_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 109, // 6: temporal.server.api.adminservice.v1.DescribeMutableStateResponse.database_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 106, // 7: temporal.server.api.adminservice.v1.DescribeHistoryHostRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 110, // 8: 
temporal.server.api.adminservice.v1.DescribeHistoryHostResponse.namespace_cache:type_name -> temporal.server.api.namespace.v1.NamespaceCacheInfo + 111, // 9: temporal.server.api.adminservice.v1.GetShardResponse.shard_info:type_name -> temporal.server.api.persistence.v1.ShardInfo + 112, // 10: temporal.server.api.adminservice.v1.ListHistoryTasksRequest.task_range:type_name -> temporal.server.api.history.v1.TaskRange + 15, // 11: temporal.server.api.adminservice.v1.ListHistoryTasksResponse.tasks:type_name -> temporal.server.api.adminservice.v1.Task + 113, // 12: temporal.server.api.adminservice.v1.Task.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 114, // 13: temporal.server.api.adminservice.v1.Task.fire_time:type_name -> google.protobuf.Timestamp + 114, // 14: temporal.server.api.adminservice.v1.RemoveTaskRequest.visibility_time:type_name -> google.protobuf.Timestamp + 106, // 15: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 107, // 16: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response.history_batches:type_name -> temporal.api.common.v1.DataBlob + 108, // 17: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 106, // 18: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 107, // 19: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse.history_batches:type_name -> temporal.api.common.v1.DataBlob + 108, // 20: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 115, // 21: temporal.server.api.adminservice.v1.GetReplicationMessagesRequest.tokens:type_name -> temporal.server.api.replication.v1.ReplicationToken + 96, // 22: 
temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.shard_messages:type_name -> temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry + 116, // 23: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse.messages:type_name -> temporal.server.api.replication.v1.ReplicationMessages + 117, // 24: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest.task_infos:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo + 118, // 25: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask + 106, // 26: temporal.server.api.adminservice.v1.ReapplyEventsRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 107, // 27: temporal.server.api.adminservice.v1.ReapplyEventsRequest.events:type_name -> temporal.api.common.v1.DataBlob + 97, // 28: temporal.server.api.adminservice.v1.AddSearchAttributesRequest.search_attributes:type_name -> temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntry + 98, // 29: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.custom_attributes:type_name -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntry + 99, // 30: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.system_attributes:type_name -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntry + 100, // 31: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.mapping:type_name -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse.MappingEntry + 119, // 32: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.add_workflow_execution_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionInfo + 101, // 33: temporal.server.api.adminservice.v1.DescribeClusterResponse.supported_clients:type_name -> 
temporal.server.api.adminservice.v1.DescribeClusterResponse.SupportedClientsEntry + 120, // 34: temporal.server.api.adminservice.v1.DescribeClusterResponse.membership_info:type_name -> temporal.server.api.cluster.v1.MembershipInfo + 121, // 35: temporal.server.api.adminservice.v1.DescribeClusterResponse.version_info:type_name -> temporal.api.version.v1.VersionInfo + 102, // 36: temporal.server.api.adminservice.v1.DescribeClusterResponse.tags:type_name -> temporal.server.api.adminservice.v1.DescribeClusterResponse.TagsEntry + 122, // 37: temporal.server.api.adminservice.v1.ListClustersResponse.clusters:type_name -> temporal.server.api.persistence.v1.ClusterMetadata + 123, // 38: temporal.server.api.adminservice.v1.ListClusterMembersRequest.last_heartbeat_within:type_name -> google.protobuf.Duration + 124, // 39: temporal.server.api.adminservice.v1.ListClusterMembersRequest.role:type_name -> temporal.server.api.enums.v1.ClusterMemberRole + 114, // 40: temporal.server.api.adminservice.v1.ListClusterMembersRequest.session_started_after_time:type_name -> google.protobuf.Timestamp + 125, // 41: temporal.server.api.adminservice.v1.ListClusterMembersResponse.active_members:type_name -> temporal.server.api.cluster.v1.ClusterMember + 126, // 42: temporal.server.api.adminservice.v1.GetDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 126, // 43: temporal.server.api.adminservice.v1.GetDLQMessagesResponse.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 118, // 44: temporal.server.api.adminservice.v1.GetDLQMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask + 117, // 45: temporal.server.api.adminservice.v1.GetDLQMessagesResponse.replication_tasks_info:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo + 126, // 46: temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 126, // 47: 
temporal.server.api.adminservice.v1.MergeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 106, // 48: temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 127, // 49: temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 128, // 50: temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse.tasks:type_name -> temporal.server.api.persistence.v1.AllocatedTaskInfo + 106, // 51: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 129, // 52: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest.sync_replication_state:type_name -> temporal.server.api.replication.v1.SyncReplicationState + 130, // 53: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse.messages:type_name -> temporal.server.api.replication.v1.WorkflowReplicationMessages + 131, // 54: temporal.server.api.adminservice.v1.GetNamespaceResponse.info:type_name -> temporal.api.namespace.v1.NamespaceInfo + 132, // 55: temporal.server.api.adminservice.v1.GetNamespaceResponse.config:type_name -> temporal.api.namespace.v1.NamespaceConfig + 133, // 56: temporal.server.api.adminservice.v1.GetNamespaceResponse.replication_config:type_name -> temporal.api.replication.v1.NamespaceReplicationConfig + 134, // 57: temporal.server.api.adminservice.v1.GetNamespaceResponse.failover_history:type_name -> temporal.api.replication.v1.FailoverStatus + 135, // 58: temporal.server.api.adminservice.v1.GetDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey + 136, // 59: temporal.server.api.adminservice.v1.GetDLQTasksResponse.dlq_tasks:type_name -> temporal.server.api.common.v1.HistoryDLQTask + 135, // 60: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest.dlq_key:type_name -> 
temporal.server.api.common.v1.HistoryDLQKey + 137, // 61: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest.inclusive_max_task_metadata:type_name -> temporal.server.api.common.v1.HistoryDLQTaskMetadata + 135, // 62: temporal.server.api.adminservice.v1.MergeDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey + 137, // 63: temporal.server.api.adminservice.v1.MergeDLQTasksRequest.inclusive_max_task_metadata:type_name -> temporal.server.api.common.v1.HistoryDLQTaskMetadata + 135, // 64: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey + 138, // 65: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.operation_type:type_name -> temporal.server.api.enums.v1.DLQOperationType + 139, // 66: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.operation_state:type_name -> temporal.server.api.enums.v1.DLQOperationState + 114, // 67: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.start_time:type_name -> google.protobuf.Timestamp + 114, // 68: temporal.server.api.adminservice.v1.DescribeDLQJobResponse.end_time:type_name -> google.protobuf.Timestamp + 103, // 69: temporal.server.api.adminservice.v1.AddTasksRequest.tasks:type_name -> temporal.server.api.adminservice.v1.AddTasksRequest.Task + 104, // 70: temporal.server.api.adminservice.v1.ListQueuesResponse.queues:type_name -> temporal.server.api.adminservice.v1.ListQueuesResponse.QueueInfo + 140, // 71: temporal.server.api.adminservice.v1.DeepHealthCheckResponse.state:type_name -> temporal.server.api.enums.v1.HealthState + 141, // 72: temporal.server.api.adminservice.v1.DeepHealthCheckResponse.services:type_name -> temporal.server.api.health.v1.ServiceHealthDetail + 106, // 73: temporal.server.api.adminservice.v1.SyncWorkflowStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 142, // 74: 
temporal.server.api.adminservice.v1.SyncWorkflowStateRequest.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 143, // 75: temporal.server.api.adminservice.v1.SyncWorkflowStateRequest.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 144, // 76: temporal.server.api.adminservice.v1.SyncWorkflowStateResponse.versioned_transition_artifact:type_name -> temporal.server.api.replication.v1.VersionedTransitionArtifact + 106, // 77: temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 145, // 78: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionRequest.task_queue_partition:type_name -> temporal.server.api.taskqueue.v1.TaskQueuePartition + 146, // 79: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionRequest.build_ids:type_name -> temporal.api.taskqueue.v1.TaskQueueVersionSelection + 105, // 80: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse.versions_info_internal:type_name -> temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntry + 145, // 81: temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionRequest.task_queue_partition:type_name -> temporal.server.api.taskqueue.v1.TaskQueuePartition + 127, // 82: temporal.server.api.adminservice.v1.GetTaskQueueUserDataRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 147, // 83: temporal.server.api.adminservice.v1.GetTaskQueueUserDataResponse.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueTypeUserData + 106, // 84: temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest.executions:type_name -> temporal.api.common.v1.WorkflowExecution + 93, // 85: temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest.refresh_tasks_operation:type_name -> temporal.server.api.adminservice.v1.BatchOperationRefreshTasks + 0, 
// 86: temporal.server.api.adminservice.v1.MigrateScheduleRequest.target:type_name -> temporal.server.api.adminservice.v1.MigrateScheduleRequest.SchedulerTarget + 116, // 87: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry.value:type_name -> temporal.server.api.replication.v1.ReplicationMessages + 148, // 88: temporal.server.api.adminservice.v1.AddSearchAttributesRequest.SearchAttributesEntry.value:type_name -> temporal.api.enums.v1.IndexedValueType + 148, // 89: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.CustomAttributesEntry.value:type_name -> temporal.api.enums.v1.IndexedValueType + 148, // 90: temporal.server.api.adminservice.v1.GetSearchAttributesResponse.SystemAttributesEntry.value:type_name -> temporal.api.enums.v1.IndexedValueType + 107, // 91: temporal.server.api.adminservice.v1.AddTasksRequest.Task.blob:type_name -> temporal.api.common.v1.DataBlob + 149, // 92: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntry.value:type_name -> temporal.server.api.taskqueue.v1.TaskQueueVersionInfoInternal + 93, // [93:93] is the sub-list for method output_type + 93, // [93:93] is the sub-list for method input_type + 93, // [93:93] is the sub-list for extension type_name + 93, // [93:93] is the sub-list for extension extendee + 0, // [0:93] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_adminservice_v1_request_response_proto_init() } +func file_temporal_server_api_adminservice_v1_request_response_proto_init() { + if File_temporal_server_api_adminservice_v1_request_response_proto != nil { + return } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[59].OneofWrappers = []interface{}{ + file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[59].OneofWrappers = []any{ (*StreamWorkflowReplicationMessagesRequest_SyncReplicationState)(nil), } - 
file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[60].OneofWrappers = []interface{}{ + file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[60].OneofWrappers = []any{ (*StreamWorkflowReplicationMessagesResponse_Messages)(nil), } - file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[61].OneofWrappers = []interface{}{ + file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[61].OneofWrappers = []any{ (*GetNamespaceRequest_Namespace)(nil), (*GetNamespaceRequest_Id)(nil), } + file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes[90].OneofWrappers = []any{ + (*StartAdminBatchOperationRequest_RefreshTasksOperation)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc, - NumEnums: 0, - NumMessages: 87, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc)), + NumEnums: 1, + NumMessages: 105, NumExtensions: 0, NumServices: 0, }, GoTypes: file_temporal_server_api_adminservice_v1_request_response_proto_goTypes, DependencyIndexes: file_temporal_server_api_adminservice_v1_request_response_proto_depIdxs, + EnumInfos: file_temporal_server_api_adminservice_v1_request_response_proto_enumTypes, MessageInfos: file_temporal_server_api_adminservice_v1_request_response_proto_msgTypes, }.Build() File_temporal_server_api_adminservice_v1_request_response_proto = out.File - file_temporal_server_api_adminservice_v1_request_response_proto_rawDesc = nil file_temporal_server_api_adminservice_v1_request_response_proto_goTypes = nil file_temporal_server_api_adminservice_v1_request_response_proto_depIdxs = nil } diff --git a/api/adminservice/v1/service.pb.go 
b/api/adminservice/v1/service.pb.go index 2039c0695b0..f60dfd1c775 100644 --- a/api/adminservice/v1/service.pb.go +++ b/api/adminservice/v1/service.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -30,7 +8,9 @@ package adminservice import ( reflect "reflect" + unsafe "unsafe" + _ "go.temporal.io/server/api/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -44,466 +24,153 @@ const ( var File_temporal_server_api_adminservice_v1_service_proto protoreflect.FileDescriptor -var file_temporal_server_api_adminservice_v1_service_proto_rawDesc = []byte{ - 0x0a, 0x31, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x3a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xdd, 0x2d, 0x0a, 0x0c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x9a, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3f, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0xa6, 0x01, 0x0a, 0x17, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9d, 0x01, 0x0a, 0x14, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 
0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9a, 0x01, 0x0a, 0x13, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, - 0x6f, 0x73, 0x74, 0x12, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x12, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 
0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x0a, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x12, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x91, 0x01, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x3c, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x0a, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x36, 0x2e, 0x74, 0x65, 
0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xc1, 0x01, 0x0a, 0x20, 0x47, 0x65, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x56, 0x32, 0x12, 0x4c, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4d, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x56, 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xbb, 0x01, - 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x12, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4b, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa3, 0x01, 0x0a, 0x16, - 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0xbe, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x4b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0xac, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x12, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 
0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x88, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, - 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9a, 0x01, 0x0a, - 0x13, 0x41, 0x64, 0x64, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x12, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x53, - 0x65, 0x61, 0x72, 
0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa3, 0x01, 0x0a, 0x16, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x12, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x9a, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 
0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8e, 0x01, 0x0a, - 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x85, 0x01, - 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x38, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, - 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x97, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x12, 0x3e, 0x2e, 
0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3f, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, - 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0xa9, 0x01, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x44, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x4f, 0x72, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9a, 0x01, 0x0a, 0x13, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x12, 0x3f, 
0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, - 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3a, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x91, 0x01, 0x0a, 0x10, 0x50, 0x75, 0x72, 0x67, 0x65, - 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3c, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x91, 0x01, 0x0a, 0x10, 0x4d, - 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, - 0x3c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9d, - 0x01, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 
0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa3, - 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x3d, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa6, 0x01, 0x0a, 0x17, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xc8, 0x01, 0x0a, 0x21, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x4d, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 
0x6f, 0x77, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4e, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, - 0x85, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x44, - 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x38, 0x2e, 
0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a, - 0x0d, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x39, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x88, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x72, 0x67, - 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, - 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x44, - 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x12, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0x85, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x44, 0x4c, 0x51, 0x4a, 0x6f, - 0x62, 0x12, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x44, 0x4c, - 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x44, 0x4c, 0x51, 0x4a, 0x6f, 0x62, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x79, 0x0a, 0x08, 0x41, 0x64, 0x64, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x12, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 
0x6e, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x7f, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x73, 0x12, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_adminservice_v1_service_proto_rawDesc = "" + + "\n" + + 
"1temporal/server/api/adminservice/v1/service.proto\x12#temporal.server.api.adminservice.v1\x1a:temporal/server/api/adminservice/v1/request_response.proto\x1a0temporal/server/api/common/v1/api_category.proto2\xb3:\n" + + "\fAdminService\x12\xa0\x01\n" + + "\x13RebuildMutableState\x12?.temporal.server.api.adminservice.v1.RebuildMutableStateRequest\x1a@.temporal.server.api.adminservice.v1.RebuildMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xac\x01\n" + + "\x17ImportWorkflowExecution\x12C.temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest\x1aD.temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa3\x01\n" + + "\x14DescribeMutableState\x12@.temporal.server.api.adminservice.v1.DescribeMutableStateRequest\x1aA.temporal.server.api.adminservice.v1.DescribeMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa0\x01\n" + + "\x13DescribeHistoryHost\x12?.temporal.server.api.adminservice.v1.DescribeHistoryHostRequest\x1a@.temporal.server.api.adminservice.v1.DescribeHistoryHostResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x7f\n" + + "\bGetShard\x124.temporal.server.api.adminservice.v1.GetShardRequest\x1a5.temporal.server.api.adminservice.v1.GetShardResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x85\x01\n" + + "\n" + + "CloseShard\x126.temporal.server.api.adminservice.v1.CloseShardRequest\x1a7.temporal.server.api.adminservice.v1.CloseShardResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x97\x01\n" + + "\x10ListHistoryTasks\x12<.temporal.server.api.adminservice.v1.ListHistoryTasksRequest\x1a=.temporal.server.api.adminservice.v1.ListHistoryTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x85\x01\n" + + "\n" + + "RemoveTask\x126.temporal.server.api.adminservice.v1.RemoveTaskRequest\x1a7.temporal.server.api.adminservice.v1.RemoveTaskResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xc7\x01\n" + + " 
GetWorkflowExecutionRawHistoryV2\x12L.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request\x1aM.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response\"\x06\x8a\xb5\x18\x02\b\x03\x12\xc1\x01\n" + + "\x1eGetWorkflowExecutionRawHistory\x12J.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest\x1aK.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa9\x01\n" + + "\x16GetReplicationMessages\x12B.temporal.server.api.adminservice.v1.GetReplicationMessagesRequest\x1aC.temporal.server.api.adminservice.v1.GetReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xc4\x01\n" + + "\x1fGetNamespaceReplicationMessages\x12K.temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesRequest\x1aL.temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xb2\x01\n" + + "\x19GetDLQReplicationMessages\x12E.temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest\x1aF.temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x8e\x01\n" + + "\rReapplyEvents\x129.temporal.server.api.adminservice.v1.ReapplyEventsRequest\x1a:.temporal.server.api.adminservice.v1.ReapplyEventsResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa0\x01\n" + + "\x13AddSearchAttributes\x12?.temporal.server.api.adminservice.v1.AddSearchAttributesRequest\x1a@.temporal.server.api.adminservice.v1.AddSearchAttributesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa9\x01\n" + + "\x16RemoveSearchAttributes\x12B.temporal.server.api.adminservice.v1.RemoveSearchAttributesRequest\x1aC.temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa0\x01\n" + + "\x13GetSearchAttributes\x12?.temporal.server.api.adminservice.v1.GetSearchAttributesRequest\x1a@.temporal.server.api.adminservice.v1.GetSearchAttributesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x94\x01\n" + + 
"\x0fDescribeCluster\x12;.temporal.server.api.adminservice.v1.DescribeClusterRequest\x1a<.temporal.server.api.adminservice.v1.DescribeClusterResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x8b\x01\n" + + "\fListClusters\x128.temporal.server.api.adminservice.v1.ListClustersRequest\x1a9.temporal.server.api.adminservice.v1.ListClustersResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x9d\x01\n" + + "\x12ListClusterMembers\x12>.temporal.server.api.adminservice.v1.ListClusterMembersRequest\x1a?.temporal.server.api.adminservice.v1.ListClusterMembersResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xaf\x01\n" + + "\x18AddOrUpdateRemoteCluster\x12D.temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterRequest\x1aE.temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa0\x01\n" + + "\x13RemoveRemoteCluster\x12?.temporal.server.api.adminservice.v1.RemoveRemoteClusterRequest\x1a@.temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x91\x01\n" + + "\x0eGetDLQMessages\x12:.temporal.server.api.adminservice.v1.GetDLQMessagesRequest\x1a;.temporal.server.api.adminservice.v1.GetDLQMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x97\x01\n" + + "\x10PurgeDLQMessages\x12<.temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest\x1a=.temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x97\x01\n" + + "\x10MergeDLQMessages\x12<.temporal.server.api.adminservice.v1.MergeDLQMessagesRequest\x1a=.temporal.server.api.adminservice.v1.MergeDLQMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa3\x01\n" + + "\x14RefreshWorkflowTasks\x12@.temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest\x1aA.temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xaf\x01\n" + + 
"\x18StartAdminBatchOperation\x12D.temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest\x1aE.temporal.server.api.adminservice.v1.StartAdminBatchOperationResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa9\x01\n" + + "\x16ResendReplicationTasks\x12B.temporal.server.api.adminservice.v1.ResendReplicationTasksRequest\x1aC.temporal.server.api.adminservice.v1.ResendReplicationTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x9a\x01\n" + + "\x11GetTaskQueueTasks\x12=.temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest\x1a>.temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xac\x01\n" + + "\x17DeleteWorkflowExecution\x12C.temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest\x1aD.temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xce\x01\n" + + "!StreamWorkflowReplicationMessages\x12M.temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest\x1aN.temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03(\x010\x01\x12\x8b\x01\n" + + "\fGetNamespace\x128.temporal.server.api.adminservice.v1.GetNamespaceRequest\x1a9.temporal.server.api.adminservice.v1.GetNamespaceResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x88\x01\n" + + "\vGetDLQTasks\x127.temporal.server.api.adminservice.v1.GetDLQTasksRequest\x1a8.temporal.server.api.adminservice.v1.GetDLQTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x8e\x01\n" + + "\rPurgeDLQTasks\x129.temporal.server.api.adminservice.v1.PurgeDLQTasksRequest\x1a:.temporal.server.api.adminservice.v1.PurgeDLQTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x8e\x01\n" + + "\rMergeDLQTasks\x129.temporal.server.api.adminservice.v1.MergeDLQTasksRequest\x1a:.temporal.server.api.adminservice.v1.MergeDLQTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x91\x01\n" + + 
"\x0eDescribeDLQJob\x12:.temporal.server.api.adminservice.v1.DescribeDLQJobRequest\x1a;.temporal.server.api.adminservice.v1.DescribeDLQJobResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x8b\x01\n" + + "\fCancelDLQJob\x128.temporal.server.api.adminservice.v1.CancelDLQJobRequest\x1a9.temporal.server.api.adminservice.v1.CancelDLQJobResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x7f\n" + + "\bAddTasks\x124.temporal.server.api.adminservice.v1.AddTasksRequest\x1a5.temporal.server.api.adminservice.v1.AddTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x85\x01\n" + + "\n" + + "ListQueues\x126.temporal.server.api.adminservice.v1.ListQueuesRequest\x1a7.temporal.server.api.adminservice.v1.ListQueuesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x94\x01\n" + + "\x0fDeepHealthCheck\x12;.temporal.server.api.adminservice.v1.DeepHealthCheckRequest\x1a<.temporal.server.api.adminservice.v1.DeepHealthCheckResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x9a\x01\n" + + "\x11SyncWorkflowState\x12=.temporal.server.api.adminservice.v1.SyncWorkflowStateRequest\x1a>.temporal.server.api.adminservice.v1.SyncWorkflowStateResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xd0\x01\n" + + "#GenerateLastHistoryReplicationTasks\x12O.temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksRequest\x1aP.temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xb5\x01\n" + + "\x1aDescribeTaskQueuePartition\x12F.temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionRequest\x1aG.temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xbe\x01\n" + + "\x1dForceUnloadTaskQueuePartition\x12I.temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionRequest\x1aJ.temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa3\x01\n" + + 
"\x14GetTaskQueueUserData\x12@.temporal.server.api.adminservice.v1.GetTaskQueueUserDataRequest\x1aA.temporal.server.api.adminservice.v1.GetTaskQueueUserDataResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x94\x01\n" + + "\x0fMigrateSchedule\x12;.temporal.server.api.adminservice.v1.MigrateScheduleRequest\x1a<.temporal.server.api.adminservice.v1.MigrateScheduleResponse\"\x06\x8a\xb5\x18\x02\b\x03B8Z6go.temporal.io/server/api/adminservice/v1;adminserviceb\x06proto3" -var file_temporal_server_api_adminservice_v1_service_proto_goTypes = []interface{}{ - (*RebuildMutableStateRequest)(nil), // 0: temporal.server.api.adminservice.v1.RebuildMutableStateRequest - (*ImportWorkflowExecutionRequest)(nil), // 1: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest - (*DescribeMutableStateRequest)(nil), // 2: temporal.server.api.adminservice.v1.DescribeMutableStateRequest - (*DescribeHistoryHostRequest)(nil), // 3: temporal.server.api.adminservice.v1.DescribeHistoryHostRequest - (*GetShardRequest)(nil), // 4: temporal.server.api.adminservice.v1.GetShardRequest - (*CloseShardRequest)(nil), // 5: temporal.server.api.adminservice.v1.CloseShardRequest - (*ListHistoryTasksRequest)(nil), // 6: temporal.server.api.adminservice.v1.ListHistoryTasksRequest - (*RemoveTaskRequest)(nil), // 7: temporal.server.api.adminservice.v1.RemoveTaskRequest - (*GetWorkflowExecutionRawHistoryV2Request)(nil), // 8: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request - (*GetWorkflowExecutionRawHistoryRequest)(nil), // 9: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest - (*GetReplicationMessagesRequest)(nil), // 10: temporal.server.api.adminservice.v1.GetReplicationMessagesRequest - (*GetNamespaceReplicationMessagesRequest)(nil), // 11: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesRequest - (*GetDLQReplicationMessagesRequest)(nil), // 12: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest - 
(*ReapplyEventsRequest)(nil), // 13: temporal.server.api.adminservice.v1.ReapplyEventsRequest - (*AddSearchAttributesRequest)(nil), // 14: temporal.server.api.adminservice.v1.AddSearchAttributesRequest - (*RemoveSearchAttributesRequest)(nil), // 15: temporal.server.api.adminservice.v1.RemoveSearchAttributesRequest - (*GetSearchAttributesRequest)(nil), // 16: temporal.server.api.adminservice.v1.GetSearchAttributesRequest - (*DescribeClusterRequest)(nil), // 17: temporal.server.api.adminservice.v1.DescribeClusterRequest - (*ListClustersRequest)(nil), // 18: temporal.server.api.adminservice.v1.ListClustersRequest - (*ListClusterMembersRequest)(nil), // 19: temporal.server.api.adminservice.v1.ListClusterMembersRequest - (*AddOrUpdateRemoteClusterRequest)(nil), // 20: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterRequest - (*RemoveRemoteClusterRequest)(nil), // 21: temporal.server.api.adminservice.v1.RemoveRemoteClusterRequest - (*GetDLQMessagesRequest)(nil), // 22: temporal.server.api.adminservice.v1.GetDLQMessagesRequest - (*PurgeDLQMessagesRequest)(nil), // 23: temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest - (*MergeDLQMessagesRequest)(nil), // 24: temporal.server.api.adminservice.v1.MergeDLQMessagesRequest - (*RefreshWorkflowTasksRequest)(nil), // 25: temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest - (*ResendReplicationTasksRequest)(nil), // 26: temporal.server.api.adminservice.v1.ResendReplicationTasksRequest - (*GetTaskQueueTasksRequest)(nil), // 27: temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest - (*DeleteWorkflowExecutionRequest)(nil), // 28: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest - (*StreamWorkflowReplicationMessagesRequest)(nil), // 29: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest - (*GetNamespaceRequest)(nil), // 30: temporal.server.api.adminservice.v1.GetNamespaceRequest - (*GetDLQTasksRequest)(nil), // 31: 
temporal.server.api.adminservice.v1.GetDLQTasksRequest - (*PurgeDLQTasksRequest)(nil), // 32: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest - (*MergeDLQTasksRequest)(nil), // 33: temporal.server.api.adminservice.v1.MergeDLQTasksRequest - (*DescribeDLQJobRequest)(nil), // 34: temporal.server.api.adminservice.v1.DescribeDLQJobRequest - (*CancelDLQJobRequest)(nil), // 35: temporal.server.api.adminservice.v1.CancelDLQJobRequest - (*AddTasksRequest)(nil), // 36: temporal.server.api.adminservice.v1.AddTasksRequest - (*ListQueuesRequest)(nil), // 37: temporal.server.api.adminservice.v1.ListQueuesRequest - (*RebuildMutableStateResponse)(nil), // 38: temporal.server.api.adminservice.v1.RebuildMutableStateResponse - (*ImportWorkflowExecutionResponse)(nil), // 39: temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse - (*DescribeMutableStateResponse)(nil), // 40: temporal.server.api.adminservice.v1.DescribeMutableStateResponse - (*DescribeHistoryHostResponse)(nil), // 41: temporal.server.api.adminservice.v1.DescribeHistoryHostResponse - (*GetShardResponse)(nil), // 42: temporal.server.api.adminservice.v1.GetShardResponse - (*CloseShardResponse)(nil), // 43: temporal.server.api.adminservice.v1.CloseShardResponse - (*ListHistoryTasksResponse)(nil), // 44: temporal.server.api.adminservice.v1.ListHistoryTasksResponse - (*RemoveTaskResponse)(nil), // 45: temporal.server.api.adminservice.v1.RemoveTaskResponse - (*GetWorkflowExecutionRawHistoryV2Response)(nil), // 46: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response - (*GetWorkflowExecutionRawHistoryResponse)(nil), // 47: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse - (*GetReplicationMessagesResponse)(nil), // 48: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse - (*GetNamespaceReplicationMessagesResponse)(nil), // 49: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse - 
(*GetDLQReplicationMessagesResponse)(nil), // 50: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse - (*ReapplyEventsResponse)(nil), // 51: temporal.server.api.adminservice.v1.ReapplyEventsResponse - (*AddSearchAttributesResponse)(nil), // 52: temporal.server.api.adminservice.v1.AddSearchAttributesResponse - (*RemoveSearchAttributesResponse)(nil), // 53: temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse - (*GetSearchAttributesResponse)(nil), // 54: temporal.server.api.adminservice.v1.GetSearchAttributesResponse - (*DescribeClusterResponse)(nil), // 55: temporal.server.api.adminservice.v1.DescribeClusterResponse - (*ListClustersResponse)(nil), // 56: temporal.server.api.adminservice.v1.ListClustersResponse - (*ListClusterMembersResponse)(nil), // 57: temporal.server.api.adminservice.v1.ListClusterMembersResponse - (*AddOrUpdateRemoteClusterResponse)(nil), // 58: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse - (*RemoveRemoteClusterResponse)(nil), // 59: temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse - (*GetDLQMessagesResponse)(nil), // 60: temporal.server.api.adminservice.v1.GetDLQMessagesResponse - (*PurgeDLQMessagesResponse)(nil), // 61: temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse - (*MergeDLQMessagesResponse)(nil), // 62: temporal.server.api.adminservice.v1.MergeDLQMessagesResponse - (*RefreshWorkflowTasksResponse)(nil), // 63: temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse - (*ResendReplicationTasksResponse)(nil), // 64: temporal.server.api.adminservice.v1.ResendReplicationTasksResponse - (*GetTaskQueueTasksResponse)(nil), // 65: temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse - (*DeleteWorkflowExecutionResponse)(nil), // 66: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse - (*StreamWorkflowReplicationMessagesResponse)(nil), // 67: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse - 
(*GetNamespaceResponse)(nil), // 68: temporal.server.api.adminservice.v1.GetNamespaceResponse - (*GetDLQTasksResponse)(nil), // 69: temporal.server.api.adminservice.v1.GetDLQTasksResponse - (*PurgeDLQTasksResponse)(nil), // 70: temporal.server.api.adminservice.v1.PurgeDLQTasksResponse - (*MergeDLQTasksResponse)(nil), // 71: temporal.server.api.adminservice.v1.MergeDLQTasksResponse - (*DescribeDLQJobResponse)(nil), // 72: temporal.server.api.adminservice.v1.DescribeDLQJobResponse - (*CancelDLQJobResponse)(nil), // 73: temporal.server.api.adminservice.v1.CancelDLQJobResponse - (*AddTasksResponse)(nil), // 74: temporal.server.api.adminservice.v1.AddTasksResponse - (*ListQueuesResponse)(nil), // 75: temporal.server.api.adminservice.v1.ListQueuesResponse +var file_temporal_server_api_adminservice_v1_service_proto_goTypes = []any{ + (*RebuildMutableStateRequest)(nil), // 0: temporal.server.api.adminservice.v1.RebuildMutableStateRequest + (*ImportWorkflowExecutionRequest)(nil), // 1: temporal.server.api.adminservice.v1.ImportWorkflowExecutionRequest + (*DescribeMutableStateRequest)(nil), // 2: temporal.server.api.adminservice.v1.DescribeMutableStateRequest + (*DescribeHistoryHostRequest)(nil), // 3: temporal.server.api.adminservice.v1.DescribeHistoryHostRequest + (*GetShardRequest)(nil), // 4: temporal.server.api.adminservice.v1.GetShardRequest + (*CloseShardRequest)(nil), // 5: temporal.server.api.adminservice.v1.CloseShardRequest + (*ListHistoryTasksRequest)(nil), // 6: temporal.server.api.adminservice.v1.ListHistoryTasksRequest + (*RemoveTaskRequest)(nil), // 7: temporal.server.api.adminservice.v1.RemoveTaskRequest + (*GetWorkflowExecutionRawHistoryV2Request)(nil), // 8: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request + (*GetWorkflowExecutionRawHistoryRequest)(nil), // 9: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest + (*GetReplicationMessagesRequest)(nil), // 10: 
temporal.server.api.adminservice.v1.GetReplicationMessagesRequest + (*GetNamespaceReplicationMessagesRequest)(nil), // 11: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesRequest + (*GetDLQReplicationMessagesRequest)(nil), // 12: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesRequest + (*ReapplyEventsRequest)(nil), // 13: temporal.server.api.adminservice.v1.ReapplyEventsRequest + (*AddSearchAttributesRequest)(nil), // 14: temporal.server.api.adminservice.v1.AddSearchAttributesRequest + (*RemoveSearchAttributesRequest)(nil), // 15: temporal.server.api.adminservice.v1.RemoveSearchAttributesRequest + (*GetSearchAttributesRequest)(nil), // 16: temporal.server.api.adminservice.v1.GetSearchAttributesRequest + (*DescribeClusterRequest)(nil), // 17: temporal.server.api.adminservice.v1.DescribeClusterRequest + (*ListClustersRequest)(nil), // 18: temporal.server.api.adminservice.v1.ListClustersRequest + (*ListClusterMembersRequest)(nil), // 19: temporal.server.api.adminservice.v1.ListClusterMembersRequest + (*AddOrUpdateRemoteClusterRequest)(nil), // 20: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterRequest + (*RemoveRemoteClusterRequest)(nil), // 21: temporal.server.api.adminservice.v1.RemoveRemoteClusterRequest + (*GetDLQMessagesRequest)(nil), // 22: temporal.server.api.adminservice.v1.GetDLQMessagesRequest + (*PurgeDLQMessagesRequest)(nil), // 23: temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest + (*MergeDLQMessagesRequest)(nil), // 24: temporal.server.api.adminservice.v1.MergeDLQMessagesRequest + (*RefreshWorkflowTasksRequest)(nil), // 25: temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest + (*StartAdminBatchOperationRequest)(nil), // 26: temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest + (*ResendReplicationTasksRequest)(nil), // 27: temporal.server.api.adminservice.v1.ResendReplicationTasksRequest + (*GetTaskQueueTasksRequest)(nil), // 28: 
temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest + (*DeleteWorkflowExecutionRequest)(nil), // 29: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest + (*StreamWorkflowReplicationMessagesRequest)(nil), // 30: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest + (*GetNamespaceRequest)(nil), // 31: temporal.server.api.adminservice.v1.GetNamespaceRequest + (*GetDLQTasksRequest)(nil), // 32: temporal.server.api.adminservice.v1.GetDLQTasksRequest + (*PurgeDLQTasksRequest)(nil), // 33: temporal.server.api.adminservice.v1.PurgeDLQTasksRequest + (*MergeDLQTasksRequest)(nil), // 34: temporal.server.api.adminservice.v1.MergeDLQTasksRequest + (*DescribeDLQJobRequest)(nil), // 35: temporal.server.api.adminservice.v1.DescribeDLQJobRequest + (*CancelDLQJobRequest)(nil), // 36: temporal.server.api.adminservice.v1.CancelDLQJobRequest + (*AddTasksRequest)(nil), // 37: temporal.server.api.adminservice.v1.AddTasksRequest + (*ListQueuesRequest)(nil), // 38: temporal.server.api.adminservice.v1.ListQueuesRequest + (*DeepHealthCheckRequest)(nil), // 39: temporal.server.api.adminservice.v1.DeepHealthCheckRequest + (*SyncWorkflowStateRequest)(nil), // 40: temporal.server.api.adminservice.v1.SyncWorkflowStateRequest + (*GenerateLastHistoryReplicationTasksRequest)(nil), // 41: temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksRequest + (*DescribeTaskQueuePartitionRequest)(nil), // 42: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionRequest + (*ForceUnloadTaskQueuePartitionRequest)(nil), // 43: temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionRequest + (*GetTaskQueueUserDataRequest)(nil), // 44: temporal.server.api.adminservice.v1.GetTaskQueueUserDataRequest + (*MigrateScheduleRequest)(nil), // 45: temporal.server.api.adminservice.v1.MigrateScheduleRequest + (*RebuildMutableStateResponse)(nil), // 46: temporal.server.api.adminservice.v1.RebuildMutableStateResponse + 
(*ImportWorkflowExecutionResponse)(nil), // 47: temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse + (*DescribeMutableStateResponse)(nil), // 48: temporal.server.api.adminservice.v1.DescribeMutableStateResponse + (*DescribeHistoryHostResponse)(nil), // 49: temporal.server.api.adminservice.v1.DescribeHistoryHostResponse + (*GetShardResponse)(nil), // 50: temporal.server.api.adminservice.v1.GetShardResponse + (*CloseShardResponse)(nil), // 51: temporal.server.api.adminservice.v1.CloseShardResponse + (*ListHistoryTasksResponse)(nil), // 52: temporal.server.api.adminservice.v1.ListHistoryTasksResponse + (*RemoveTaskResponse)(nil), // 53: temporal.server.api.adminservice.v1.RemoveTaskResponse + (*GetWorkflowExecutionRawHistoryV2Response)(nil), // 54: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response + (*GetWorkflowExecutionRawHistoryResponse)(nil), // 55: temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse + (*GetReplicationMessagesResponse)(nil), // 56: temporal.server.api.adminservice.v1.GetReplicationMessagesResponse + (*GetNamespaceReplicationMessagesResponse)(nil), // 57: temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse + (*GetDLQReplicationMessagesResponse)(nil), // 58: temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse + (*ReapplyEventsResponse)(nil), // 59: temporal.server.api.adminservice.v1.ReapplyEventsResponse + (*AddSearchAttributesResponse)(nil), // 60: temporal.server.api.adminservice.v1.AddSearchAttributesResponse + (*RemoveSearchAttributesResponse)(nil), // 61: temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse + (*GetSearchAttributesResponse)(nil), // 62: temporal.server.api.adminservice.v1.GetSearchAttributesResponse + (*DescribeClusterResponse)(nil), // 63: temporal.server.api.adminservice.v1.DescribeClusterResponse + (*ListClustersResponse)(nil), // 64: temporal.server.api.adminservice.v1.ListClustersResponse + 
(*ListClusterMembersResponse)(nil), // 65: temporal.server.api.adminservice.v1.ListClusterMembersResponse + (*AddOrUpdateRemoteClusterResponse)(nil), // 66: temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse + (*RemoveRemoteClusterResponse)(nil), // 67: temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse + (*GetDLQMessagesResponse)(nil), // 68: temporal.server.api.adminservice.v1.GetDLQMessagesResponse + (*PurgeDLQMessagesResponse)(nil), // 69: temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse + (*MergeDLQMessagesResponse)(nil), // 70: temporal.server.api.adminservice.v1.MergeDLQMessagesResponse + (*RefreshWorkflowTasksResponse)(nil), // 71: temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse + (*StartAdminBatchOperationResponse)(nil), // 72: temporal.server.api.adminservice.v1.StartAdminBatchOperationResponse + (*ResendReplicationTasksResponse)(nil), // 73: temporal.server.api.adminservice.v1.ResendReplicationTasksResponse + (*GetTaskQueueTasksResponse)(nil), // 74: temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse + (*DeleteWorkflowExecutionResponse)(nil), // 75: temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse + (*StreamWorkflowReplicationMessagesResponse)(nil), // 76: temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse + (*GetNamespaceResponse)(nil), // 77: temporal.server.api.adminservice.v1.GetNamespaceResponse + (*GetDLQTasksResponse)(nil), // 78: temporal.server.api.adminservice.v1.GetDLQTasksResponse + (*PurgeDLQTasksResponse)(nil), // 79: temporal.server.api.adminservice.v1.PurgeDLQTasksResponse + (*MergeDLQTasksResponse)(nil), // 80: temporal.server.api.adminservice.v1.MergeDLQTasksResponse + (*DescribeDLQJobResponse)(nil), // 81: temporal.server.api.adminservice.v1.DescribeDLQJobResponse + (*CancelDLQJobResponse)(nil), // 82: temporal.server.api.adminservice.v1.CancelDLQJobResponse + (*AddTasksResponse)(nil), // 83: 
temporal.server.api.adminservice.v1.AddTasksResponse + (*ListQueuesResponse)(nil), // 84: temporal.server.api.adminservice.v1.ListQueuesResponse + (*DeepHealthCheckResponse)(nil), // 85: temporal.server.api.adminservice.v1.DeepHealthCheckResponse + (*SyncWorkflowStateResponse)(nil), // 86: temporal.server.api.adminservice.v1.SyncWorkflowStateResponse + (*GenerateLastHistoryReplicationTasksResponse)(nil), // 87: temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksResponse + (*DescribeTaskQueuePartitionResponse)(nil), // 88: temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse + (*ForceUnloadTaskQueuePartitionResponse)(nil), // 89: temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionResponse + (*GetTaskQueueUserDataResponse)(nil), // 90: temporal.server.api.adminservice.v1.GetTaskQueueUserDataResponse + (*MigrateScheduleResponse)(nil), // 91: temporal.server.api.adminservice.v1.MigrateScheduleResponse } var file_temporal_server_api_adminservice_v1_service_proto_depIdxs = []int32{ 0, // 0: temporal.server.api.adminservice.v1.AdminService.RebuildMutableState:input_type -> temporal.server.api.adminservice.v1.RebuildMutableStateRequest @@ -532,58 +199,74 @@ var file_temporal_server_api_adminservice_v1_service_proto_depIdxs = []int32{ 23, // 23: temporal.server.api.adminservice.v1.AdminService.PurgeDLQMessages:input_type -> temporal.server.api.adminservice.v1.PurgeDLQMessagesRequest 24, // 24: temporal.server.api.adminservice.v1.AdminService.MergeDLQMessages:input_type -> temporal.server.api.adminservice.v1.MergeDLQMessagesRequest 25, // 25: temporal.server.api.adminservice.v1.AdminService.RefreshWorkflowTasks:input_type -> temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest - 26, // 26: temporal.server.api.adminservice.v1.AdminService.ResendReplicationTasks:input_type -> temporal.server.api.adminservice.v1.ResendReplicationTasksRequest - 27, // 27: 
temporal.server.api.adminservice.v1.AdminService.GetTaskQueueTasks:input_type -> temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest - 28, // 28: temporal.server.api.adminservice.v1.AdminService.DeleteWorkflowExecution:input_type -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest - 29, // 29: temporal.server.api.adminservice.v1.AdminService.StreamWorkflowReplicationMessages:input_type -> temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest - 30, // 30: temporal.server.api.adminservice.v1.AdminService.GetNamespace:input_type -> temporal.server.api.adminservice.v1.GetNamespaceRequest - 31, // 31: temporal.server.api.adminservice.v1.AdminService.GetDLQTasks:input_type -> temporal.server.api.adminservice.v1.GetDLQTasksRequest - 32, // 32: temporal.server.api.adminservice.v1.AdminService.PurgeDLQTasks:input_type -> temporal.server.api.adminservice.v1.PurgeDLQTasksRequest - 33, // 33: temporal.server.api.adminservice.v1.AdminService.MergeDLQTasks:input_type -> temporal.server.api.adminservice.v1.MergeDLQTasksRequest - 34, // 34: temporal.server.api.adminservice.v1.AdminService.DescribeDLQJob:input_type -> temporal.server.api.adminservice.v1.DescribeDLQJobRequest - 35, // 35: temporal.server.api.adminservice.v1.AdminService.CancelDLQJob:input_type -> temporal.server.api.adminservice.v1.CancelDLQJobRequest - 36, // 36: temporal.server.api.adminservice.v1.AdminService.AddTasks:input_type -> temporal.server.api.adminservice.v1.AddTasksRequest - 37, // 37: temporal.server.api.adminservice.v1.AdminService.ListQueues:input_type -> temporal.server.api.adminservice.v1.ListQueuesRequest - 38, // 38: temporal.server.api.adminservice.v1.AdminService.RebuildMutableState:output_type -> temporal.server.api.adminservice.v1.RebuildMutableStateResponse - 39, // 39: temporal.server.api.adminservice.v1.AdminService.ImportWorkflowExecution:output_type -> temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse - 40, // 40: 
temporal.server.api.adminservice.v1.AdminService.DescribeMutableState:output_type -> temporal.server.api.adminservice.v1.DescribeMutableStateResponse - 41, // 41: temporal.server.api.adminservice.v1.AdminService.DescribeHistoryHost:output_type -> temporal.server.api.adminservice.v1.DescribeHistoryHostResponse - 42, // 42: temporal.server.api.adminservice.v1.AdminService.GetShard:output_type -> temporal.server.api.adminservice.v1.GetShardResponse - 43, // 43: temporal.server.api.adminservice.v1.AdminService.CloseShard:output_type -> temporal.server.api.adminservice.v1.CloseShardResponse - 44, // 44: temporal.server.api.adminservice.v1.AdminService.ListHistoryTasks:output_type -> temporal.server.api.adminservice.v1.ListHistoryTasksResponse - 45, // 45: temporal.server.api.adminservice.v1.AdminService.RemoveTask:output_type -> temporal.server.api.adminservice.v1.RemoveTaskResponse - 46, // 46: temporal.server.api.adminservice.v1.AdminService.GetWorkflowExecutionRawHistoryV2:output_type -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response - 47, // 47: temporal.server.api.adminservice.v1.AdminService.GetWorkflowExecutionRawHistory:output_type -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse - 48, // 48: temporal.server.api.adminservice.v1.AdminService.GetReplicationMessages:output_type -> temporal.server.api.adminservice.v1.GetReplicationMessagesResponse - 49, // 49: temporal.server.api.adminservice.v1.AdminService.GetNamespaceReplicationMessages:output_type -> temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse - 50, // 50: temporal.server.api.adminservice.v1.AdminService.GetDLQReplicationMessages:output_type -> temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse - 51, // 51: temporal.server.api.adminservice.v1.AdminService.ReapplyEvents:output_type -> temporal.server.api.adminservice.v1.ReapplyEventsResponse - 52, // 52: 
temporal.server.api.adminservice.v1.AdminService.AddSearchAttributes:output_type -> temporal.server.api.adminservice.v1.AddSearchAttributesResponse - 53, // 53: temporal.server.api.adminservice.v1.AdminService.RemoveSearchAttributes:output_type -> temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse - 54, // 54: temporal.server.api.adminservice.v1.AdminService.GetSearchAttributes:output_type -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse - 55, // 55: temporal.server.api.adminservice.v1.AdminService.DescribeCluster:output_type -> temporal.server.api.adminservice.v1.DescribeClusterResponse - 56, // 56: temporal.server.api.adminservice.v1.AdminService.ListClusters:output_type -> temporal.server.api.adminservice.v1.ListClustersResponse - 57, // 57: temporal.server.api.adminservice.v1.AdminService.ListClusterMembers:output_type -> temporal.server.api.adminservice.v1.ListClusterMembersResponse - 58, // 58: temporal.server.api.adminservice.v1.AdminService.AddOrUpdateRemoteCluster:output_type -> temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse - 59, // 59: temporal.server.api.adminservice.v1.AdminService.RemoveRemoteCluster:output_type -> temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse - 60, // 60: temporal.server.api.adminservice.v1.AdminService.GetDLQMessages:output_type -> temporal.server.api.adminservice.v1.GetDLQMessagesResponse - 61, // 61: temporal.server.api.adminservice.v1.AdminService.PurgeDLQMessages:output_type -> temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse - 62, // 62: temporal.server.api.adminservice.v1.AdminService.MergeDLQMessages:output_type -> temporal.server.api.adminservice.v1.MergeDLQMessagesResponse - 63, // 63: temporal.server.api.adminservice.v1.AdminService.RefreshWorkflowTasks:output_type -> temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse - 64, // 64: temporal.server.api.adminservice.v1.AdminService.ResendReplicationTasks:output_type -> 
temporal.server.api.adminservice.v1.ResendReplicationTasksResponse - 65, // 65: temporal.server.api.adminservice.v1.AdminService.GetTaskQueueTasks:output_type -> temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse - 66, // 66: temporal.server.api.adminservice.v1.AdminService.DeleteWorkflowExecution:output_type -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse - 67, // 67: temporal.server.api.adminservice.v1.AdminService.StreamWorkflowReplicationMessages:output_type -> temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse - 68, // 68: temporal.server.api.adminservice.v1.AdminService.GetNamespace:output_type -> temporal.server.api.adminservice.v1.GetNamespaceResponse - 69, // 69: temporal.server.api.adminservice.v1.AdminService.GetDLQTasks:output_type -> temporal.server.api.adminservice.v1.GetDLQTasksResponse - 70, // 70: temporal.server.api.adminservice.v1.AdminService.PurgeDLQTasks:output_type -> temporal.server.api.adminservice.v1.PurgeDLQTasksResponse - 71, // 71: temporal.server.api.adminservice.v1.AdminService.MergeDLQTasks:output_type -> temporal.server.api.adminservice.v1.MergeDLQTasksResponse - 72, // 72: temporal.server.api.adminservice.v1.AdminService.DescribeDLQJob:output_type -> temporal.server.api.adminservice.v1.DescribeDLQJobResponse - 73, // 73: temporal.server.api.adminservice.v1.AdminService.CancelDLQJob:output_type -> temporal.server.api.adminservice.v1.CancelDLQJobResponse - 74, // 74: temporal.server.api.adminservice.v1.AdminService.AddTasks:output_type -> temporal.server.api.adminservice.v1.AddTasksResponse - 75, // 75: temporal.server.api.adminservice.v1.AdminService.ListQueues:output_type -> temporal.server.api.adminservice.v1.ListQueuesResponse - 38, // [38:76] is the sub-list for method output_type - 0, // [0:38] is the sub-list for method input_type + 26, // 26: temporal.server.api.adminservice.v1.AdminService.StartAdminBatchOperation:input_type -> 
temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest + 27, // 27: temporal.server.api.adminservice.v1.AdminService.ResendReplicationTasks:input_type -> temporal.server.api.adminservice.v1.ResendReplicationTasksRequest + 28, // 28: temporal.server.api.adminservice.v1.AdminService.GetTaskQueueTasks:input_type -> temporal.server.api.adminservice.v1.GetTaskQueueTasksRequest + 29, // 29: temporal.server.api.adminservice.v1.AdminService.DeleteWorkflowExecution:input_type -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest + 30, // 30: temporal.server.api.adminservice.v1.AdminService.StreamWorkflowReplicationMessages:input_type -> temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesRequest + 31, // 31: temporal.server.api.adminservice.v1.AdminService.GetNamespace:input_type -> temporal.server.api.adminservice.v1.GetNamespaceRequest + 32, // 32: temporal.server.api.adminservice.v1.AdminService.GetDLQTasks:input_type -> temporal.server.api.adminservice.v1.GetDLQTasksRequest + 33, // 33: temporal.server.api.adminservice.v1.AdminService.PurgeDLQTasks:input_type -> temporal.server.api.adminservice.v1.PurgeDLQTasksRequest + 34, // 34: temporal.server.api.adminservice.v1.AdminService.MergeDLQTasks:input_type -> temporal.server.api.adminservice.v1.MergeDLQTasksRequest + 35, // 35: temporal.server.api.adminservice.v1.AdminService.DescribeDLQJob:input_type -> temporal.server.api.adminservice.v1.DescribeDLQJobRequest + 36, // 36: temporal.server.api.adminservice.v1.AdminService.CancelDLQJob:input_type -> temporal.server.api.adminservice.v1.CancelDLQJobRequest + 37, // 37: temporal.server.api.adminservice.v1.AdminService.AddTasks:input_type -> temporal.server.api.adminservice.v1.AddTasksRequest + 38, // 38: temporal.server.api.adminservice.v1.AdminService.ListQueues:input_type -> temporal.server.api.adminservice.v1.ListQueuesRequest + 39, // 39: temporal.server.api.adminservice.v1.AdminService.DeepHealthCheck:input_type -> 
temporal.server.api.adminservice.v1.DeepHealthCheckRequest + 40, // 40: temporal.server.api.adminservice.v1.AdminService.SyncWorkflowState:input_type -> temporal.server.api.adminservice.v1.SyncWorkflowStateRequest + 41, // 41: temporal.server.api.adminservice.v1.AdminService.GenerateLastHistoryReplicationTasks:input_type -> temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksRequest + 42, // 42: temporal.server.api.adminservice.v1.AdminService.DescribeTaskQueuePartition:input_type -> temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionRequest + 43, // 43: temporal.server.api.adminservice.v1.AdminService.ForceUnloadTaskQueuePartition:input_type -> temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionRequest + 44, // 44: temporal.server.api.adminservice.v1.AdminService.GetTaskQueueUserData:input_type -> temporal.server.api.adminservice.v1.GetTaskQueueUserDataRequest + 45, // 45: temporal.server.api.adminservice.v1.AdminService.MigrateSchedule:input_type -> temporal.server.api.adminservice.v1.MigrateScheduleRequest + 46, // 46: temporal.server.api.adminservice.v1.AdminService.RebuildMutableState:output_type -> temporal.server.api.adminservice.v1.RebuildMutableStateResponse + 47, // 47: temporal.server.api.adminservice.v1.AdminService.ImportWorkflowExecution:output_type -> temporal.server.api.adminservice.v1.ImportWorkflowExecutionResponse + 48, // 48: temporal.server.api.adminservice.v1.AdminService.DescribeMutableState:output_type -> temporal.server.api.adminservice.v1.DescribeMutableStateResponse + 49, // 49: temporal.server.api.adminservice.v1.AdminService.DescribeHistoryHost:output_type -> temporal.server.api.adminservice.v1.DescribeHistoryHostResponse + 50, // 50: temporal.server.api.adminservice.v1.AdminService.GetShard:output_type -> temporal.server.api.adminservice.v1.GetShardResponse + 51, // 51: temporal.server.api.adminservice.v1.AdminService.CloseShard:output_type -> 
temporal.server.api.adminservice.v1.CloseShardResponse + 52, // 52: temporal.server.api.adminservice.v1.AdminService.ListHistoryTasks:output_type -> temporal.server.api.adminservice.v1.ListHistoryTasksResponse + 53, // 53: temporal.server.api.adminservice.v1.AdminService.RemoveTask:output_type -> temporal.server.api.adminservice.v1.RemoveTaskResponse + 54, // 54: temporal.server.api.adminservice.v1.AdminService.GetWorkflowExecutionRawHistoryV2:output_type -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response + 55, // 55: temporal.server.api.adminservice.v1.AdminService.GetWorkflowExecutionRawHistory:output_type -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse + 56, // 56: temporal.server.api.adminservice.v1.AdminService.GetReplicationMessages:output_type -> temporal.server.api.adminservice.v1.GetReplicationMessagesResponse + 57, // 57: temporal.server.api.adminservice.v1.AdminService.GetNamespaceReplicationMessages:output_type -> temporal.server.api.adminservice.v1.GetNamespaceReplicationMessagesResponse + 58, // 58: temporal.server.api.adminservice.v1.AdminService.GetDLQReplicationMessages:output_type -> temporal.server.api.adminservice.v1.GetDLQReplicationMessagesResponse + 59, // 59: temporal.server.api.adminservice.v1.AdminService.ReapplyEvents:output_type -> temporal.server.api.adminservice.v1.ReapplyEventsResponse + 60, // 60: temporal.server.api.adminservice.v1.AdminService.AddSearchAttributes:output_type -> temporal.server.api.adminservice.v1.AddSearchAttributesResponse + 61, // 61: temporal.server.api.adminservice.v1.AdminService.RemoveSearchAttributes:output_type -> temporal.server.api.adminservice.v1.RemoveSearchAttributesResponse + 62, // 62: temporal.server.api.adminservice.v1.AdminService.GetSearchAttributes:output_type -> temporal.server.api.adminservice.v1.GetSearchAttributesResponse + 63, // 63: temporal.server.api.adminservice.v1.AdminService.DescribeCluster:output_type -> 
temporal.server.api.adminservice.v1.DescribeClusterResponse + 64, // 64: temporal.server.api.adminservice.v1.AdminService.ListClusters:output_type -> temporal.server.api.adminservice.v1.ListClustersResponse + 65, // 65: temporal.server.api.adminservice.v1.AdminService.ListClusterMembers:output_type -> temporal.server.api.adminservice.v1.ListClusterMembersResponse + 66, // 66: temporal.server.api.adminservice.v1.AdminService.AddOrUpdateRemoteCluster:output_type -> temporal.server.api.adminservice.v1.AddOrUpdateRemoteClusterResponse + 67, // 67: temporal.server.api.adminservice.v1.AdminService.RemoveRemoteCluster:output_type -> temporal.server.api.adminservice.v1.RemoveRemoteClusterResponse + 68, // 68: temporal.server.api.adminservice.v1.AdminService.GetDLQMessages:output_type -> temporal.server.api.adminservice.v1.GetDLQMessagesResponse + 69, // 69: temporal.server.api.adminservice.v1.AdminService.PurgeDLQMessages:output_type -> temporal.server.api.adminservice.v1.PurgeDLQMessagesResponse + 70, // 70: temporal.server.api.adminservice.v1.AdminService.MergeDLQMessages:output_type -> temporal.server.api.adminservice.v1.MergeDLQMessagesResponse + 71, // 71: temporal.server.api.adminservice.v1.AdminService.RefreshWorkflowTasks:output_type -> temporal.server.api.adminservice.v1.RefreshWorkflowTasksResponse + 72, // 72: temporal.server.api.adminservice.v1.AdminService.StartAdminBatchOperation:output_type -> temporal.server.api.adminservice.v1.StartAdminBatchOperationResponse + 73, // 73: temporal.server.api.adminservice.v1.AdminService.ResendReplicationTasks:output_type -> temporal.server.api.adminservice.v1.ResendReplicationTasksResponse + 74, // 74: temporal.server.api.adminservice.v1.AdminService.GetTaskQueueTasks:output_type -> temporal.server.api.adminservice.v1.GetTaskQueueTasksResponse + 75, // 75: temporal.server.api.adminservice.v1.AdminService.DeleteWorkflowExecution:output_type -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse + 76, // 76: 
temporal.server.api.adminservice.v1.AdminService.StreamWorkflowReplicationMessages:output_type -> temporal.server.api.adminservice.v1.StreamWorkflowReplicationMessagesResponse + 77, // 77: temporal.server.api.adminservice.v1.AdminService.GetNamespace:output_type -> temporal.server.api.adminservice.v1.GetNamespaceResponse + 78, // 78: temporal.server.api.adminservice.v1.AdminService.GetDLQTasks:output_type -> temporal.server.api.adminservice.v1.GetDLQTasksResponse + 79, // 79: temporal.server.api.adminservice.v1.AdminService.PurgeDLQTasks:output_type -> temporal.server.api.adminservice.v1.PurgeDLQTasksResponse + 80, // 80: temporal.server.api.adminservice.v1.AdminService.MergeDLQTasks:output_type -> temporal.server.api.adminservice.v1.MergeDLQTasksResponse + 81, // 81: temporal.server.api.adminservice.v1.AdminService.DescribeDLQJob:output_type -> temporal.server.api.adminservice.v1.DescribeDLQJobResponse + 82, // 82: temporal.server.api.adminservice.v1.AdminService.CancelDLQJob:output_type -> temporal.server.api.adminservice.v1.CancelDLQJobResponse + 83, // 83: temporal.server.api.adminservice.v1.AdminService.AddTasks:output_type -> temporal.server.api.adminservice.v1.AddTasksResponse + 84, // 84: temporal.server.api.adminservice.v1.AdminService.ListQueues:output_type -> temporal.server.api.adminservice.v1.ListQueuesResponse + 85, // 85: temporal.server.api.adminservice.v1.AdminService.DeepHealthCheck:output_type -> temporal.server.api.adminservice.v1.DeepHealthCheckResponse + 86, // 86: temporal.server.api.adminservice.v1.AdminService.SyncWorkflowState:output_type -> temporal.server.api.adminservice.v1.SyncWorkflowStateResponse + 87, // 87: temporal.server.api.adminservice.v1.AdminService.GenerateLastHistoryReplicationTasks:output_type -> temporal.server.api.adminservice.v1.GenerateLastHistoryReplicationTasksResponse + 88, // 88: temporal.server.api.adminservice.v1.AdminService.DescribeTaskQueuePartition:output_type -> 
temporal.server.api.adminservice.v1.DescribeTaskQueuePartitionResponse + 89, // 89: temporal.server.api.adminservice.v1.AdminService.ForceUnloadTaskQueuePartition:output_type -> temporal.server.api.adminservice.v1.ForceUnloadTaskQueuePartitionResponse + 90, // 90: temporal.server.api.adminservice.v1.AdminService.GetTaskQueueUserData:output_type -> temporal.server.api.adminservice.v1.GetTaskQueueUserDataResponse + 91, // 91: temporal.server.api.adminservice.v1.AdminService.MigrateSchedule:output_type -> temporal.server.api.adminservice.v1.MigrateScheduleResponse + 46, // [46:92] is the sub-list for method output_type + 0, // [0:46] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -599,7 +282,7 @@ func file_temporal_server_api_adminservice_v1_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_adminservice_v1_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_adminservice_v1_service_proto_rawDesc), len(file_temporal_server_api_adminservice_v1_service_proto_rawDesc)), NumEnums: 0, NumMessages: 0, NumExtensions: 0, @@ -609,7 +292,6 @@ func file_temporal_server_api_adminservice_v1_service_proto_init() { DependencyIndexes: file_temporal_server_api_adminservice_v1_service_proto_depIdxs, }.Build() File_temporal_server_api_adminservice_v1_service_proto = out.File - file_temporal_server_api_adminservice_v1_service_proto_rawDesc = nil file_temporal_server_api_adminservice_v1_service_proto_goTypes = nil file_temporal_server_api_adminservice_v1_service_proto_depIdxs = nil } diff --git a/api/adminservice/v1/service_grpc.pb.go b/api/adminservice/v1/service_grpc.pb.go index 1cd098ae081..9e64fcb17ff 100644 --- a/api/adminservice/v1/service_grpc.pb.go +++ 
b/api/adminservice/v1/service_grpc.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// plugins: // - protoc-gen-go-grpc @@ -42,44 +20,52 @@ import ( const _ = grpc.SupportPackageIsVersion7 const ( - AdminService_RebuildMutableState_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RebuildMutableState" - AdminService_ImportWorkflowExecution_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ImportWorkflowExecution" - AdminService_DescribeMutableState_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeMutableState" - AdminService_DescribeHistoryHost_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeHistoryHost" - AdminService_GetShard_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetShard" - AdminService_CloseShard_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/CloseShard" - AdminService_ListHistoryTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListHistoryTasks" - AdminService_RemoveTask_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RemoveTask" - AdminService_GetWorkflowExecutionRawHistoryV2_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetWorkflowExecutionRawHistoryV2" - AdminService_GetWorkflowExecutionRawHistory_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetWorkflowExecutionRawHistory" - AdminService_GetReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetReplicationMessages" - AdminService_GetNamespaceReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetNamespaceReplicationMessages" - AdminService_GetDLQReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetDLQReplicationMessages" - AdminService_ReapplyEvents_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ReapplyEvents" - AdminService_AddSearchAttributes_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/AddSearchAttributes" - 
AdminService_RemoveSearchAttributes_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RemoveSearchAttributes" - AdminService_GetSearchAttributes_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetSearchAttributes" - AdminService_DescribeCluster_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeCluster" - AdminService_ListClusters_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListClusters" - AdminService_ListClusterMembers_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListClusterMembers" - AdminService_AddOrUpdateRemoteCluster_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/AddOrUpdateRemoteCluster" - AdminService_RemoveRemoteCluster_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RemoveRemoteCluster" - AdminService_GetDLQMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetDLQMessages" - AdminService_PurgeDLQMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/PurgeDLQMessages" - AdminService_MergeDLQMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/MergeDLQMessages" - AdminService_RefreshWorkflowTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RefreshWorkflowTasks" - AdminService_ResendReplicationTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ResendReplicationTasks" - AdminService_GetTaskQueueTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetTaskQueueTasks" - AdminService_DeleteWorkflowExecution_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DeleteWorkflowExecution" - AdminService_StreamWorkflowReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/StreamWorkflowReplicationMessages" - AdminService_GetNamespace_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetNamespace" - 
AdminService_GetDLQTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetDLQTasks" - AdminService_PurgeDLQTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/PurgeDLQTasks" - AdminService_MergeDLQTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/MergeDLQTasks" - AdminService_DescribeDLQJob_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeDLQJob" - AdminService_CancelDLQJob_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/CancelDLQJob" - AdminService_AddTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/AddTasks" - AdminService_ListQueues_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListQueues" + AdminService_RebuildMutableState_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RebuildMutableState" + AdminService_ImportWorkflowExecution_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ImportWorkflowExecution" + AdminService_DescribeMutableState_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeMutableState" + AdminService_DescribeHistoryHost_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeHistoryHost" + AdminService_GetShard_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetShard" + AdminService_CloseShard_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/CloseShard" + AdminService_ListHistoryTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListHistoryTasks" + AdminService_RemoveTask_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RemoveTask" + AdminService_GetWorkflowExecutionRawHistoryV2_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetWorkflowExecutionRawHistoryV2" + AdminService_GetWorkflowExecutionRawHistory_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetWorkflowExecutionRawHistory" + 
AdminService_GetReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetReplicationMessages" + AdminService_GetNamespaceReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetNamespaceReplicationMessages" + AdminService_GetDLQReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetDLQReplicationMessages" + AdminService_ReapplyEvents_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ReapplyEvents" + AdminService_AddSearchAttributes_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/AddSearchAttributes" + AdminService_RemoveSearchAttributes_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RemoveSearchAttributes" + AdminService_GetSearchAttributes_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetSearchAttributes" + AdminService_DescribeCluster_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeCluster" + AdminService_ListClusters_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListClusters" + AdminService_ListClusterMembers_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListClusterMembers" + AdminService_AddOrUpdateRemoteCluster_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/AddOrUpdateRemoteCluster" + AdminService_RemoveRemoteCluster_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RemoveRemoteCluster" + AdminService_GetDLQMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetDLQMessages" + AdminService_PurgeDLQMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/PurgeDLQMessages" + AdminService_MergeDLQMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/MergeDLQMessages" + AdminService_RefreshWorkflowTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/RefreshWorkflowTasks" + 
AdminService_StartAdminBatchOperation_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/StartAdminBatchOperation" + AdminService_ResendReplicationTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ResendReplicationTasks" + AdminService_GetTaskQueueTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetTaskQueueTasks" + AdminService_DeleteWorkflowExecution_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DeleteWorkflowExecution" + AdminService_StreamWorkflowReplicationMessages_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/StreamWorkflowReplicationMessages" + AdminService_GetNamespace_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetNamespace" + AdminService_GetDLQTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetDLQTasks" + AdminService_PurgeDLQTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/PurgeDLQTasks" + AdminService_MergeDLQTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/MergeDLQTasks" + AdminService_DescribeDLQJob_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DescribeDLQJob" + AdminService_CancelDLQJob_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/CancelDLQJob" + AdminService_AddTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/AddTasks" + AdminService_ListQueues_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ListQueues" + AdminService_DeepHealthCheck_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/DeepHealthCheck" + AdminService_SyncWorkflowState_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/SyncWorkflowState" + AdminService_GenerateLastHistoryReplicationTasks_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GenerateLastHistoryReplicationTasks" + AdminService_DescribeTaskQueuePartition_FullMethodName = 
"/temporal.server.api.adminservice.v1.AdminService/DescribeTaskQueuePartition" + AdminService_ForceUnloadTaskQueuePartition_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/ForceUnloadTaskQueuePartition" + AdminService_GetTaskQueueUserData_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/GetTaskQueueUserData" + AdminService_MigrateSchedule_FullMethodName = "/temporal.server.api.adminservice.v1.AdminService/MigrateSchedule" ) // AdminServiceClient is the client API for AdminService service. @@ -147,6 +133,8 @@ type AdminServiceClient interface { MergeDLQMessages(ctx context.Context, in *MergeDLQMessagesRequest, opts ...grpc.CallOption) (*MergeDLQMessagesResponse, error) // RefreshWorkflowTasks refreshes all tasks of a workflow. RefreshWorkflowTasks(ctx context.Context, in *RefreshWorkflowTasksRequest, opts ...grpc.CallOption) (*RefreshWorkflowTasksResponse, error) + // StartAdminBatchOperation starts an admin batch operation. Supports internal operations like RefreshWorkflowTasks. + StartAdminBatchOperation(ctx context.Context, in *StartAdminBatchOperationRequest, opts ...grpc.CallOption) (*StartAdminBatchOperationResponse, error) // ResendReplicationTasks requests replication tasks from remote cluster and apply tasks to current cluster. ResendReplicationTasks(ctx context.Context, in *ResendReplicationTasksRequest, opts ...grpc.CallOption) (*ResendReplicationTasksResponse, error) // GetTaskQueueTasks returns tasks from task queue. 
@@ -165,6 +153,14 @@ type AdminServiceClient interface { CancelDLQJob(ctx context.Context, in *CancelDLQJobRequest, opts ...grpc.CallOption) (*CancelDLQJobResponse, error) AddTasks(ctx context.Context, in *AddTasksRequest, opts ...grpc.CallOption) (*AddTasksResponse, error) ListQueues(ctx context.Context, in *ListQueuesRequest, opts ...grpc.CallOption) (*ListQueuesResponse, error) + DeepHealthCheck(ctx context.Context, in *DeepHealthCheckRequest, opts ...grpc.CallOption) (*DeepHealthCheckResponse, error) + SyncWorkflowState(ctx context.Context, in *SyncWorkflowStateRequest, opts ...grpc.CallOption) (*SyncWorkflowStateResponse, error) + GenerateLastHistoryReplicationTasks(ctx context.Context, in *GenerateLastHistoryReplicationTasksRequest, opts ...grpc.CallOption) (*GenerateLastHistoryReplicationTasksResponse, error) + DescribeTaskQueuePartition(ctx context.Context, in *DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption) (*DescribeTaskQueuePartitionResponse, error) + ForceUnloadTaskQueuePartition(ctx context.Context, in *ForceUnloadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*ForceUnloadTaskQueuePartitionResponse, error) + GetTaskQueueUserData(ctx context.Context, in *GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*GetTaskQueueUserDataResponse, error) + // MigrateSchedule migrates a schedule between V1 (workflow-backed) and V2 (CHASM-backed) implementations. 
+ MigrateSchedule(ctx context.Context, in *MigrateScheduleRequest, opts ...grpc.CallOption) (*MigrateScheduleResponse, error) } type adminServiceClient struct { @@ -409,6 +405,15 @@ func (c *adminServiceClient) RefreshWorkflowTasks(ctx context.Context, in *Refre return out, nil } +func (c *adminServiceClient) StartAdminBatchOperation(ctx context.Context, in *StartAdminBatchOperationRequest, opts ...grpc.CallOption) (*StartAdminBatchOperationResponse, error) { + out := new(StartAdminBatchOperationResponse) + err := c.cc.Invoke(ctx, AdminService_StartAdminBatchOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *adminServiceClient) ResendReplicationTasks(ctx context.Context, in *ResendReplicationTasksRequest, opts ...grpc.CallOption) (*ResendReplicationTasksResponse, error) { out := new(ResendReplicationTasksResponse) err := c.cc.Invoke(ctx, AdminService_ResendReplicationTasks_FullMethodName, in, out, opts...) @@ -539,6 +544,69 @@ func (c *adminServiceClient) ListQueues(ctx context.Context, in *ListQueuesReque return out, nil } +func (c *adminServiceClient) DeepHealthCheck(ctx context.Context, in *DeepHealthCheckRequest, opts ...grpc.CallOption) (*DeepHealthCheckResponse, error) { + out := new(DeepHealthCheckResponse) + err := c.cc.Invoke(ctx, AdminService_DeepHealthCheck_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *adminServiceClient) SyncWorkflowState(ctx context.Context, in *SyncWorkflowStateRequest, opts ...grpc.CallOption) (*SyncWorkflowStateResponse, error) { + out := new(SyncWorkflowStateResponse) + err := c.cc.Invoke(ctx, AdminService_SyncWorkflowState_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *adminServiceClient) GenerateLastHistoryReplicationTasks(ctx context.Context, in *GenerateLastHistoryReplicationTasksRequest, opts ...grpc.CallOption) (*GenerateLastHistoryReplicationTasksResponse, error) { + out := new(GenerateLastHistoryReplicationTasksResponse) + err := c.cc.Invoke(ctx, AdminService_GenerateLastHistoryReplicationTasks_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *adminServiceClient) DescribeTaskQueuePartition(ctx context.Context, in *DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption) (*DescribeTaskQueuePartitionResponse, error) { + out := new(DescribeTaskQueuePartitionResponse) + err := c.cc.Invoke(ctx, AdminService_DescribeTaskQueuePartition_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *adminServiceClient) ForceUnloadTaskQueuePartition(ctx context.Context, in *ForceUnloadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*ForceUnloadTaskQueuePartitionResponse, error) { + out := new(ForceUnloadTaskQueuePartitionResponse) + err := c.cc.Invoke(ctx, AdminService_ForceUnloadTaskQueuePartition_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *adminServiceClient) GetTaskQueueUserData(ctx context.Context, in *GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*GetTaskQueueUserDataResponse, error) { + out := new(GetTaskQueueUserDataResponse) + err := c.cc.Invoke(ctx, AdminService_GetTaskQueueUserData_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *adminServiceClient) MigrateSchedule(ctx context.Context, in *MigrateScheduleRequest, opts ...grpc.CallOption) (*MigrateScheduleResponse, error) { + out := new(MigrateScheduleResponse) + err := c.cc.Invoke(ctx, AdminService_MigrateSchedule_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // AdminServiceServer is the server API for AdminService service. // All implementations must embed UnimplementedAdminServiceServer // for forward compatibility @@ -604,6 +672,8 @@ type AdminServiceServer interface { MergeDLQMessages(context.Context, *MergeDLQMessagesRequest) (*MergeDLQMessagesResponse, error) // RefreshWorkflowTasks refreshes all tasks of a workflow. RefreshWorkflowTasks(context.Context, *RefreshWorkflowTasksRequest) (*RefreshWorkflowTasksResponse, error) + // StartAdminBatchOperation starts an admin batch operation. Supports internal operations like RefreshWorkflowTasks. + StartAdminBatchOperation(context.Context, *StartAdminBatchOperationRequest) (*StartAdminBatchOperationResponse, error) // ResendReplicationTasks requests replication tasks from remote cluster and apply tasks to current cluster. ResendReplicationTasks(context.Context, *ResendReplicationTasksRequest) (*ResendReplicationTasksResponse, error) // GetTaskQueueTasks returns tasks from task queue. 
@@ -622,6 +692,14 @@ type AdminServiceServer interface { CancelDLQJob(context.Context, *CancelDLQJobRequest) (*CancelDLQJobResponse, error) AddTasks(context.Context, *AddTasksRequest) (*AddTasksResponse, error) ListQueues(context.Context, *ListQueuesRequest) (*ListQueuesResponse, error) + DeepHealthCheck(context.Context, *DeepHealthCheckRequest) (*DeepHealthCheckResponse, error) + SyncWorkflowState(context.Context, *SyncWorkflowStateRequest) (*SyncWorkflowStateResponse, error) + GenerateLastHistoryReplicationTasks(context.Context, *GenerateLastHistoryReplicationTasksRequest) (*GenerateLastHistoryReplicationTasksResponse, error) + DescribeTaskQueuePartition(context.Context, *DescribeTaskQueuePartitionRequest) (*DescribeTaskQueuePartitionResponse, error) + ForceUnloadTaskQueuePartition(context.Context, *ForceUnloadTaskQueuePartitionRequest) (*ForceUnloadTaskQueuePartitionResponse, error) + GetTaskQueueUserData(context.Context, *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) + // MigrateSchedule migrates a schedule between V1 (workflow-backed) and V2 (CHASM-backed) implementations. 
+ MigrateSchedule(context.Context, *MigrateScheduleRequest) (*MigrateScheduleResponse, error) mustEmbedUnimplementedAdminServiceServer() } @@ -707,6 +785,9 @@ func (UnimplementedAdminServiceServer) MergeDLQMessages(context.Context, *MergeD func (UnimplementedAdminServiceServer) RefreshWorkflowTasks(context.Context, *RefreshWorkflowTasksRequest) (*RefreshWorkflowTasksResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RefreshWorkflowTasks not implemented") } +func (UnimplementedAdminServiceServer) StartAdminBatchOperation(context.Context, *StartAdminBatchOperationRequest) (*StartAdminBatchOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartAdminBatchOperation not implemented") +} func (UnimplementedAdminServiceServer) ResendReplicationTasks(context.Context, *ResendReplicationTasksRequest) (*ResendReplicationTasksResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ResendReplicationTasks not implemented") } @@ -743,6 +824,27 @@ func (UnimplementedAdminServiceServer) AddTasks(context.Context, *AddTasksReques func (UnimplementedAdminServiceServer) ListQueues(context.Context, *ListQueuesRequest) (*ListQueuesResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListQueues not implemented") } +func (UnimplementedAdminServiceServer) DeepHealthCheck(context.Context, *DeepHealthCheckRequest) (*DeepHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeepHealthCheck not implemented") +} +func (UnimplementedAdminServiceServer) SyncWorkflowState(context.Context, *SyncWorkflowStateRequest) (*SyncWorkflowStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncWorkflowState not implemented") +} +func (UnimplementedAdminServiceServer) GenerateLastHistoryReplicationTasks(context.Context, *GenerateLastHistoryReplicationTasksRequest) (*GenerateLastHistoryReplicationTasksResponse, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method GenerateLastHistoryReplicationTasks not implemented") +} +func (UnimplementedAdminServiceServer) DescribeTaskQueuePartition(context.Context, *DescribeTaskQueuePartitionRequest) (*DescribeTaskQueuePartitionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeTaskQueuePartition not implemented") +} +func (UnimplementedAdminServiceServer) ForceUnloadTaskQueuePartition(context.Context, *ForceUnloadTaskQueuePartitionRequest) (*ForceUnloadTaskQueuePartitionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForceUnloadTaskQueuePartition not implemented") +} +func (UnimplementedAdminServiceServer) GetTaskQueueUserData(context.Context, *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTaskQueueUserData not implemented") +} +func (UnimplementedAdminServiceServer) MigrateSchedule(context.Context, *MigrateScheduleRequest) (*MigrateScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MigrateSchedule not implemented") +} func (UnimplementedAdminServiceServer) mustEmbedUnimplementedAdminServiceServer() {} // UnsafeAdminServiceServer may be embedded to opt out of forward compatibility for this service. 
@@ -1224,6 +1326,24 @@ func _AdminService_RefreshWorkflowTasks_Handler(srv interface{}, ctx context.Con return interceptor(ctx, in, info, handler) } +func _AdminService_StartAdminBatchOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartAdminBatchOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).StartAdminBatchOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_StartAdminBatchOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).StartAdminBatchOperation(ctx, req.(*StartAdminBatchOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _AdminService_ResendReplicationTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ResendReplicationTasksRequest) if err := dec(in); err != nil { @@ -1448,6 +1568,132 @@ func _AdminService_ListQueues_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _AdminService_DeepHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeepHealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).DeepHealthCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_DeepHealthCheck_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).DeepHealthCheck(ctx, req.(*DeepHealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_AdminService_SyncWorkflowState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SyncWorkflowStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).SyncWorkflowState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_SyncWorkflowState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).SyncWorkflowState(ctx, req.(*SyncWorkflowStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AdminService_GenerateLastHistoryReplicationTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GenerateLastHistoryReplicationTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).GenerateLastHistoryReplicationTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_GenerateLastHistoryReplicationTasks_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).GenerateLastHistoryReplicationTasks(ctx, req.(*GenerateLastHistoryReplicationTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AdminService_DescribeTaskQueuePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeTaskQueuePartitionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).DescribeTaskQueuePartition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
AdminService_DescribeTaskQueuePartition_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).DescribeTaskQueuePartition(ctx, req.(*DescribeTaskQueuePartitionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AdminService_ForceUnloadTaskQueuePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForceUnloadTaskQueuePartitionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).ForceUnloadTaskQueuePartition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_ForceUnloadTaskQueuePartition_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).ForceUnloadTaskQueuePartition(ctx, req.(*ForceUnloadTaskQueuePartitionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AdminService_GetTaskQueueUserData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskQueueUserDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).GetTaskQueueUserData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_GetTaskQueueUserData_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).GetTaskQueueUserData(ctx, req.(*GetTaskQueueUserDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _AdminService_MigrateSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MigrateScheduleRequest) + 
if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AdminServiceServer).MigrateSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: AdminService_MigrateSchedule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AdminServiceServer).MigrateSchedule(ctx, req.(*MigrateScheduleRequest)) + } + return interceptor(ctx, in, info, handler) +} + // AdminService_ServiceDesc is the grpc.ServiceDesc for AdminService service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -1559,6 +1805,10 @@ var AdminService_ServiceDesc = grpc.ServiceDesc{ MethodName: "RefreshWorkflowTasks", Handler: _AdminService_RefreshWorkflowTasks_Handler, }, + { + MethodName: "StartAdminBatchOperation", + Handler: _AdminService_StartAdminBatchOperation_Handler, + }, { MethodName: "ResendReplicationTasks", Handler: _AdminService_ResendReplicationTasks_Handler, @@ -1603,6 +1853,34 @@ var AdminService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListQueues", Handler: _AdminService_ListQueues_Handler, }, + { + MethodName: "DeepHealthCheck", + Handler: _AdminService_DeepHealthCheck_Handler, + }, + { + MethodName: "SyncWorkflowState", + Handler: _AdminService_SyncWorkflowState_Handler, + }, + { + MethodName: "GenerateLastHistoryReplicationTasks", + Handler: _AdminService_GenerateLastHistoryReplicationTasks_Handler, + }, + { + MethodName: "DescribeTaskQueuePartition", + Handler: _AdminService_DescribeTaskQueuePartition_Handler, + }, + { + MethodName: "ForceUnloadTaskQueuePartition", + Handler: _AdminService_ForceUnloadTaskQueuePartition_Handler, + }, + { + MethodName: "GetTaskQueueUserData", + Handler: _AdminService_GetTaskQueueUserData_Handler, + }, + { + MethodName: "MigrateSchedule", + Handler: _AdminService_MigrateSchedule_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git 
a/api/adminservicemock/v1/service.pb.mock.go b/api/adminservicemock/v1/service.pb.mock.go index f5a101447d6..250d7c21b7c 100644 --- a/api/adminservicemock/v1/service.pb.mock.go +++ b/api/adminservicemock/v1/service.pb.mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: api/adminservice/v1/service.pb.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package adminservicemock -source api/adminservice/v1/service.pb.go -destination api/adminservicemock/v1/service.pb.mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: adminservice/v1/service.pb.go // Package adminservicemock is a generated GoMock package. 
package adminservicemock diff --git a/api/adminservicemock/v1/service_grpc.pb.mock.go b/api/adminservicemock/v1/service_grpc.pb.mock.go index 9fd33981f99..26f1e936e0c 100644 --- a/api/adminservicemock/v1/service_grpc.pb.mock.go +++ b/api/adminservicemock/v1/service_grpc.pb.mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: api/adminservice/v1/service_grpc.pb.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package adminservicemock -source api/adminservice/v1/service_grpc.pb.go -destination api/adminservicemock/v1/service_grpc.pb.mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: adminservice/v1/service_grpc.pb.go // Package adminservicemock is a generated GoMock package. 
package adminservicemock @@ -32,8 +13,8 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" adminservice "go.temporal.io/server/api/adminservice/v1" + gomock "go.uber.org/mock/gomock" grpc "google.golang.org/grpc" metadata "google.golang.org/grpc/metadata" ) @@ -42,6 +23,7 @@ import ( type MockAdminServiceClient struct { ctrl *gomock.Controller recorder *MockAdminServiceClientMockRecorder + isgomock struct{} } // MockAdminServiceClientMockRecorder is the mock recorder for MockAdminServiceClient. @@ -64,7 +46,7 @@ func (m *MockAdminServiceClient) EXPECT() *MockAdminServiceClientMockRecorder { // AddOrUpdateRemoteCluster mocks base method. func (m *MockAdminServiceClient) AddOrUpdateRemoteCluster(ctx context.Context, in *adminservice.AddOrUpdateRemoteClusterRequest, opts ...grpc.CallOption) (*adminservice.AddOrUpdateRemoteClusterResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -75,16 +57,16 @@ func (m *MockAdminServiceClient) AddOrUpdateRemoteCluster(ctx context.Context, i } // AddOrUpdateRemoteCluster indicates an expected call of AddOrUpdateRemoteCluster. -func (mr *MockAdminServiceClientMockRecorder) AddOrUpdateRemoteCluster(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) AddOrUpdateRemoteCluster(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdateRemoteCluster", reflect.TypeOf((*MockAdminServiceClient)(nil).AddOrUpdateRemoteCluster), varargs...) } // AddSearchAttributes mocks base method. 
func (m *MockAdminServiceClient) AddSearchAttributes(ctx context.Context, in *adminservice.AddSearchAttributesRequest, opts ...grpc.CallOption) (*adminservice.AddSearchAttributesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -95,16 +77,16 @@ func (m *MockAdminServiceClient) AddSearchAttributes(ctx context.Context, in *ad } // AddSearchAttributes indicates an expected call of AddSearchAttributes. -func (mr *MockAdminServiceClientMockRecorder) AddSearchAttributes(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) AddSearchAttributes(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSearchAttributes", reflect.TypeOf((*MockAdminServiceClient)(nil).AddSearchAttributes), varargs...) } // AddTasks mocks base method. func (m *MockAdminServiceClient) AddTasks(ctx context.Context, in *adminservice.AddTasksRequest, opts ...grpc.CallOption) (*adminservice.AddTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -115,16 +97,16 @@ func (m *MockAdminServiceClient) AddTasks(ctx context.Context, in *adminservice. } // AddTasks indicates an expected call of AddTasks. -func (mr *MockAdminServiceClientMockRecorder) AddTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) AddTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).AddTasks), varargs...) 
} // CancelDLQJob mocks base method. func (m *MockAdminServiceClient) CancelDLQJob(ctx context.Context, in *adminservice.CancelDLQJobRequest, opts ...grpc.CallOption) (*adminservice.CancelDLQJobResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -135,16 +117,16 @@ func (m *MockAdminServiceClient) CancelDLQJob(ctx context.Context, in *adminserv } // CancelDLQJob indicates an expected call of CancelDLQJob. -func (mr *MockAdminServiceClientMockRecorder) CancelDLQJob(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) CancelDLQJob(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelDLQJob", reflect.TypeOf((*MockAdminServiceClient)(nil).CancelDLQJob), varargs...) } // CloseShard mocks base method. func (m *MockAdminServiceClient) CloseShard(ctx context.Context, in *adminservice.CloseShardRequest, opts ...grpc.CallOption) (*adminservice.CloseShardResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -155,16 +137,36 @@ func (m *MockAdminServiceClient) CloseShard(ctx context.Context, in *adminservic } // CloseShard indicates an expected call of CloseShard. -func (mr *MockAdminServiceClientMockRecorder) CloseShard(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) CloseShard(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseShard", reflect.TypeOf((*MockAdminServiceClient)(nil).CloseShard), varargs...) 
} +// DeepHealthCheck mocks base method. +func (m *MockAdminServiceClient) DeepHealthCheck(ctx context.Context, in *adminservice.DeepHealthCheckRequest, opts ...grpc.CallOption) (*adminservice.DeepHealthCheckResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeepHealthCheck", varargs...) + ret0, _ := ret[0].(*adminservice.DeepHealthCheckResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeepHealthCheck indicates an expected call of DeepHealthCheck. +func (mr *MockAdminServiceClientMockRecorder) DeepHealthCheck(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeepHealthCheck", reflect.TypeOf((*MockAdminServiceClient)(nil).DeepHealthCheck), varargs...) +} + // DeleteWorkflowExecution mocks base method. func (m *MockAdminServiceClient) DeleteWorkflowExecution(ctx context.Context, in *adminservice.DeleteWorkflowExecutionRequest, opts ...grpc.CallOption) (*adminservice.DeleteWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -175,16 +177,16 @@ func (m *MockAdminServiceClient) DeleteWorkflowExecution(ctx context.Context, in } // DeleteWorkflowExecution indicates an expected call of DeleteWorkflowExecution. -func (mr *MockAdminServiceClientMockRecorder) DeleteWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) DeleteWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockAdminServiceClient)(nil).DeleteWorkflowExecution), varargs...) } // DescribeCluster mocks base method. func (m *MockAdminServiceClient) DescribeCluster(ctx context.Context, in *adminservice.DescribeClusterRequest, opts ...grpc.CallOption) (*adminservice.DescribeClusterResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -195,16 +197,16 @@ func (m *MockAdminServiceClient) DescribeCluster(ctx context.Context, in *admins } // DescribeCluster indicates an expected call of DescribeCluster. -func (mr *MockAdminServiceClientMockRecorder) DescribeCluster(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) DescribeCluster(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCluster", reflect.TypeOf((*MockAdminServiceClient)(nil).DescribeCluster), varargs...) } // DescribeDLQJob mocks base method. func (m *MockAdminServiceClient) DescribeDLQJob(ctx context.Context, in *adminservice.DescribeDLQJobRequest, opts ...grpc.CallOption) (*adminservice.DescribeDLQJobResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -215,16 +217,16 @@ func (m *MockAdminServiceClient) DescribeDLQJob(ctx context.Context, in *adminse } // DescribeDLQJob indicates an expected call of DescribeDLQJob. 
-func (mr *MockAdminServiceClientMockRecorder) DescribeDLQJob(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) DescribeDLQJob(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeDLQJob", reflect.TypeOf((*MockAdminServiceClient)(nil).DescribeDLQJob), varargs...) } // DescribeHistoryHost mocks base method. func (m *MockAdminServiceClient) DescribeHistoryHost(ctx context.Context, in *adminservice.DescribeHistoryHostRequest, opts ...grpc.CallOption) (*adminservice.DescribeHistoryHostResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -235,16 +237,16 @@ func (m *MockAdminServiceClient) DescribeHistoryHost(ctx context.Context, in *ad } // DescribeHistoryHost indicates an expected call of DescribeHistoryHost. -func (mr *MockAdminServiceClientMockRecorder) DescribeHistoryHost(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) DescribeHistoryHost(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeHistoryHost", reflect.TypeOf((*MockAdminServiceClient)(nil).DescribeHistoryHost), varargs...) } // DescribeMutableState mocks base method. 
func (m *MockAdminServiceClient) DescribeMutableState(ctx context.Context, in *adminservice.DescribeMutableStateRequest, opts ...grpc.CallOption) (*adminservice.DescribeMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -255,16 +257,76 @@ func (m *MockAdminServiceClient) DescribeMutableState(ctx context.Context, in *a } // DescribeMutableState indicates an expected call of DescribeMutableState. -func (mr *MockAdminServiceClientMockRecorder) DescribeMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) DescribeMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMutableState", reflect.TypeOf((*MockAdminServiceClient)(nil).DescribeMutableState), varargs...) } +// DescribeTaskQueuePartition mocks base method. +func (m *MockAdminServiceClient) DescribeTaskQueuePartition(ctx context.Context, in *adminservice.DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption) (*adminservice.DescribeTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTaskQueuePartition", varargs...) + ret0, _ := ret[0].(*adminservice.DescribeTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTaskQueuePartition indicates an expected call of DescribeTaskQueuePartition. +func (mr *MockAdminServiceClientMockRecorder) DescribeTaskQueuePartition(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTaskQueuePartition", reflect.TypeOf((*MockAdminServiceClient)(nil).DescribeTaskQueuePartition), varargs...) +} + +// ForceUnloadTaskQueuePartition mocks base method. +func (m *MockAdminServiceClient) ForceUnloadTaskQueuePartition(ctx context.Context, in *adminservice.ForceUnloadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*adminservice.ForceUnloadTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ForceUnloadTaskQueuePartition", varargs...) + ret0, _ := ret[0].(*adminservice.ForceUnloadTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForceUnloadTaskQueuePartition indicates an expected call of ForceUnloadTaskQueuePartition. +func (mr *MockAdminServiceClientMockRecorder) ForceUnloadTaskQueuePartition(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUnloadTaskQueuePartition", reflect.TypeOf((*MockAdminServiceClient)(nil).ForceUnloadTaskQueuePartition), varargs...) +} + +// GenerateLastHistoryReplicationTasks mocks base method. +func (m *MockAdminServiceClient) GenerateLastHistoryReplicationTasks(ctx context.Context, in *adminservice.GenerateLastHistoryReplicationTasksRequest, opts ...grpc.CallOption) (*adminservice.GenerateLastHistoryReplicationTasksResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GenerateLastHistoryReplicationTasks", varargs...) + ret0, _ := ret[0].(*adminservice.GenerateLastHistoryReplicationTasksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GenerateLastHistoryReplicationTasks indicates an expected call of GenerateLastHistoryReplicationTasks. 
+func (mr *MockAdminServiceClientMockRecorder) GenerateLastHistoryReplicationTasks(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateLastHistoryReplicationTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).GenerateLastHistoryReplicationTasks), varargs...) +} + // GetDLQMessages mocks base method. func (m *MockAdminServiceClient) GetDLQMessages(ctx context.Context, in *adminservice.GetDLQMessagesRequest, opts ...grpc.CallOption) (*adminservice.GetDLQMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -275,16 +337,16 @@ func (m *MockAdminServiceClient) GetDLQMessages(ctx context.Context, in *adminse } // GetDLQMessages indicates an expected call of GetDLQMessages. -func (mr *MockAdminServiceClientMockRecorder) GetDLQMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetDLQMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).GetDLQMessages), varargs...) } // GetDLQReplicationMessages mocks base method. 
func (m *MockAdminServiceClient) GetDLQReplicationMessages(ctx context.Context, in *adminservice.GetDLQReplicationMessagesRequest, opts ...grpc.CallOption) (*adminservice.GetDLQReplicationMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -295,16 +357,16 @@ func (m *MockAdminServiceClient) GetDLQReplicationMessages(ctx context.Context, } // GetDLQReplicationMessages indicates an expected call of GetDLQReplicationMessages. -func (mr *MockAdminServiceClientMockRecorder) GetDLQReplicationMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetDLQReplicationMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQReplicationMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).GetDLQReplicationMessages), varargs...) } // GetDLQTasks mocks base method. func (m *MockAdminServiceClient) GetDLQTasks(ctx context.Context, in *adminservice.GetDLQTasksRequest, opts ...grpc.CallOption) (*adminservice.GetDLQTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -315,16 +377,16 @@ func (m *MockAdminServiceClient) GetDLQTasks(ctx context.Context, in *adminservi } // GetDLQTasks indicates an expected call of GetDLQTasks. -func (mr *MockAdminServiceClientMockRecorder) GetDLQTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetDLQTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).GetDLQTasks), varargs...) } // GetNamespace mocks base method. func (m *MockAdminServiceClient) GetNamespace(ctx context.Context, in *adminservice.GetNamespaceRequest, opts ...grpc.CallOption) (*adminservice.GetNamespaceResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -335,16 +397,16 @@ func (m *MockAdminServiceClient) GetNamespace(ctx context.Context, in *adminserv } // GetNamespace indicates an expected call of GetNamespace. -func (mr *MockAdminServiceClientMockRecorder) GetNamespace(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetNamespace(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockAdminServiceClient)(nil).GetNamespace), varargs...) } // GetNamespaceReplicationMessages mocks base method. func (m *MockAdminServiceClient) GetNamespaceReplicationMessages(ctx context.Context, in *adminservice.GetNamespaceReplicationMessagesRequest, opts ...grpc.CallOption) (*adminservice.GetNamespaceReplicationMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -355,16 +417,16 @@ func (m *MockAdminServiceClient) GetNamespaceReplicationMessages(ctx context.Con } // GetNamespaceReplicationMessages indicates an expected call of GetNamespaceReplicationMessages. 
-func (mr *MockAdminServiceClientMockRecorder) GetNamespaceReplicationMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetNamespaceReplicationMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespaceReplicationMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).GetNamespaceReplicationMessages), varargs...) } // GetReplicationMessages mocks base method. func (m *MockAdminServiceClient) GetReplicationMessages(ctx context.Context, in *adminservice.GetReplicationMessagesRequest, opts ...grpc.CallOption) (*adminservice.GetReplicationMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -375,16 +437,16 @@ func (m *MockAdminServiceClient) GetReplicationMessages(ctx context.Context, in } // GetReplicationMessages indicates an expected call of GetReplicationMessages. -func (mr *MockAdminServiceClientMockRecorder) GetReplicationMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetReplicationMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).GetReplicationMessages), varargs...) } // GetSearchAttributes mocks base method. 
func (m *MockAdminServiceClient) GetSearchAttributes(ctx context.Context, in *adminservice.GetSearchAttributesRequest, opts ...grpc.CallOption) (*adminservice.GetSearchAttributesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -395,16 +457,16 @@ func (m *MockAdminServiceClient) GetSearchAttributes(ctx context.Context, in *ad } // GetSearchAttributes indicates an expected call of GetSearchAttributes. -func (mr *MockAdminServiceClientMockRecorder) GetSearchAttributes(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetSearchAttributes(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSearchAttributes", reflect.TypeOf((*MockAdminServiceClient)(nil).GetSearchAttributes), varargs...) } // GetShard mocks base method. func (m *MockAdminServiceClient) GetShard(ctx context.Context, in *adminservice.GetShardRequest, opts ...grpc.CallOption) (*adminservice.GetShardResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -415,16 +477,16 @@ func (m *MockAdminServiceClient) GetShard(ctx context.Context, in *adminservice. } // GetShard indicates an expected call of GetShard. -func (mr *MockAdminServiceClientMockRecorder) GetShard(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetShard(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockAdminServiceClient)(nil).GetShard), varargs...) 
} // GetTaskQueueTasks mocks base method. func (m *MockAdminServiceClient) GetTaskQueueTasks(ctx context.Context, in *adminservice.GetTaskQueueTasksRequest, opts ...grpc.CallOption) (*adminservice.GetTaskQueueTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -435,16 +497,36 @@ func (m *MockAdminServiceClient) GetTaskQueueTasks(ctx context.Context, in *admi } // GetTaskQueueTasks indicates an expected call of GetTaskQueueTasks. -func (mr *MockAdminServiceClientMockRecorder) GetTaskQueueTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetTaskQueueTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).GetTaskQueueTasks), varargs...) } +// GetTaskQueueUserData mocks base method. +func (m *MockAdminServiceClient) GetTaskQueueUserData(ctx context.Context, in *adminservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*adminservice.GetTaskQueueUserDataResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetTaskQueueUserData", varargs...) + ret0, _ := ret[0].(*adminservice.GetTaskQueueUserDataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskQueueUserData indicates an expected call of GetTaskQueueUserData. +func (mr *MockAdminServiceClientMockRecorder) GetTaskQueueUserData(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueUserData", reflect.TypeOf((*MockAdminServiceClient)(nil).GetTaskQueueUserData), varargs...) +} + // GetWorkflowExecutionRawHistory mocks base method. func (m *MockAdminServiceClient) GetWorkflowExecutionRawHistory(ctx context.Context, in *adminservice.GetWorkflowExecutionRawHistoryRequest, opts ...grpc.CallOption) (*adminservice.GetWorkflowExecutionRawHistoryResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -455,16 +537,16 @@ func (m *MockAdminServiceClient) GetWorkflowExecutionRawHistory(ctx context.Cont } // GetWorkflowExecutionRawHistory indicates an expected call of GetWorkflowExecutionRawHistory. -func (mr *MockAdminServiceClientMockRecorder) GetWorkflowExecutionRawHistory(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetWorkflowExecutionRawHistory(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistory", reflect.TypeOf((*MockAdminServiceClient)(nil).GetWorkflowExecutionRawHistory), varargs...) } // GetWorkflowExecutionRawHistoryV2 mocks base method. func (m *MockAdminServiceClient) GetWorkflowExecutionRawHistoryV2(ctx context.Context, in *adminservice.GetWorkflowExecutionRawHistoryV2Request, opts ...grpc.CallOption) (*adminservice.GetWorkflowExecutionRawHistoryV2Response, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -475,16 +557,16 @@ func (m *MockAdminServiceClient) GetWorkflowExecutionRawHistoryV2(ctx context.Co } // GetWorkflowExecutionRawHistoryV2 indicates an expected call of GetWorkflowExecutionRawHistoryV2. 
-func (mr *MockAdminServiceClientMockRecorder) GetWorkflowExecutionRawHistoryV2(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) GetWorkflowExecutionRawHistoryV2(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistoryV2", reflect.TypeOf((*MockAdminServiceClient)(nil).GetWorkflowExecutionRawHistoryV2), varargs...) } // ImportWorkflowExecution mocks base method. func (m *MockAdminServiceClient) ImportWorkflowExecution(ctx context.Context, in *adminservice.ImportWorkflowExecutionRequest, opts ...grpc.CallOption) (*adminservice.ImportWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -495,16 +577,16 @@ func (m *MockAdminServiceClient) ImportWorkflowExecution(ctx context.Context, in } // ImportWorkflowExecution indicates an expected call of ImportWorkflowExecution. -func (mr *MockAdminServiceClientMockRecorder) ImportWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ImportWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportWorkflowExecution", reflect.TypeOf((*MockAdminServiceClient)(nil).ImportWorkflowExecution), varargs...) } // ListClusterMembers mocks base method. 
func (m *MockAdminServiceClient) ListClusterMembers(ctx context.Context, in *adminservice.ListClusterMembersRequest, opts ...grpc.CallOption) (*adminservice.ListClusterMembersResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -515,16 +597,16 @@ func (m *MockAdminServiceClient) ListClusterMembers(ctx context.Context, in *adm } // ListClusterMembers indicates an expected call of ListClusterMembers. -func (mr *MockAdminServiceClientMockRecorder) ListClusterMembers(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ListClusterMembers(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterMembers", reflect.TypeOf((*MockAdminServiceClient)(nil).ListClusterMembers), varargs...) } // ListClusters mocks base method. func (m *MockAdminServiceClient) ListClusters(ctx context.Context, in *adminservice.ListClustersRequest, opts ...grpc.CallOption) (*adminservice.ListClustersResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -535,16 +617,16 @@ func (m *MockAdminServiceClient) ListClusters(ctx context.Context, in *adminserv } // ListClusters indicates an expected call of ListClusters. -func (mr *MockAdminServiceClientMockRecorder) ListClusters(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ListClusters(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusters", reflect.TypeOf((*MockAdminServiceClient)(nil).ListClusters), varargs...) } // ListHistoryTasks mocks base method. func (m *MockAdminServiceClient) ListHistoryTasks(ctx context.Context, in *adminservice.ListHistoryTasksRequest, opts ...grpc.CallOption) (*adminservice.ListHistoryTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -555,16 +637,16 @@ func (m *MockAdminServiceClient) ListHistoryTasks(ctx context.Context, in *admin } // ListHistoryTasks indicates an expected call of ListHistoryTasks. -func (mr *MockAdminServiceClientMockRecorder) ListHistoryTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ListHistoryTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListHistoryTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).ListHistoryTasks), varargs...) } // ListQueues mocks base method. func (m *MockAdminServiceClient) ListQueues(ctx context.Context, in *adminservice.ListQueuesRequest, opts ...grpc.CallOption) (*adminservice.ListQueuesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -575,16 +657,16 @@ func (m *MockAdminServiceClient) ListQueues(ctx context.Context, in *adminservic } // ListQueues indicates an expected call of ListQueues. -func (mr *MockAdminServiceClientMockRecorder) ListQueues(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ListQueues(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
+ varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListQueues", reflect.TypeOf((*MockAdminServiceClient)(nil).ListQueues), varargs...) } // MergeDLQMessages mocks base method. func (m *MockAdminServiceClient) MergeDLQMessages(ctx context.Context, in *adminservice.MergeDLQMessagesRequest, opts ...grpc.CallOption) (*adminservice.MergeDLQMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -595,16 +677,16 @@ func (m *MockAdminServiceClient) MergeDLQMessages(ctx context.Context, in *admin } // MergeDLQMessages indicates an expected call of MergeDLQMessages. -func (mr *MockAdminServiceClientMockRecorder) MergeDLQMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) MergeDLQMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).MergeDLQMessages), varargs...) } // MergeDLQTasks mocks base method. func (m *MockAdminServiceClient) MergeDLQTasks(ctx context.Context, in *adminservice.MergeDLQTasksRequest, opts ...grpc.CallOption) (*adminservice.MergeDLQTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -615,16 +697,36 @@ func (m *MockAdminServiceClient) MergeDLQTasks(ctx context.Context, in *adminser } // MergeDLQTasks indicates an expected call of MergeDLQTasks. 
-func (mr *MockAdminServiceClientMockRecorder) MergeDLQTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) MergeDLQTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).MergeDLQTasks), varargs...) } +// MigrateSchedule mocks base method. +func (m *MockAdminServiceClient) MigrateSchedule(ctx context.Context, in *adminservice.MigrateScheduleRequest, opts ...grpc.CallOption) (*adminservice.MigrateScheduleResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "MigrateSchedule", varargs...) + ret0, _ := ret[0].(*adminservice.MigrateScheduleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MigrateSchedule indicates an expected call of MigrateSchedule. +func (mr *MockAdminServiceClientMockRecorder) MigrateSchedule(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateSchedule", reflect.TypeOf((*MockAdminServiceClient)(nil).MigrateSchedule), varargs...) +} + // PurgeDLQMessages mocks base method. func (m *MockAdminServiceClient) PurgeDLQMessages(ctx context.Context, in *adminservice.PurgeDLQMessagesRequest, opts ...grpc.CallOption) (*adminservice.PurgeDLQMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -635,16 +737,16 @@ func (m *MockAdminServiceClient) PurgeDLQMessages(ctx context.Context, in *admin } // PurgeDLQMessages indicates an expected call of PurgeDLQMessages. 
-func (mr *MockAdminServiceClientMockRecorder) PurgeDLQMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) PurgeDLQMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).PurgeDLQMessages), varargs...) } // PurgeDLQTasks mocks base method. func (m *MockAdminServiceClient) PurgeDLQTasks(ctx context.Context, in *adminservice.PurgeDLQTasksRequest, opts ...grpc.CallOption) (*adminservice.PurgeDLQTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -655,16 +757,16 @@ func (m *MockAdminServiceClient) PurgeDLQTasks(ctx context.Context, in *adminser } // PurgeDLQTasks indicates an expected call of PurgeDLQTasks. -func (mr *MockAdminServiceClientMockRecorder) PurgeDLQTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) PurgeDLQTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).PurgeDLQTasks), varargs...) } // ReapplyEvents mocks base method. 
func (m *MockAdminServiceClient) ReapplyEvents(ctx context.Context, in *adminservice.ReapplyEventsRequest, opts ...grpc.CallOption) (*adminservice.ReapplyEventsResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -675,16 +777,16 @@ func (m *MockAdminServiceClient) ReapplyEvents(ctx context.Context, in *adminser } // ReapplyEvents indicates an expected call of ReapplyEvents. -func (mr *MockAdminServiceClientMockRecorder) ReapplyEvents(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ReapplyEvents(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapplyEvents", reflect.TypeOf((*MockAdminServiceClient)(nil).ReapplyEvents), varargs...) } // RebuildMutableState mocks base method. func (m *MockAdminServiceClient) RebuildMutableState(ctx context.Context, in *adminservice.RebuildMutableStateRequest, opts ...grpc.CallOption) (*adminservice.RebuildMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -695,16 +797,16 @@ func (m *MockAdminServiceClient) RebuildMutableState(ctx context.Context, in *ad } // RebuildMutableState indicates an expected call of RebuildMutableState. -func (mr *MockAdminServiceClientMockRecorder) RebuildMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) RebuildMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebuildMutableState", reflect.TypeOf((*MockAdminServiceClient)(nil).RebuildMutableState), varargs...) } // RefreshWorkflowTasks mocks base method. func (m *MockAdminServiceClient) RefreshWorkflowTasks(ctx context.Context, in *adminservice.RefreshWorkflowTasksRequest, opts ...grpc.CallOption) (*adminservice.RefreshWorkflowTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -715,16 +817,16 @@ func (m *MockAdminServiceClient) RefreshWorkflowTasks(ctx context.Context, in *a } // RefreshWorkflowTasks indicates an expected call of RefreshWorkflowTasks. -func (mr *MockAdminServiceClientMockRecorder) RefreshWorkflowTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) RefreshWorkflowTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshWorkflowTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).RefreshWorkflowTasks), varargs...) } // RemoveRemoteCluster mocks base method. func (m *MockAdminServiceClient) RemoveRemoteCluster(ctx context.Context, in *adminservice.RemoveRemoteClusterRequest, opts ...grpc.CallOption) (*adminservice.RemoveRemoteClusterResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -735,16 +837,16 @@ func (m *MockAdminServiceClient) RemoveRemoteCluster(ctx context.Context, in *ad } // RemoveRemoteCluster indicates an expected call of RemoveRemoteCluster. 
-func (mr *MockAdminServiceClientMockRecorder) RemoveRemoteCluster(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) RemoveRemoteCluster(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRemoteCluster", reflect.TypeOf((*MockAdminServiceClient)(nil).RemoveRemoteCluster), varargs...) } // RemoveSearchAttributes mocks base method. func (m *MockAdminServiceClient) RemoveSearchAttributes(ctx context.Context, in *adminservice.RemoveSearchAttributesRequest, opts ...grpc.CallOption) (*adminservice.RemoveSearchAttributesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -755,16 +857,16 @@ func (m *MockAdminServiceClient) RemoveSearchAttributes(ctx context.Context, in } // RemoveSearchAttributes indicates an expected call of RemoveSearchAttributes. -func (mr *MockAdminServiceClientMockRecorder) RemoveSearchAttributes(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) RemoveSearchAttributes(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSearchAttributes", reflect.TypeOf((*MockAdminServiceClient)(nil).RemoveSearchAttributes), varargs...) } // RemoveTask mocks base method. 
func (m *MockAdminServiceClient) RemoveTask(ctx context.Context, in *adminservice.RemoveTaskRequest, opts ...grpc.CallOption) (*adminservice.RemoveTaskResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -775,16 +877,16 @@ func (m *MockAdminServiceClient) RemoveTask(ctx context.Context, in *adminservic } // RemoveTask indicates an expected call of RemoveTask. -func (mr *MockAdminServiceClientMockRecorder) RemoveTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) RemoveTask(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTask", reflect.TypeOf((*MockAdminServiceClient)(nil).RemoveTask), varargs...) } // ResendReplicationTasks mocks base method. func (m *MockAdminServiceClient) ResendReplicationTasks(ctx context.Context, in *adminservice.ResendReplicationTasksRequest, opts ...grpc.CallOption) (*adminservice.ResendReplicationTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -795,16 +897,36 @@ func (m *MockAdminServiceClient) ResendReplicationTasks(ctx context.Context, in } // ResendReplicationTasks indicates an expected call of ResendReplicationTasks. -func (mr *MockAdminServiceClientMockRecorder) ResendReplicationTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) ResendReplicationTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResendReplicationTasks", reflect.TypeOf((*MockAdminServiceClient)(nil).ResendReplicationTasks), varargs...) } +// StartAdminBatchOperation mocks base method. +func (m *MockAdminServiceClient) StartAdminBatchOperation(ctx context.Context, in *adminservice.StartAdminBatchOperationRequest, opts ...grpc.CallOption) (*adminservice.StartAdminBatchOperationResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "StartAdminBatchOperation", varargs...) + ret0, _ := ret[0].(*adminservice.StartAdminBatchOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartAdminBatchOperation indicates an expected call of StartAdminBatchOperation. +func (mr *MockAdminServiceClientMockRecorder) StartAdminBatchOperation(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartAdminBatchOperation", reflect.TypeOf((*MockAdminServiceClient)(nil).StartAdminBatchOperation), varargs...) +} + // StreamWorkflowReplicationMessages mocks base method. func (m *MockAdminServiceClient) StreamWorkflowReplicationMessages(ctx context.Context, opts ...grpc.CallOption) (adminservice.AdminService_StreamWorkflowReplicationMessagesClient, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx} + varargs := []any{ctx} for _, a := range opts { varargs = append(varargs, a) } @@ -815,16 +937,37 @@ func (m *MockAdminServiceClient) StreamWorkflowReplicationMessages(ctx context.C } // StreamWorkflowReplicationMessages indicates an expected call of StreamWorkflowReplicationMessages. 
-func (mr *MockAdminServiceClientMockRecorder) StreamWorkflowReplicationMessages(ctx interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockAdminServiceClientMockRecorder) StreamWorkflowReplicationMessages(ctx any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx}, opts...) + varargs := append([]any{ctx}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWorkflowReplicationMessages", reflect.TypeOf((*MockAdminServiceClient)(nil).StreamWorkflowReplicationMessages), varargs...) } +// SyncWorkflowState mocks base method. +func (m *MockAdminServiceClient) SyncWorkflowState(ctx context.Context, in *adminservice.SyncWorkflowStateRequest, opts ...grpc.CallOption) (*adminservice.SyncWorkflowStateResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SyncWorkflowState", varargs...) + ret0, _ := ret[0].(*adminservice.SyncWorkflowStateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncWorkflowState indicates an expected call of SyncWorkflowState. +func (mr *MockAdminServiceClientMockRecorder) SyncWorkflowState(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWorkflowState", reflect.TypeOf((*MockAdminServiceClient)(nil).SyncWorkflowState), varargs...) +} + // MockAdminService_StreamWorkflowReplicationMessagesClient is a mock of AdminService_StreamWorkflowReplicationMessagesClient interface. type MockAdminService_StreamWorkflowReplicationMessagesClient struct { ctrl *gomock.Controller recorder *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder + isgomock struct{} } // MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder is the mock recorder for MockAdminService_StreamWorkflowReplicationMessagesClient. 
@@ -911,7 +1054,7 @@ func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesClient) RecvMsg(m a } // RecvMsg indicates an expected call of RecvMsg. -func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) RecvMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesClient)(nil).RecvMsg), m) } @@ -925,7 +1068,7 @@ func (m *MockAdminService_StreamWorkflowReplicationMessagesClient) Send(arg0 *ad } // Send indicates an expected call of Send. -func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) Send(arg0 interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) Send(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesClient)(nil).Send), arg0) } @@ -939,7 +1082,7 @@ func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesClient) SendMsg(m a } // SendMsg indicates an expected call of SendMsg. 
-func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) SendMsg(m interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) SendMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesClient)(nil).SendMsg), m) } @@ -962,6 +1105,7 @@ func (mr *MockAdminService_StreamWorkflowReplicationMessagesClientMockRecorder) type MockAdminServiceServer struct { ctrl *gomock.Controller recorder *MockAdminServiceServerMockRecorder + isgomock struct{} } // MockAdminServiceServerMockRecorder is the mock recorder for MockAdminServiceServer. @@ -991,7 +1135,7 @@ func (m *MockAdminServiceServer) AddOrUpdateRemoteCluster(arg0 context.Context, } // AddOrUpdateRemoteCluster indicates an expected call of AddOrUpdateRemoteCluster. -func (mr *MockAdminServiceServerMockRecorder) AddOrUpdateRemoteCluster(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) AddOrUpdateRemoteCluster(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddOrUpdateRemoteCluster", reflect.TypeOf((*MockAdminServiceServer)(nil).AddOrUpdateRemoteCluster), arg0, arg1) } @@ -1006,7 +1150,7 @@ func (m *MockAdminServiceServer) AddSearchAttributes(arg0 context.Context, arg1 } // AddSearchAttributes indicates an expected call of AddSearchAttributes. 
-func (mr *MockAdminServiceServerMockRecorder) AddSearchAttributes(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) AddSearchAttributes(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSearchAttributes", reflect.TypeOf((*MockAdminServiceServer)(nil).AddSearchAttributes), arg0, arg1) } @@ -1021,7 +1165,7 @@ func (m *MockAdminServiceServer) AddTasks(arg0 context.Context, arg1 *adminservi } // AddTasks indicates an expected call of AddTasks. -func (mr *MockAdminServiceServerMockRecorder) AddTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) AddTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).AddTasks), arg0, arg1) } @@ -1036,7 +1180,7 @@ func (m *MockAdminServiceServer) CancelDLQJob(arg0 context.Context, arg1 *admins } // CancelDLQJob indicates an expected call of CancelDLQJob. -func (mr *MockAdminServiceServerMockRecorder) CancelDLQJob(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) CancelDLQJob(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelDLQJob", reflect.TypeOf((*MockAdminServiceServer)(nil).CancelDLQJob), arg0, arg1) } @@ -1051,11 +1195,26 @@ func (m *MockAdminServiceServer) CloseShard(arg0 context.Context, arg1 *adminser } // CloseShard indicates an expected call of CloseShard. -func (mr *MockAdminServiceServerMockRecorder) CloseShard(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) CloseShard(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseShard", reflect.TypeOf((*MockAdminServiceServer)(nil).CloseShard), arg0, arg1) } +// DeepHealthCheck mocks base method. 
+func (m *MockAdminServiceServer) DeepHealthCheck(arg0 context.Context, arg1 *adminservice.DeepHealthCheckRequest) (*adminservice.DeepHealthCheckResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeepHealthCheck", arg0, arg1) + ret0, _ := ret[0].(*adminservice.DeepHealthCheckResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeepHealthCheck indicates an expected call of DeepHealthCheck. +func (mr *MockAdminServiceServerMockRecorder) DeepHealthCheck(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeepHealthCheck", reflect.TypeOf((*MockAdminServiceServer)(nil).DeepHealthCheck), arg0, arg1) +} + // DeleteWorkflowExecution mocks base method. func (m *MockAdminServiceServer) DeleteWorkflowExecution(arg0 context.Context, arg1 *adminservice.DeleteWorkflowExecutionRequest) (*adminservice.DeleteWorkflowExecutionResponse, error) { m.ctrl.T.Helper() @@ -1066,7 +1225,7 @@ func (m *MockAdminServiceServer) DeleteWorkflowExecution(arg0 context.Context, a } // DeleteWorkflowExecution indicates an expected call of DeleteWorkflowExecution. -func (mr *MockAdminServiceServerMockRecorder) DeleteWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) DeleteWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockAdminServiceServer)(nil).DeleteWorkflowExecution), arg0, arg1) } @@ -1081,7 +1240,7 @@ func (m *MockAdminServiceServer) DescribeCluster(arg0 context.Context, arg1 *adm } // DescribeCluster indicates an expected call of DescribeCluster. 
-func (mr *MockAdminServiceServerMockRecorder) DescribeCluster(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) DescribeCluster(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeCluster", reflect.TypeOf((*MockAdminServiceServer)(nil).DescribeCluster), arg0, arg1) } @@ -1096,7 +1255,7 @@ func (m *MockAdminServiceServer) DescribeDLQJob(arg0 context.Context, arg1 *admi } // DescribeDLQJob indicates an expected call of DescribeDLQJob. -func (mr *MockAdminServiceServerMockRecorder) DescribeDLQJob(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) DescribeDLQJob(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeDLQJob", reflect.TypeOf((*MockAdminServiceServer)(nil).DescribeDLQJob), arg0, arg1) } @@ -1111,7 +1270,7 @@ func (m *MockAdminServiceServer) DescribeHistoryHost(arg0 context.Context, arg1 } // DescribeHistoryHost indicates an expected call of DescribeHistoryHost. -func (mr *MockAdminServiceServerMockRecorder) DescribeHistoryHost(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) DescribeHistoryHost(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeHistoryHost", reflect.TypeOf((*MockAdminServiceServer)(nil).DescribeHistoryHost), arg0, arg1) } @@ -1126,11 +1285,56 @@ func (m *MockAdminServiceServer) DescribeMutableState(arg0 context.Context, arg1 } // DescribeMutableState indicates an expected call of DescribeMutableState. 
-func (mr *MockAdminServiceServerMockRecorder) DescribeMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) DescribeMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMutableState", reflect.TypeOf((*MockAdminServiceServer)(nil).DescribeMutableState), arg0, arg1) } +// DescribeTaskQueuePartition mocks base method. +func (m *MockAdminServiceServer) DescribeTaskQueuePartition(arg0 context.Context, arg1 *adminservice.DescribeTaskQueuePartitionRequest) (*adminservice.DescribeTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTaskQueuePartition", arg0, arg1) + ret0, _ := ret[0].(*adminservice.DescribeTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTaskQueuePartition indicates an expected call of DescribeTaskQueuePartition. +func (mr *MockAdminServiceServerMockRecorder) DescribeTaskQueuePartition(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTaskQueuePartition", reflect.TypeOf((*MockAdminServiceServer)(nil).DescribeTaskQueuePartition), arg0, arg1) +} + +// ForceUnloadTaskQueuePartition mocks base method. +func (m *MockAdminServiceServer) ForceUnloadTaskQueuePartition(arg0 context.Context, arg1 *adminservice.ForceUnloadTaskQueuePartitionRequest) (*adminservice.ForceUnloadTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForceUnloadTaskQueuePartition", arg0, arg1) + ret0, _ := ret[0].(*adminservice.ForceUnloadTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForceUnloadTaskQueuePartition indicates an expected call of ForceUnloadTaskQueuePartition. 
+func (mr *MockAdminServiceServerMockRecorder) ForceUnloadTaskQueuePartition(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUnloadTaskQueuePartition", reflect.TypeOf((*MockAdminServiceServer)(nil).ForceUnloadTaskQueuePartition), arg0, arg1) +} + +// GenerateLastHistoryReplicationTasks mocks base method. +func (m *MockAdminServiceServer) GenerateLastHistoryReplicationTasks(arg0 context.Context, arg1 *adminservice.GenerateLastHistoryReplicationTasksRequest) (*adminservice.GenerateLastHistoryReplicationTasksResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GenerateLastHistoryReplicationTasks", arg0, arg1) + ret0, _ := ret[0].(*adminservice.GenerateLastHistoryReplicationTasksResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GenerateLastHistoryReplicationTasks indicates an expected call of GenerateLastHistoryReplicationTasks. +func (mr *MockAdminServiceServerMockRecorder) GenerateLastHistoryReplicationTasks(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateLastHistoryReplicationTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).GenerateLastHistoryReplicationTasks), arg0, arg1) +} + // GetDLQMessages mocks base method. func (m *MockAdminServiceServer) GetDLQMessages(arg0 context.Context, arg1 *adminservice.GetDLQMessagesRequest) (*adminservice.GetDLQMessagesResponse, error) { m.ctrl.T.Helper() @@ -1141,7 +1345,7 @@ func (m *MockAdminServiceServer) GetDLQMessages(arg0 context.Context, arg1 *admi } // GetDLQMessages indicates an expected call of GetDLQMessages. 
-func (mr *MockAdminServiceServerMockRecorder) GetDLQMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetDLQMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).GetDLQMessages), arg0, arg1) } @@ -1156,7 +1360,7 @@ func (m *MockAdminServiceServer) GetDLQReplicationMessages(arg0 context.Context, } // GetDLQReplicationMessages indicates an expected call of GetDLQReplicationMessages. -func (mr *MockAdminServiceServerMockRecorder) GetDLQReplicationMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetDLQReplicationMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQReplicationMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).GetDLQReplicationMessages), arg0, arg1) } @@ -1171,7 +1375,7 @@ func (m *MockAdminServiceServer) GetDLQTasks(arg0 context.Context, arg1 *adminse } // GetDLQTasks indicates an expected call of GetDLQTasks. -func (mr *MockAdminServiceServerMockRecorder) GetDLQTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetDLQTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).GetDLQTasks), arg0, arg1) } @@ -1186,7 +1390,7 @@ func (m *MockAdminServiceServer) GetNamespace(arg0 context.Context, arg1 *admins } // GetNamespace indicates an expected call of GetNamespace. 
-func (mr *MockAdminServiceServerMockRecorder) GetNamespace(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetNamespace(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespace", reflect.TypeOf((*MockAdminServiceServer)(nil).GetNamespace), arg0, arg1) } @@ -1201,7 +1405,7 @@ func (m *MockAdminServiceServer) GetNamespaceReplicationMessages(arg0 context.Co } // GetNamespaceReplicationMessages indicates an expected call of GetNamespaceReplicationMessages. -func (mr *MockAdminServiceServerMockRecorder) GetNamespaceReplicationMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetNamespaceReplicationMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNamespaceReplicationMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).GetNamespaceReplicationMessages), arg0, arg1) } @@ -1216,7 +1420,7 @@ func (m *MockAdminServiceServer) GetReplicationMessages(arg0 context.Context, ar } // GetReplicationMessages indicates an expected call of GetReplicationMessages. -func (mr *MockAdminServiceServerMockRecorder) GetReplicationMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetReplicationMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).GetReplicationMessages), arg0, arg1) } @@ -1231,7 +1435,7 @@ func (m *MockAdminServiceServer) GetSearchAttributes(arg0 context.Context, arg1 } // GetSearchAttributes indicates an expected call of GetSearchAttributes. 
-func (mr *MockAdminServiceServerMockRecorder) GetSearchAttributes(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetSearchAttributes(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSearchAttributes", reflect.TypeOf((*MockAdminServiceServer)(nil).GetSearchAttributes), arg0, arg1) } @@ -1246,7 +1450,7 @@ func (m *MockAdminServiceServer) GetShard(arg0 context.Context, arg1 *adminservi } // GetShard indicates an expected call of GetShard. -func (mr *MockAdminServiceServerMockRecorder) GetShard(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetShard(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockAdminServiceServer)(nil).GetShard), arg0, arg1) } @@ -1261,11 +1465,26 @@ func (m *MockAdminServiceServer) GetTaskQueueTasks(arg0 context.Context, arg1 *a } // GetTaskQueueTasks indicates an expected call of GetTaskQueueTasks. -func (mr *MockAdminServiceServerMockRecorder) GetTaskQueueTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetTaskQueueTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).GetTaskQueueTasks), arg0, arg1) } +// GetTaskQueueUserData mocks base method. +func (m *MockAdminServiceServer) GetTaskQueueUserData(arg0 context.Context, arg1 *adminservice.GetTaskQueueUserDataRequest) (*adminservice.GetTaskQueueUserDataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTaskQueueUserData", arg0, arg1) + ret0, _ := ret[0].(*adminservice.GetTaskQueueUserDataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTaskQueueUserData indicates an expected call of GetTaskQueueUserData. 
+func (mr *MockAdminServiceServerMockRecorder) GetTaskQueueUserData(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueUserData", reflect.TypeOf((*MockAdminServiceServer)(nil).GetTaskQueueUserData), arg0, arg1) +} + // GetWorkflowExecutionRawHistory mocks base method. func (m *MockAdminServiceServer) GetWorkflowExecutionRawHistory(arg0 context.Context, arg1 *adminservice.GetWorkflowExecutionRawHistoryRequest) (*adminservice.GetWorkflowExecutionRawHistoryResponse, error) { m.ctrl.T.Helper() @@ -1276,7 +1495,7 @@ func (m *MockAdminServiceServer) GetWorkflowExecutionRawHistory(arg0 context.Con } // GetWorkflowExecutionRawHistory indicates an expected call of GetWorkflowExecutionRawHistory. -func (mr *MockAdminServiceServerMockRecorder) GetWorkflowExecutionRawHistory(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetWorkflowExecutionRawHistory(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistory", reflect.TypeOf((*MockAdminServiceServer)(nil).GetWorkflowExecutionRawHistory), arg0, arg1) } @@ -1291,7 +1510,7 @@ func (m *MockAdminServiceServer) GetWorkflowExecutionRawHistoryV2(arg0 context.C } // GetWorkflowExecutionRawHistoryV2 indicates an expected call of GetWorkflowExecutionRawHistoryV2. 
-func (mr *MockAdminServiceServerMockRecorder) GetWorkflowExecutionRawHistoryV2(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) GetWorkflowExecutionRawHistoryV2(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistoryV2", reflect.TypeOf((*MockAdminServiceServer)(nil).GetWorkflowExecutionRawHistoryV2), arg0, arg1) } @@ -1306,7 +1525,7 @@ func (m *MockAdminServiceServer) ImportWorkflowExecution(arg0 context.Context, a } // ImportWorkflowExecution indicates an expected call of ImportWorkflowExecution. -func (mr *MockAdminServiceServerMockRecorder) ImportWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ImportWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportWorkflowExecution", reflect.TypeOf((*MockAdminServiceServer)(nil).ImportWorkflowExecution), arg0, arg1) } @@ -1321,7 +1540,7 @@ func (m *MockAdminServiceServer) ListClusterMembers(arg0 context.Context, arg1 * } // ListClusterMembers indicates an expected call of ListClusterMembers. -func (mr *MockAdminServiceServerMockRecorder) ListClusterMembers(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ListClusterMembers(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterMembers", reflect.TypeOf((*MockAdminServiceServer)(nil).ListClusterMembers), arg0, arg1) } @@ -1336,7 +1555,7 @@ func (m *MockAdminServiceServer) ListClusters(arg0 context.Context, arg1 *admins } // ListClusters indicates an expected call of ListClusters. 
-func (mr *MockAdminServiceServerMockRecorder) ListClusters(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ListClusters(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusters", reflect.TypeOf((*MockAdminServiceServer)(nil).ListClusters), arg0, arg1) } @@ -1351,7 +1570,7 @@ func (m *MockAdminServiceServer) ListHistoryTasks(arg0 context.Context, arg1 *ad } // ListHistoryTasks indicates an expected call of ListHistoryTasks. -func (mr *MockAdminServiceServerMockRecorder) ListHistoryTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ListHistoryTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListHistoryTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).ListHistoryTasks), arg0, arg1) } @@ -1366,7 +1585,7 @@ func (m *MockAdminServiceServer) ListQueues(arg0 context.Context, arg1 *adminser } // ListQueues indicates an expected call of ListQueues. -func (mr *MockAdminServiceServerMockRecorder) ListQueues(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ListQueues(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListQueues", reflect.TypeOf((*MockAdminServiceServer)(nil).ListQueues), arg0, arg1) } @@ -1381,7 +1600,7 @@ func (m *MockAdminServiceServer) MergeDLQMessages(arg0 context.Context, arg1 *ad } // MergeDLQMessages indicates an expected call of MergeDLQMessages. 
-func (mr *MockAdminServiceServerMockRecorder) MergeDLQMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) MergeDLQMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).MergeDLQMessages), arg0, arg1) } @@ -1396,11 +1615,26 @@ func (m *MockAdminServiceServer) MergeDLQTasks(arg0 context.Context, arg1 *admin } // MergeDLQTasks indicates an expected call of MergeDLQTasks. -func (mr *MockAdminServiceServerMockRecorder) MergeDLQTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) MergeDLQTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).MergeDLQTasks), arg0, arg1) } +// MigrateSchedule mocks base method. +func (m *MockAdminServiceServer) MigrateSchedule(arg0 context.Context, arg1 *adminservice.MigrateScheduleRequest) (*adminservice.MigrateScheduleResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MigrateSchedule", arg0, arg1) + ret0, _ := ret[0].(*adminservice.MigrateScheduleResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MigrateSchedule indicates an expected call of MigrateSchedule. +func (mr *MockAdminServiceServerMockRecorder) MigrateSchedule(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MigrateSchedule", reflect.TypeOf((*MockAdminServiceServer)(nil).MigrateSchedule), arg0, arg1) +} + // PurgeDLQMessages mocks base method. 
func (m *MockAdminServiceServer) PurgeDLQMessages(arg0 context.Context, arg1 *adminservice.PurgeDLQMessagesRequest) (*adminservice.PurgeDLQMessagesResponse, error) { m.ctrl.T.Helper() @@ -1411,7 +1645,7 @@ func (m *MockAdminServiceServer) PurgeDLQMessages(arg0 context.Context, arg1 *ad } // PurgeDLQMessages indicates an expected call of PurgeDLQMessages. -func (mr *MockAdminServiceServerMockRecorder) PurgeDLQMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) PurgeDLQMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).PurgeDLQMessages), arg0, arg1) } @@ -1426,7 +1660,7 @@ func (m *MockAdminServiceServer) PurgeDLQTasks(arg0 context.Context, arg1 *admin } // PurgeDLQTasks indicates an expected call of PurgeDLQTasks. -func (mr *MockAdminServiceServerMockRecorder) PurgeDLQTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) PurgeDLQTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).PurgeDLQTasks), arg0, arg1) } @@ -1441,7 +1675,7 @@ func (m *MockAdminServiceServer) ReapplyEvents(arg0 context.Context, arg1 *admin } // ReapplyEvents indicates an expected call of ReapplyEvents. -func (mr *MockAdminServiceServerMockRecorder) ReapplyEvents(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ReapplyEvents(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapplyEvents", reflect.TypeOf((*MockAdminServiceServer)(nil).ReapplyEvents), arg0, arg1) } @@ -1456,7 +1690,7 @@ func (m *MockAdminServiceServer) RebuildMutableState(arg0 context.Context, arg1 } // RebuildMutableState indicates an expected call of RebuildMutableState. 
-func (mr *MockAdminServiceServerMockRecorder) RebuildMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) RebuildMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebuildMutableState", reflect.TypeOf((*MockAdminServiceServer)(nil).RebuildMutableState), arg0, arg1) } @@ -1471,7 +1705,7 @@ func (m *MockAdminServiceServer) RefreshWorkflowTasks(arg0 context.Context, arg1 } // RefreshWorkflowTasks indicates an expected call of RefreshWorkflowTasks. -func (mr *MockAdminServiceServerMockRecorder) RefreshWorkflowTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) RefreshWorkflowTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshWorkflowTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).RefreshWorkflowTasks), arg0, arg1) } @@ -1486,7 +1720,7 @@ func (m *MockAdminServiceServer) RemoveRemoteCluster(arg0 context.Context, arg1 } // RemoveRemoteCluster indicates an expected call of RemoveRemoteCluster. -func (mr *MockAdminServiceServerMockRecorder) RemoveRemoteCluster(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) RemoveRemoteCluster(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRemoteCluster", reflect.TypeOf((*MockAdminServiceServer)(nil).RemoveRemoteCluster), arg0, arg1) } @@ -1501,7 +1735,7 @@ func (m *MockAdminServiceServer) RemoveSearchAttributes(arg0 context.Context, ar } // RemoveSearchAttributes indicates an expected call of RemoveSearchAttributes. 
-func (mr *MockAdminServiceServerMockRecorder) RemoveSearchAttributes(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) RemoveSearchAttributes(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSearchAttributes", reflect.TypeOf((*MockAdminServiceServer)(nil).RemoveSearchAttributes), arg0, arg1) } @@ -1516,7 +1750,7 @@ func (m *MockAdminServiceServer) RemoveTask(arg0 context.Context, arg1 *adminser } // RemoveTask indicates an expected call of RemoveTask. -func (mr *MockAdminServiceServerMockRecorder) RemoveTask(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) RemoveTask(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTask", reflect.TypeOf((*MockAdminServiceServer)(nil).RemoveTask), arg0, arg1) } @@ -1531,11 +1765,26 @@ func (m *MockAdminServiceServer) ResendReplicationTasks(arg0 context.Context, ar } // ResendReplicationTasks indicates an expected call of ResendReplicationTasks. -func (mr *MockAdminServiceServerMockRecorder) ResendReplicationTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) ResendReplicationTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResendReplicationTasks", reflect.TypeOf((*MockAdminServiceServer)(nil).ResendReplicationTasks), arg0, arg1) } +// StartAdminBatchOperation mocks base method. 
+func (m *MockAdminServiceServer) StartAdminBatchOperation(arg0 context.Context, arg1 *adminservice.StartAdminBatchOperationRequest) (*adminservice.StartAdminBatchOperationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartAdminBatchOperation", arg0, arg1) + ret0, _ := ret[0].(*adminservice.StartAdminBatchOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartAdminBatchOperation indicates an expected call of StartAdminBatchOperation. +func (mr *MockAdminServiceServerMockRecorder) StartAdminBatchOperation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartAdminBatchOperation", reflect.TypeOf((*MockAdminServiceServer)(nil).StartAdminBatchOperation), arg0, arg1) +} + // StreamWorkflowReplicationMessages mocks base method. func (m *MockAdminServiceServer) StreamWorkflowReplicationMessages(arg0 adminservice.AdminService_StreamWorkflowReplicationMessagesServer) error { m.ctrl.T.Helper() @@ -1545,11 +1794,26 @@ func (m *MockAdminServiceServer) StreamWorkflowReplicationMessages(arg0 adminser } // StreamWorkflowReplicationMessages indicates an expected call of StreamWorkflowReplicationMessages. -func (mr *MockAdminServiceServerMockRecorder) StreamWorkflowReplicationMessages(arg0 interface{}) *gomock.Call { +func (mr *MockAdminServiceServerMockRecorder) StreamWorkflowReplicationMessages(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWorkflowReplicationMessages", reflect.TypeOf((*MockAdminServiceServer)(nil).StreamWorkflowReplicationMessages), arg0) } +// SyncWorkflowState mocks base method. 
+func (m *MockAdminServiceServer) SyncWorkflowState(arg0 context.Context, arg1 *adminservice.SyncWorkflowStateRequest) (*adminservice.SyncWorkflowStateResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncWorkflowState", arg0, arg1) + ret0, _ := ret[0].(*adminservice.SyncWorkflowStateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncWorkflowState indicates an expected call of SyncWorkflowState. +func (mr *MockAdminServiceServerMockRecorder) SyncWorkflowState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWorkflowState", reflect.TypeOf((*MockAdminServiceServer)(nil).SyncWorkflowState), arg0, arg1) +} + // mustEmbedUnimplementedAdminServiceServer mocks base method. func (m *MockAdminServiceServer) mustEmbedUnimplementedAdminServiceServer() { m.ctrl.T.Helper() @@ -1566,6 +1830,7 @@ func (mr *MockAdminServiceServerMockRecorder) mustEmbedUnimplementedAdminService type MockUnsafeAdminServiceServer struct { ctrl *gomock.Controller recorder *MockUnsafeAdminServiceServerMockRecorder + isgomock struct{} } // MockUnsafeAdminServiceServerMockRecorder is the mock recorder for MockUnsafeAdminServiceServer. @@ -1601,6 +1866,7 @@ func (mr *MockUnsafeAdminServiceServerMockRecorder) mustEmbedUnimplementedAdminS type MockAdminService_StreamWorkflowReplicationMessagesServer struct { ctrl *gomock.Controller recorder *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder + isgomock struct{} } // MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder is the mock recorder for MockAdminService_StreamWorkflowReplicationMessagesServer. @@ -1658,7 +1924,7 @@ func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesServer) RecvMsg(m a } // RecvMsg indicates an expected call of RecvMsg. 
-func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) RecvMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesServer)(nil).RecvMsg), m) } @@ -1672,7 +1938,7 @@ func (m *MockAdminService_StreamWorkflowReplicationMessagesServer) Send(arg0 *ad } // Send indicates an expected call of Send. -func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) Send(arg0 interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) Send(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesServer)(nil).Send), arg0) } @@ -1686,7 +1952,7 @@ func (m *MockAdminService_StreamWorkflowReplicationMessagesServer) SendHeader(ar } // SendHeader indicates an expected call of SendHeader. -func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SendHeader(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesServer)(nil).SendHeader), arg0) } @@ -1700,7 +1966,7 @@ func (m_2 *MockAdminService_StreamWorkflowReplicationMessagesServer) SendMsg(m a } // SendMsg indicates an expected call of SendMsg. 
-func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SendMsg(m interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SendMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesServer)(nil).SendMsg), m) } @@ -1714,7 +1980,7 @@ func (m *MockAdminService_StreamWorkflowReplicationMessagesServer) SetHeader(arg } // SetHeader indicates an expected call of SetHeader. -func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SetHeader(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesServer)(nil).SetHeader), arg0) } @@ -1726,7 +1992,7 @@ func (m *MockAdminService_StreamWorkflowReplicationMessagesServer) SetTrailer(ar } // SetTrailer indicates an expected call of SetTrailer. -func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { +func (mr *MockAdminService_StreamWorkflowReplicationMessagesServerMockRecorder) SetTrailer(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockAdminService_StreamWorkflowReplicationMessagesServer)(nil).SetTrailer), arg0) } diff --git a/api/archiver/v1/message.go-helpers.pb.go b/api/archiver/v1/message.go-helpers.pb.go index 900a268ca27..9d7ff824222 100644 --- a/api/archiver/v1/message.go-helpers.pb.go +++ b/api/archiver/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package archiver diff --git a/api/archiver/v1/message.pb.go b/api/archiver/v1/message.pb.go index b487c531f9e..8bd8356939a 100644 --- a/api/archiver/v1/message.pb.go +++ b/api/archiver/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,12 +9,14 @@ package archiver import ( reflect "reflect" sync "sync" + unsafe "unsafe" v12 "go.temporal.io/api/common/v1" v11 "go.temporal.io/api/enums/v1" v1 "go.temporal.io/api/history/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) @@ -48,29 +28,26 @@ const ( ) type HistoryBlobHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,4,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - IsLast bool `protobuf:"varint,5,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` - FirstFailoverVersion int64 `protobuf:"varint,6,opt,name=first_failover_version,json=firstFailoverVersion,proto3" json:"first_failover_version,omitempty"` - LastFailoverVersion int64 `protobuf:"varint,7,opt,name=last_failover_version,json=lastFailoverVersion,proto3" json:"last_failover_version,omitempty"` - FirstEventId int64 `protobuf:"varint,8,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` - LastEventId int64 `protobuf:"varint,9,opt,name=last_event_id,json=lastEventId,proto3" json:"last_event_id,omitempty"` - EventCount int64 `protobuf:"varint,10,opt,name=event_count,json=eventCount,proto3" json:"event_count,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + NamespaceId string 
`protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,4,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + IsLast bool `protobuf:"varint,5,opt,name=is_last,json=isLast,proto3" json:"is_last,omitempty"` + FirstFailoverVersion int64 `protobuf:"varint,6,opt,name=first_failover_version,json=firstFailoverVersion,proto3" json:"first_failover_version,omitempty"` + LastFailoverVersion int64 `protobuf:"varint,7,opt,name=last_failover_version,json=lastFailoverVersion,proto3" json:"last_failover_version,omitempty"` + FirstEventId int64 `protobuf:"varint,8,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` + LastEventId int64 `protobuf:"varint,9,opt,name=last_event_id,json=lastEventId,proto3" json:"last_event_id,omitempty"` + EventCount int64 `protobuf:"varint,10,opt,name=event_count,json=eventCount,proto3" json:"event_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryBlobHeader) Reset() { *x = HistoryBlobHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryBlobHeader) String() string { @@ -81,7 +58,7 @@ func (*HistoryBlobHeader) ProtoMessage() {} func (x *HistoryBlobHeader) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -167,21 +144,18 @@ func (x 
*HistoryBlobHeader) GetEventCount() int64 { } type HistoryBlob struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Header *HistoryBlobHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` + Body []*v1.History `protobuf:"bytes,2,rep,name=body,proto3" json:"body,omitempty"` unknownFields protoimpl.UnknownFields - - Header *HistoryBlobHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"` - Body []*v1.History `protobuf:"bytes,2,rep,name=body,proto3" json:"body,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HistoryBlob) Reset() { *x = HistoryBlob{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryBlob) String() string { @@ -192,7 +166,7 @@ func (*HistoryBlob) ProtoMessage() {} func (x *HistoryBlob) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -223,10 +197,7 @@ func (x *HistoryBlob) GetBody() []*v1.History { // VisibilityRecord is a single workflow visibility record in archive. 
type VisibilityRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` @@ -238,17 +209,18 @@ type VisibilityRecord struct { Status v11.WorkflowExecutionStatus `protobuf:"varint,9,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` HistoryLength int64 `protobuf:"varint,10,opt,name=history_length,json=historyLength,proto3" json:"history_length,omitempty"` Memo *v12.Memo `protobuf:"bytes,11,opt,name=memo,proto3" json:"memo,omitempty"` - SearchAttributes map[string]string `protobuf:"bytes,12,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SearchAttributes map[string]string `protobuf:"bytes,12,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` HistoryArchivalUri string `protobuf:"bytes,13,opt,name=history_archival_uri,json=historyArchivalUri,proto3" json:"history_archival_uri,omitempty"` + ExecutionDuration *durationpb.Duration `protobuf:"bytes,14,opt,name=execution_duration,json=executionDuration,proto3" json:"execution_duration,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *VisibilityRecord) Reset() { *x = VisibilityRecord{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VisibilityRecord) String() string { @@ -259,7 +231,7 @@ func (*VisibilityRecord) ProtoMessage() {} func (x *VisibilityRecord) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_archiver_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -365,124 +337,72 @@ func (x *VisibilityRecord) GetHistoryArchivalUri() string { return "" } +func (x *VisibilityRecord) GetExecutionDuration() *durationpb.Duration { + if x != nil { + return x.ExecutionDuration + } + return nil +} + var File_temporal_server_api_archiver_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_archiver_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, - 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 
0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, - 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x03, 0x0a, 0x11, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x42, 0x6c, 0x6f, 0x62, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, - 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x69, 0x73, 0x5f, - 0x6c, 0x61, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x61, 0x73, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x16, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x66, 0x61, - 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x14, 0x66, 0x69, 0x72, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, - 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x36, 0x0a, 0x15, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 
0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, - 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x28, 0x0a, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6c, 0x61, - 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x97, 0x01, 0x0a, 0x0b, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x4e, - 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x6c, 0x6f, 0x62, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x06, - 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, - 0xbc, 0x06, 0x0a, 0x10, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 
0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, - 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 
0x12, - 0x3d, 0x0a, 0x0a, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x6c, - 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x04, 0x6d, 0x65, 0x6d, - 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x65, 0x6d, 0x6f, 0x52, 0x04, 0x6d, 0x65, 0x6d, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x78, 0x0a, 0x11, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x72, 0x63, 0x68, 0x69, - 0x76, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, - 0x69, 
0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x65, 0x61, 0x72, - 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, - 0x0a, 0x14, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, - 0x6c, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x55, 0x72, 0x69, 0x42, 0x02, 0x68, - 0x00, 0x1a, 0x4b, 0x0a, 0x15, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x6f, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x2f, 0x76, 0x31, - 0x3b, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_archiver_v1_message_proto_rawDesc = "" + + "\n" + + "-temporal/server/api/archiver/v1/message.proto\x12\x1ftemporal.server.api.archiver.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a$temporal/api/enums/v1/workflow.proto\x1a%temporal/api/history/v1/message.proto\"\xfa\x02\n" + + "\x11HistoryBlobHeader\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x03 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x04 
\x01(\tR\x05runId\x12\x17\n" + + "\ais_last\x18\x05 \x01(\bR\x06isLast\x124\n" + + "\x16first_failover_version\x18\x06 \x01(\x03R\x14firstFailoverVersion\x122\n" + + "\x15last_failover_version\x18\a \x01(\x03R\x13lastFailoverVersion\x12$\n" + + "\x0efirst_event_id\x18\b \x01(\x03R\ffirstEventId\x12\"\n" + + "\rlast_event_id\x18\t \x01(\x03R\vlastEventId\x12\x1f\n" + + "\vevent_count\x18\n" + + " \x01(\x03R\n" + + "eventCount\"\x8f\x01\n" + + "\vHistoryBlob\x12J\n" + + "\x06header\x18\x01 \x01(\v22.temporal.server.api.archiver.v1.HistoryBlobHeaderR\x06header\x124\n" + + "\x04body\x18\x02 \x03(\v2 .temporal.api.history.v1.HistoryR\x04body\"\xca\x06\n" + + "\x10VisibilityRecord\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\tR\tnamespace\x12\x1f\n" + + "\vworkflow_id\x18\x03 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x04 \x01(\tR\x05runId\x12,\n" + + "\x12workflow_type_name\x18\x05 \x01(\tR\x10workflowTypeName\x129\n" + + "\n" + + "start_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x12A\n" + + "\x0eexecution_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\rexecutionTime\x129\n" + + "\n" + + "close_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x12F\n" + + "\x06status\x18\t \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x12%\n" + + "\x0ehistory_length\x18\n" + + " \x01(\x03R\rhistoryLength\x120\n" + + "\x04memo\x18\v \x01(\v2\x1c.temporal.api.common.v1.MemoR\x04memo\x12t\n" + + "\x11search_attributes\x18\f \x03(\v2G.temporal.server.api.archiver.v1.VisibilityRecord.SearchAttributesEntryR\x10searchAttributes\x120\n" + + "\x14history_archival_uri\x18\r \x01(\tR\x12historyArchivalUri\x12H\n" + + "\x12execution_duration\x18\x0e \x01(\v2\x19.google.protobuf.DurationR\x11executionDuration\x1aC\n" + + "\x15SearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 
\x01(\tR\x05value:\x028\x01B0Z.go.temporal.io/server/api/archiver/v1;archiverb\x06proto3" var ( file_temporal_server_api_archiver_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_archiver_v1_message_proto_rawDescData = file_temporal_server_api_archiver_v1_message_proto_rawDesc + file_temporal_server_api_archiver_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_archiver_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_archiver_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_archiver_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_archiver_v1_message_proto_rawDescData) + file_temporal_server_api_archiver_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_archiver_v1_message_proto_rawDesc), len(file_temporal_server_api_archiver_v1_message_proto_rawDesc))) }) return file_temporal_server_api_archiver_v1_message_proto_rawDescData } var file_temporal_server_api_archiver_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_temporal_server_api_archiver_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_archiver_v1_message_proto_goTypes = []any{ (*HistoryBlobHeader)(nil), // 0: temporal.server.api.archiver.v1.HistoryBlobHeader (*HistoryBlob)(nil), // 1: temporal.server.api.archiver.v1.HistoryBlob (*VisibilityRecord)(nil), // 2: temporal.server.api.archiver.v1.VisibilityRecord @@ -491,6 +411,7 @@ var file_temporal_server_api_archiver_v1_message_proto_goTypes = []interface{}{ (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp (v11.WorkflowExecutionStatus)(0), // 6: temporal.api.enums.v1.WorkflowExecutionStatus (*v12.Memo)(nil), // 7: temporal.api.common.v1.Memo + (*durationpb.Duration)(nil), // 8: google.protobuf.Duration } var file_temporal_server_api_archiver_v1_message_proto_depIdxs = []int32{ 0, // 0: 
temporal.server.api.archiver.v1.HistoryBlob.header:type_name -> temporal.server.api.archiver.v1.HistoryBlobHeader @@ -501,11 +422,12 @@ var file_temporal_server_api_archiver_v1_message_proto_depIdxs = []int32{ 6, // 5: temporal.server.api.archiver.v1.VisibilityRecord.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus 7, // 6: temporal.server.api.archiver.v1.VisibilityRecord.memo:type_name -> temporal.api.common.v1.Memo 3, // 7: temporal.server.api.archiver.v1.VisibilityRecord.search_attributes:type_name -> temporal.server.api.archiver.v1.VisibilityRecord.SearchAttributesEntry - 8, // [8:8] is the sub-list for method output_type - 8, // [8:8] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name + 8, // 8: temporal.server.api.archiver.v1.VisibilityRecord.execution_duration:type_name -> google.protobuf.Duration + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_temporal_server_api_archiver_v1_message_proto_init() } @@ -513,49 +435,11 @@ func file_temporal_server_api_archiver_v1_message_proto_init() { if File_temporal_server_api_archiver_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_archiver_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryBlobHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_archiver_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryBlob); i { - case 0: - return &v.state - case 1: - return 
&v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_archiver_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VisibilityRecord); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_archiver_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_archiver_v1_message_proto_rawDesc), len(file_temporal_server_api_archiver_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 4, NumExtensions: 0, @@ -566,7 +450,6 @@ func file_temporal_server_api_archiver_v1_message_proto_init() { MessageInfos: file_temporal_server_api_archiver_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_archiver_v1_message_proto = out.File - file_temporal_server_api_archiver_v1_message_proto_rawDesc = nil file_temporal_server_api_archiver_v1_message_proto_goTypes = nil file_temporal_server_api_archiver_v1_message_proto_depIdxs = nil } diff --git a/api/batch/v1/request_response.go-helpers.pb.go b/api/batch/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..95609b8dcff --- /dev/null +++ b/api/batch/v1/request_response.go-helpers.pb.go @@ -0,0 +1,43 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package batch + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type BatchOperationInput to the protobuf v3 wire format +func (val *BatchOperationInput) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BatchOperationInput from the protobuf v3 wire format +func (val *BatchOperationInput) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BatchOperationInput) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BatchOperationInput values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BatchOperationInput) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BatchOperationInput + switch t := that.(type) { + case *BatchOperationInput: + that1 = t + case BatchOperationInput: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/batch/v1/request_response.pb.go b/api/batch/v1/request_response.pb.go new file mode 100644 index 00000000000..8856ee46919 --- /dev/null +++ b/api/batch/v1/request_response.pb.go @@ -0,0 +1,204 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/batch/v1/request_response.proto + +package batch + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/enums/v1" + v11 "go.temporal.io/api/workflowservice/v1" + v12 "go.temporal.io/server/api/adminservice/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type BatchOperationInput struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Concurrency int64 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"` + AttemptsOnRetryableError int64 `protobuf:"varint,3,opt,name=attempts_on_retryable_error,json=attemptsOnRetryableError,proto3" json:"attempts_on_retryable_error,omitempty"` + ActivityHeartbeatTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=activity_heartbeat_timeout,json=activityHeartbeatTimeout,proto3" json:"activity_heartbeat_timeout,omitempty"` + NonRetryableErrors []string `protobuf:"bytes,5,rep,name=non_retryable_errors,json=nonRetryableErrors,proto3" json:"non_retryable_errors,omitempty"` + // Only needed if StartBatchOperationRequest request is set. + BatchType v1.BatchOperationType `protobuf:"varint,6,opt,name=batch_type,json=batchType,proto3,enum=temporal.api.enums.v1.BatchOperationType" json:"batch_type,omitempty"` + // The request to start the batch operation. + // Mutually exclusive with StartAdminBatchOperationRequest admin_request. + Request *v11.StartBatchOperationRequest `protobuf:"bytes,7,opt,name=request,proto3" json:"request,omitempty"` + // The request to start an admin batch operation. + // Mutually exclusive with StartBatchOperationRequest request. 
+ AdminRequest *v12.StartAdminBatchOperationRequest `protobuf:"bytes,8,opt,name=admin_request,json=adminRequest,proto3" json:"admin_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BatchOperationInput) Reset() { + *x = BatchOperationInput{} + mi := &file_temporal_server_api_batch_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BatchOperationInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchOperationInput) ProtoMessage() {} + +func (x *BatchOperationInput) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_batch_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchOperationInput.ProtoReflect.Descriptor instead. 
+func (*BatchOperationInput) Descriptor() ([]byte, []int) { + return file_temporal_server_api_batch_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *BatchOperationInput) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *BatchOperationInput) GetConcurrency() int64 { + if x != nil { + return x.Concurrency + } + return 0 +} + +func (x *BatchOperationInput) GetAttemptsOnRetryableError() int64 { + if x != nil { + return x.AttemptsOnRetryableError + } + return 0 +} + +func (x *BatchOperationInput) GetActivityHeartbeatTimeout() *durationpb.Duration { + if x != nil { + return x.ActivityHeartbeatTimeout + } + return nil +} + +func (x *BatchOperationInput) GetNonRetryableErrors() []string { + if x != nil { + return x.NonRetryableErrors + } + return nil +} + +func (x *BatchOperationInput) GetBatchType() v1.BatchOperationType { + if x != nil { + return x.BatchType + } + return v1.BatchOperationType(0) +} + +func (x *BatchOperationInput) GetRequest() *v11.StartBatchOperationRequest { + if x != nil { + return x.Request + } + return nil +} + +func (x *BatchOperationInput) GetAdminRequest() *v12.StartAdminBatchOperationRequest { + if x != nil { + return x.AdminRequest + } + return nil +} + +var File_temporal_server_api_batch_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_api_batch_v1_request_response_proto_rawDesc = "" + + "\n" + + "3temporal/server/api/batch/v1/request_response.proto\x12\x1ctemporal.server.api.batch.v1\x1a\x1egoogle/protobuf/duration.proto\x1a+temporal/api/enums/v1/batch_operation.proto\x1a6temporal/api/workflowservice/v1/request_response.proto\x1a:temporal/server/api/adminservice/v1/request_response.proto\"\xb0\x04\n" + + "\x13BatchOperationInput\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12 \n" + + "\vconcurrency\x18\x02 \x01(\x03R\vconcurrency\x12=\n" + + "\x1battempts_on_retryable_error\x18\x03 \x01(\x03R\x18attemptsOnRetryableError\x12W\n" + + 
"\x1aactivity_heartbeat_timeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\x18activityHeartbeatTimeout\x120\n" + + "\x14non_retryable_errors\x18\x05 \x03(\tR\x12nonRetryableErrors\x12H\n" + + "\n" + + "batch_type\x18\x06 \x01(\x0e2).temporal.api.enums.v1.BatchOperationTypeR\tbatchType\x12U\n" + + "\arequest\x18\a \x01(\v2;.temporal.api.workflowservice.v1.StartBatchOperationRequestR\arequest\x12i\n" + + "\radmin_request\x18\b \x01(\v2D.temporal.server.api.adminservice.v1.StartAdminBatchOperationRequestR\fadminRequestB*Z(go.temporal.io/server/api/batch/v1;batchb\x06proto3" + +var ( + file_temporal_server_api_batch_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_api_batch_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_api_batch_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_api_batch_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_api_batch_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_batch_v1_request_response_proto_rawDesc), len(file_temporal_server_api_batch_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_api_batch_v1_request_response_proto_rawDescData +} + +var file_temporal_server_api_batch_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_temporal_server_api_batch_v1_request_response_proto_goTypes = []any{ + (*BatchOperationInput)(nil), // 0: temporal.server.api.batch.v1.BatchOperationInput + (*durationpb.Duration)(nil), // 1: google.protobuf.Duration + (v1.BatchOperationType)(0), // 2: temporal.api.enums.v1.BatchOperationType + (*v11.StartBatchOperationRequest)(nil), // 3: temporal.api.workflowservice.v1.StartBatchOperationRequest + (*v12.StartAdminBatchOperationRequest)(nil), // 4: temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest +} +var file_temporal_server_api_batch_v1_request_response_proto_depIdxs = 
[]int32{ + 1, // 0: temporal.server.api.batch.v1.BatchOperationInput.activity_heartbeat_timeout:type_name -> google.protobuf.Duration + 2, // 1: temporal.server.api.batch.v1.BatchOperationInput.batch_type:type_name -> temporal.api.enums.v1.BatchOperationType + 3, // 2: temporal.server.api.batch.v1.BatchOperationInput.request:type_name -> temporal.api.workflowservice.v1.StartBatchOperationRequest + 4, // 3: temporal.server.api.batch.v1.BatchOperationInput.admin_request:type_name -> temporal.server.api.adminservice.v1.StartAdminBatchOperationRequest + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_batch_v1_request_response_proto_init() } +func file_temporal_server_api_batch_v1_request_response_proto_init() { + if File_temporal_server_api_batch_v1_request_response_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_batch_v1_request_response_proto_rawDesc), len(file_temporal_server_api_batch_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_batch_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_api_batch_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_api_batch_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_api_batch_v1_request_response_proto = out.File + file_temporal_server_api_batch_v1_request_response_proto_goTypes = nil + file_temporal_server_api_batch_v1_request_response_proto_depIdxs = nil +} diff --git a/api/chasm/v1/message.go-helpers.pb.go 
b/api/chasm/v1/message.go-helpers.pb.go new file mode 100644 index 00000000000..b63c13f38a1 --- /dev/null +++ b/api/chasm/v1/message.go-helpers.pb.go @@ -0,0 +1,43 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package chasm + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type VisibilityExecutionInfo to the protobuf v3 wire format +func (val *VisibilityExecutionInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type VisibilityExecutionInfo from the protobuf v3 wire format +func (val *VisibilityExecutionInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *VisibilityExecutionInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two VisibilityExecutionInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *VisibilityExecutionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *VisibilityExecutionInfo + switch t := that.(type) { + case *VisibilityExecutionInfo: + that1 = t + case VisibilityExecutionInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/chasm/v1/message.pb.go b/api/chasm/v1/message.pb.go new file mode 100644 index 00000000000..6723380a971 --- /dev/null +++ b/api/chasm/v1/message.pb.go @@ -0,0 +1,230 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/chasm/v1/message.proto + +package chasm + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type VisibilityExecutionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + BusinessId string `protobuf:"bytes,1,opt,name=business_id,json=businessId,proto3" json:"business_id,omitempty"` + RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + CloseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` + HistoryLength int64 `protobuf:"varint,5,opt,name=history_length,json=historyLength,proto3" json:"history_length,omitempty"` + HistorySizeBytes int64 `protobuf:"varint,6,opt,name=history_size_bytes,json=historySizeBytes,proto3" json:"history_size_bytes,omitempty"` + StateTransitionCount int64 `protobuf:"varint,7,opt,name=state_transition_count,json=stateTransitionCount,proto3" json:"state_transition_count,omitempty"` + ChasmSearchAttributes *v1.SearchAttributes `protobuf:"bytes,8,opt,name=chasm_search_attributes,json=chasmSearchAttributes,proto3" json:"chasm_search_attributes,omitempty"` + CustomSearchAttributes *v1.SearchAttributes `protobuf:"bytes,9,opt,name=custom_search_attributes,json=customSearchAttributes,proto3" json:"custom_search_attributes,omitempty"` + 
Memo *v1.Memo `protobuf:"bytes,10,opt,name=memo,proto3" json:"memo,omitempty"` + ChasmMemo *v1.Payload `protobuf:"bytes,11,opt,name=chasm_memo,json=chasmMemo,proto3" json:"chasm_memo,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VisibilityExecutionInfo) Reset() { + *x = VisibilityExecutionInfo{} + mi := &file_temporal_server_api_chasm_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VisibilityExecutionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VisibilityExecutionInfo) ProtoMessage() {} + +func (x *VisibilityExecutionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_chasm_v1_message_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VisibilityExecutionInfo.ProtoReflect.Descriptor instead. 
+func (*VisibilityExecutionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_chasm_v1_message_proto_rawDescGZIP(), []int{0} +} + +func (x *VisibilityExecutionInfo) GetBusinessId() string { + if x != nil { + return x.BusinessId + } + return "" +} + +func (x *VisibilityExecutionInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *VisibilityExecutionInfo) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *VisibilityExecutionInfo) GetCloseTime() *timestamppb.Timestamp { + if x != nil { + return x.CloseTime + } + return nil +} + +func (x *VisibilityExecutionInfo) GetHistoryLength() int64 { + if x != nil { + return x.HistoryLength + } + return 0 +} + +func (x *VisibilityExecutionInfo) GetHistorySizeBytes() int64 { + if x != nil { + return x.HistorySizeBytes + } + return 0 +} + +func (x *VisibilityExecutionInfo) GetStateTransitionCount() int64 { + if x != nil { + return x.StateTransitionCount + } + return 0 +} + +func (x *VisibilityExecutionInfo) GetChasmSearchAttributes() *v1.SearchAttributes { + if x != nil { + return x.ChasmSearchAttributes + } + return nil +} + +func (x *VisibilityExecutionInfo) GetCustomSearchAttributes() *v1.SearchAttributes { + if x != nil { + return x.CustomSearchAttributes + } + return nil +} + +func (x *VisibilityExecutionInfo) GetMemo() *v1.Memo { + if x != nil { + return x.Memo + } + return nil +} + +func (x *VisibilityExecutionInfo) GetChasmMemo() *v1.Payload { + if x != nil { + return x.ChasmMemo + } + return nil +} + +var File_temporal_server_api_chasm_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_chasm_v1_message_proto_rawDesc = "" + + "\n" + + "*temporal/server/api/chasm/v1/message.proto\x12\x1ctemporal.server.api.chasm.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\"\x8a\x05\n" + + "\x17VisibilityExecutionInfo\x12\x1f\n" + + "\vbusiness_id\x18\x01 
\x01(\tR\n" + + "businessId\x12\x15\n" + + "\x06run_id\x18\x02 \x01(\tR\x05runId\x129\n" + + "\n" + + "start_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x129\n" + + "\n" + + "close_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x12%\n" + + "\x0ehistory_length\x18\x05 \x01(\x03R\rhistoryLength\x12,\n" + + "\x12history_size_bytes\x18\x06 \x01(\x03R\x10historySizeBytes\x124\n" + + "\x16state_transition_count\x18\a \x01(\x03R\x14stateTransitionCount\x12`\n" + + "\x17chasm_search_attributes\x18\b \x01(\v2(.temporal.api.common.v1.SearchAttributesR\x15chasmSearchAttributes\x12b\n" + + "\x18custom_search_attributes\x18\t \x01(\v2(.temporal.api.common.v1.SearchAttributesR\x16customSearchAttributes\x120\n" + + "\x04memo\x18\n" + + " \x01(\v2\x1c.temporal.api.common.v1.MemoR\x04memo\x12>\n" + + "\n" + + "chasm_memo\x18\v \x01(\v2\x1f.temporal.api.common.v1.PayloadR\tchasmMemoB*Z(go.temporal.io/server/api/chasm/v1;chasmb\x06proto3" + +var ( + file_temporal_server_api_chasm_v1_message_proto_rawDescOnce sync.Once + file_temporal_server_api_chasm_v1_message_proto_rawDescData []byte +) + +func file_temporal_server_api_chasm_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_api_chasm_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_api_chasm_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_chasm_v1_message_proto_rawDesc), len(file_temporal_server_api_chasm_v1_message_proto_rawDesc))) + }) + return file_temporal_server_api_chasm_v1_message_proto_rawDescData +} + +var file_temporal_server_api_chasm_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_temporal_server_api_chasm_v1_message_proto_goTypes = []any{ + (*VisibilityExecutionInfo)(nil), // 0: temporal.server.api.chasm.v1.VisibilityExecutionInfo + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp + (*v1.SearchAttributes)(nil), // 2: temporal.api.common.v1.SearchAttributes 
+ (*v1.Memo)(nil), // 3: temporal.api.common.v1.Memo + (*v1.Payload)(nil), // 4: temporal.api.common.v1.Payload +} +var file_temporal_server_api_chasm_v1_message_proto_depIdxs = []int32{ + 1, // 0: temporal.server.api.chasm.v1.VisibilityExecutionInfo.start_time:type_name -> google.protobuf.Timestamp + 1, // 1: temporal.server.api.chasm.v1.VisibilityExecutionInfo.close_time:type_name -> google.protobuf.Timestamp + 2, // 2: temporal.server.api.chasm.v1.VisibilityExecutionInfo.chasm_search_attributes:type_name -> temporal.api.common.v1.SearchAttributes + 2, // 3: temporal.server.api.chasm.v1.VisibilityExecutionInfo.custom_search_attributes:type_name -> temporal.api.common.v1.SearchAttributes + 3, // 4: temporal.server.api.chasm.v1.VisibilityExecutionInfo.memo:type_name -> temporal.api.common.v1.Memo + 4, // 5: temporal.server.api.chasm.v1.VisibilityExecutionInfo.chasm_memo:type_name -> temporal.api.common.v1.Payload + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_chasm_v1_message_proto_init() } +func file_temporal_server_api_chasm_v1_message_proto_init() { + if File_temporal_server_api_chasm_v1_message_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_chasm_v1_message_proto_rawDesc), len(file_temporal_server_api_chasm_v1_message_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_chasm_v1_message_proto_goTypes, + DependencyIndexes: file_temporal_server_api_chasm_v1_message_proto_depIdxs, + MessageInfos: file_temporal_server_api_chasm_v1_message_proto_msgTypes, + 
}.Build() + File_temporal_server_api_chasm_v1_message_proto = out.File + file_temporal_server_api_chasm_v1_message_proto_goTypes = nil + file_temporal_server_api_chasm_v1_message_proto_depIdxs = nil +} diff --git a/api/checksum/v1/message.go-helpers.pb.go b/api/checksum/v1/message.go-helpers.pb.go index ebc4895497f..6869734d825 100644 --- a/api/checksum/v1/message.go-helpers.pb.go +++ b/api/checksum/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package checksum diff --git a/api/checksum/v1/message.pb.go b/api/checksum/v1/message.pb.go index 0a8b8b550de..64fd75c37dd 100644 --- a/api/checksum/v1/message.pb.go +++ b/api/checksum/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package checksum import ( reflect "reflect" sync "sync" + unsafe "unsafe" v11 "go.temporal.io/api/enums/v1" v1 "go.temporal.io/server/api/enums/v1" @@ -47,10 +26,7 @@ const ( ) type MutableStateChecksumPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` CancelRequested bool `protobuf:"varint,1,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` State v1.WorkflowExecutionState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"state,omitempty"` Status v11.WorkflowExecutionStatus `protobuf:"varint,3,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` @@ -74,17 +50,18 @@ type MutableStateChecksumPayload struct { PendingSignalInitiatedEventIds []int64 `protobuf:"varint,16,rep,packed,name=pending_signal_initiated_event_ids,json=pendingSignalInitiatedEventIds,proto3" json:"pending_signal_initiated_event_ids,omitempty"` PendingReqCancelInitiatedEventIds []int64 `protobuf:"varint,17,rep,packed,name=pending_req_cancel_initiated_event_ids,json=pendingReqCancelInitiatedEventIds,proto3" json:"pending_req_cancel_initiated_event_ids,omitempty"` PendingChildInitiatedEventIds []int64 `protobuf:"varint,18,rep,packed,name=pending_child_initiated_event_ids,json=pendingChildInitiatedEventIds,proto3" json:"pending_child_initiated_event_ids,omitempty"` + PendingChasmNodePaths []string `protobuf:"bytes,26,rep,name=pending_chasm_node_paths,json=pendingChasmNodePaths,proto3" json:"pending_chasm_node_paths,omitempty"` StickyTaskQueueName string `protobuf:"bytes,19,opt,name=sticky_task_queue_name,json=stickyTaskQueueName,proto3" json:"sticky_task_queue_name,omitempty"` VersionHistories *v12.VersionHistories `protobuf:"bytes,20,opt,name=version_histories,json=versionHistories,proto3" 
json:"version_histories,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *MutableStateChecksumPayload) Reset() { *x = MutableStateChecksumPayload{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_checksum_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_checksum_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MutableStateChecksumPayload) String() string { @@ -95,7 +72,7 @@ func (*MutableStateChecksumPayload) ProtoMessage() {} func (x *MutableStateChecksumPayload) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_checksum_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -271,6 +248,13 @@ func (x *MutableStateChecksumPayload) GetPendingChildInitiatedEventIds() []int64 return nil } +func (x *MutableStateChecksumPayload) GetPendingChasmNodePaths() []string { + if x != nil { + return x.PendingChasmNodePaths + } + return nil +} + func (x *MutableStateChecksumPayload) GetStickyTaskQueueName() string { if x != nil { return x.StickyTaskQueueName @@ -287,135 +271,52 @@ func (x *MutableStateChecksumPayload) GetVersionHistories() *v12.VersionHistorie var File_temporal_server_api_checksum_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_checksum_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x24, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, - 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x0c, 0x0a, 0x1b, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x2d, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x06, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x74, 
0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, - 0x6c, 0x61, 0x73, 0x74, 0x57, 0x72, 0x69, 0x74, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x72, 0x69, 0x74, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x72, 0x73, - 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x10, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x39, 0x0a, 0x17, 0x6c, 0x61, 0x73, - 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x50, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x09, 0x20, 
0x01, 0x28, 0x03, 0x52, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x36, 0x0a, 0x15, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x75, - 0x73, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x45, 0x0a, 0x1d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x36, 0x0a, 0x15, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x13, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x36, 0x0a, 0x15, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x77, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, - 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x36, 0x0a, - 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1c, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x46, 0x0a, 0x1e, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, - 0x0a, 0x1f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, - 0x0e, 0x20, 0x03, 0x28, 0x03, 0x52, 0x1b, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x52, 0x0a, 0x24, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 
0x65, - 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x03, - 0x52, 0x20, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x22, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x73, - 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x03, 0x52, 0x1e, 0x70, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x55, 0x0a, 0x26, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x63, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x03, 0x52, 0x21, 0x70, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x49, 0x6e, - 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x21, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x03, 0x52, 0x1d, 0x70, 0x65, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, - 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x69, 0x63, - 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x61, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, - 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_checksum_v1_message_proto_rawDesc = "" + + "\n" + + "-temporal/server/api/checksum/v1/message.proto\x12\x1ftemporal.server.api.checksum.v1\x1a$temporal/api/enums/v1/workflow.proto\x1a+temporal/server/api/enums/v1/workflow.proto\x1a,temporal/server/api/history/v1/message.proto\"\xa2\f\n" + + "\x1bMutableStateChecksumPayload\x12)\n" + + "\x10cancel_requested\x18\x01 \x01(\bR\x0fcancelRequested\x12J\n" + + "\x05state\x18\x02 \x01(\x0e24.temporal.server.api.enums.v1.WorkflowExecutionStateR\x05state\x12F\n" + + "\x06status\x18\x03 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x12,\n" + + "\x12last_write_version\x18\x04 \x01(\x03R\x10lastWriteVersion\x12-\n" + + "\x13last_write_event_id\x18\x05 \x01(\x03R\x10lastWriteEventId\x12-\n" + + "\x13last_first_event_id\x18\x06 
\x01(\x03R\x10lastFirstEventId\x12\"\n" + + "\rnext_event_id\x18\a \x01(\x03R\vnextEventId\x125\n" + + "\x17last_processed_event_id\x18\b \x01(\x03R\x14lastProcessedEventId\x12!\n" + + "\fsignal_count\x18\t \x01(\x03R\vsignalCount\x12%\n" + + "\x0eactivity_count\x18\x15 \x01(\x03R\ractivityCount\x122\n" + + "\x15child_execution_count\x18\x16 \x01(\x03R\x13childExecutionCount\x12(\n" + + "\x10user_timer_count\x18\x17 \x01(\x03R\x0euserTimerCount\x12A\n" + + "\x1drequest_cancel_external_count\x18\x18 \x01(\x03R\x1arequestCancelExternalCount\x122\n" + + "\x15signal_external_count\x18\x19 \x01(\x03R\x13signalExternalCount\x122\n" + + "\x15workflow_task_attempt\x18\n" + + " \x01(\x05R\x13workflowTaskAttempt\x122\n" + + "\x15workflow_task_version\x18\v \x01(\x03R\x13workflowTaskVersion\x12F\n" + + " workflow_task_scheduled_event_id\x18\f \x01(\x03R\x1cworkflowTaskScheduledEventId\x12B\n" + + "\x1eworkflow_task_started_event_id\x18\r \x01(\x03R\x1aworkflowTaskStartedEventId\x12D\n" + + "\x1fpending_timer_started_event_ids\x18\x0e \x03(\x03R\x1bpendingTimerStartedEventIds\x12N\n" + + "$pending_activity_scheduled_event_ids\x18\x0f \x03(\x03R pendingActivityScheduledEventIds\x12J\n" + + "\"pending_signal_initiated_event_ids\x18\x10 \x03(\x03R\x1ependingSignalInitiatedEventIds\x12Q\n" + + "&pending_req_cancel_initiated_event_ids\x18\x11 \x03(\x03R!pendingReqCancelInitiatedEventIds\x12H\n" + + "!pending_child_initiated_event_ids\x18\x12 \x03(\x03R\x1dpendingChildInitiatedEventIds\x127\n" + + "\x18pending_chasm_node_paths\x18\x1a \x03(\tR\x15pendingChasmNodePaths\x123\n" + + "\x16sticky_task_queue_name\x18\x13 \x01(\tR\x13stickyTaskQueueName\x12]\n" + + "\x11version_histories\x18\x14 \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistoriesB0Z.go.temporal.io/server/api/checksum/v1;checksumb\x06proto3" var ( file_temporal_server_api_checksum_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_checksum_v1_message_proto_rawDescData = 
file_temporal_server_api_checksum_v1_message_proto_rawDesc + file_temporal_server_api_checksum_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_checksum_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_checksum_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_checksum_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_checksum_v1_message_proto_rawDescData) + file_temporal_server_api_checksum_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_checksum_v1_message_proto_rawDesc), len(file_temporal_server_api_checksum_v1_message_proto_rawDesc))) }) return file_temporal_server_api_checksum_v1_message_proto_rawDescData } var file_temporal_server_api_checksum_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_temporal_server_api_checksum_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_checksum_v1_message_proto_goTypes = []any{ (*MutableStateChecksumPayload)(nil), // 0: temporal.server.api.checksum.v1.MutableStateChecksumPayload (v1.WorkflowExecutionState)(0), // 1: temporal.server.api.enums.v1.WorkflowExecutionState (v11.WorkflowExecutionStatus)(0), // 2: temporal.api.enums.v1.WorkflowExecutionStatus @@ -437,25 +338,11 @@ func file_temporal_server_api_checksum_v1_message_proto_init() { if File_temporal_server_api_checksum_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_checksum_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MutableStateChecksumPayload); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_checksum_v1_message_proto_rawDesc, + 
RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_checksum_v1_message_proto_rawDesc), len(file_temporal_server_api_checksum_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -466,7 +353,6 @@ func file_temporal_server_api_checksum_v1_message_proto_init() { MessageInfos: file_temporal_server_api_checksum_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_checksum_v1_message_proto = out.File - file_temporal_server_api_checksum_v1_message_proto_rawDesc = nil file_temporal_server_api_checksum_v1_message_proto_goTypes = nil file_temporal_server_api_checksum_v1_message_proto_depIdxs = nil } diff --git a/api/cli/v1/message.go-helpers.pb.go b/api/cli/v1/message.go-helpers.pb.go index e534cbbeda5..7fb65b32978 100644 --- a/api/cli/v1/message.go-helpers.pb.go +++ b/api/cli/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package cli diff --git a/api/cli/v1/message.pb.go b/api/cli/v1/message.pb.go index a313ec3cbd8..87b954a30f2 100644 --- a/api/cli/v1/message.pb.go +++ b/api/cli/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package cli import ( reflect "reflect" sync "sync" + unsafe "unsafe" v11 "go.temporal.io/api/common/v1" v12 "go.temporal.io/api/enums/v1" @@ -48,24 +27,21 @@ const ( ) type DescribeWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ExecutionConfig *v1.WorkflowExecutionConfig `protobuf:"bytes,1,opt,name=execution_config,json=executionConfig,proto3" json:"execution_config,omitempty"` WorkflowExecutionInfo *WorkflowExecutionInfo `protobuf:"bytes,2,opt,name=workflow_execution_info,json=workflowExecutionInfo,proto3" json:"workflow_execution_info,omitempty"` PendingActivities []*PendingActivityInfo `protobuf:"bytes,3,rep,name=pending_activities,json=pendingActivities,proto3" json:"pending_activities,omitempty"` PendingChildren []*v1.PendingChildExecutionInfo `protobuf:"bytes,4,rep,name=pending_children,json=pendingChildren,proto3" json:"pending_children,omitempty"` PendingWorkflowTask *v1.PendingWorkflowTaskInfo `protobuf:"bytes,5,opt,name=pending_workflow_task,json=pendingWorkflowTask,proto3" json:"pending_workflow_task,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DescribeWorkflowExecutionResponse) Reset() { *x = DescribeWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeWorkflowExecutionResponse) String() string { @@ -76,7 +52,7 @@ func (*DescribeWorkflowExecutionResponse) ProtoMessage() {} func (x *DescribeWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { mi := 
&file_temporal_server_api_cli_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -127,10 +103,7 @@ func (x *DescribeWorkflowExecutionResponse) GetPendingWorkflowTask() *v1.Pending } type WorkflowExecutionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Execution *v11.WorkflowExecution `protobuf:"bytes,1,opt,name=execution,proto3" json:"execution,omitempty"` Type *v11.WorkflowType `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` StartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` @@ -146,15 +119,15 @@ type WorkflowExecutionInfo struct { StateTransitionCount int64 `protobuf:"varint,13,opt,name=state_transition_count,json=stateTransitionCount,proto3" json:"state_transition_count,omitempty"` HistorySizeBytes int64 `protobuf:"varint,14,opt,name=history_size_bytes,json=historySizeBytes,proto3" json:"history_size_bytes,omitempty"` MostRecentWorkerVersionStamp *v11.WorkerVersionStamp `protobuf:"bytes,15,opt,name=most_recent_worker_version_stamp,json=mostRecentWorkerVersionStamp,proto3" json:"most_recent_worker_version_stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WorkflowExecutionInfo) Reset() { *x = WorkflowExecutionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WorkflowExecutionInfo) String() string { @@ -165,7 +138,7 @@ func (*WorkflowExecutionInfo) 
ProtoMessage() {} func (x *WorkflowExecutionInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -286,10 +259,7 @@ func (x *WorkflowExecutionInfo) GetMostRecentWorkerVersionStamp() *v11.WorkerVer } type PendingActivityInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ActivityId string `protobuf:"bytes,1,opt,name=activity_id,json=activityId,proto3" json:"activity_id,omitempty"` ActivityType *v11.ActivityType `protobuf:"bytes,2,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` State v12.PendingActivityState `protobuf:"varint,3,opt,name=state,proto3,enum=temporal.api.enums.v1.PendingActivityState" json:"state,omitempty"` @@ -302,15 +272,15 @@ type PendingActivityInfo struct { ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"` LastFailure *Failure `protobuf:"bytes,11,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` LastWorkerIdentity string `protobuf:"bytes,12,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PendingActivityInfo) Reset() { *x = PendingActivityInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PendingActivityInfo) String() string 
{ @@ -321,7 +291,7 @@ func (*PendingActivityInfo) ProtoMessage() {} func (x *PendingActivityInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -421,20 +391,17 @@ func (x *PendingActivityInfo) GetLastWorkerIdentity() string { } type SearchAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + IndexedFields map[string]string `protobuf:"bytes,1,rep,name=indexed_fields,json=indexedFields,proto3" json:"indexed_fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - IndexedFields map[string]string `protobuf:"bytes,1,rep,name=indexed_fields,json=indexedFields,proto3" json:"indexed_fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *SearchAttributes) Reset() { *x = SearchAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SearchAttributes) String() string { @@ -445,7 +412,7 @@ func (*SearchAttributes) ProtoMessage() {} func (x *SearchAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -468,24 +435,21 @@ func (x *SearchAttributes) GetIndexedFields() 
map[string]string { } type Failure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + StackTrace string `protobuf:"bytes,3,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + Cause *Failure `protobuf:"bytes,4,opt,name=cause,proto3" json:"cause,omitempty"` + FailureType string `protobuf:"bytes,5,opt,name=failure_type,json=failureType,proto3" json:"failure_type,omitempty"` unknownFields protoimpl.UnknownFields - - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` - Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` - StackTrace string `protobuf:"bytes,3,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` - Cause *Failure `protobuf:"bytes,4,opt,name=cause,proto3" json:"cause,omitempty"` - FailureType string `protobuf:"bytes,5,opt,name=failure_type,json=failureType,proto3" json:"failure_type,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Failure) Reset() { *x = Failure{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Failure) String() string { @@ -496,7 +460,7 @@ func (*Failure) ProtoMessage() {} func (x *Failure) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -547,24 +511,21 @@ func (x 
*Failure) GetFailureType() string { } type AddSearchAttributesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` IndexName string `protobuf:"bytes,1,opt,name=index_name,json=indexName,proto3" json:"index_name,omitempty"` - CustomSearchAttributes map[string]string `protobuf:"bytes,2,rep,name=custom_search_attributes,json=customSearchAttributes,proto3" json:"custom_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SystemSearchAttributes map[string]string `protobuf:"bytes,3,rep,name=system_search_attributes,json=systemSearchAttributes,proto3" json:"system_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Mapping map[string]string `protobuf:"bytes,4,rep,name=mapping,proto3" json:"mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CustomSearchAttributes map[string]string `protobuf:"bytes,2,rep,name=custom_search_attributes,json=customSearchAttributes,proto3" json:"custom_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SystemSearchAttributes map[string]string `protobuf:"bytes,3,rep,name=system_search_attributes,json=systemSearchAttributes,proto3" json:"system_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Mapping map[string]string `protobuf:"bytes,4,rep,name=mapping,proto3" json:"mapping,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` AddWorkflowExecutionInfo *WorkflowExecutionInfo `protobuf:"bytes,5,opt,name=add_workflow_execution_info,json=addWorkflowExecutionInfo,proto3" json:"add_workflow_execution_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x 
*AddSearchAttributesResponse) Reset() { *x = AddSearchAttributesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddSearchAttributesResponse) String() string { @@ -575,7 +536,7 @@ func (*AddSearchAttributesResponse) ProtoMessage() {} func (x *AddSearchAttributesResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cli_v1_message_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -627,255 +588,92 @@ func (x *AddSearchAttributesResponse) GetAddWorkflowExecutionInfo() *WorkflowExe var File_temporal_server_api_cli_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_cli_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x28, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, - 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 
0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, - 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xa7, 0x04, 0x0a, 0x21, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x6d, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x62, 0x0a, 0x12, 0x70, 0x65, 0x6e, 
- 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x62, 0x0a, 0x10, 0x70, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, - 0x6e, 0x67, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x69, 0x6c, 0x64, - 0x72, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x69, 0x0a, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbc, 0x08, 0x0a, - 0x15, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x66, 0x6f, 
0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x0a, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x09, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x32, 0x0a, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x58, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x04, 0x6d, 0x65, - 0x6d, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x4d, 0x65, 0x6d, 0x6f, 0x52, 0x04, 0x6d, 0x65, 0x6d, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5d, - 
0x0a, 0x11, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x52, 0x10, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x5f, - 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x16, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x30, 0x0a, 0x12, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, - 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x76, 0x0a, 0x20, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x5f, - 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x1c, 0x6d, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xfc, 0x05, 0x0a, 0x13, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0b, 0x61, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0d, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x11, 0x68, 0x65, - 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 
0x65, 0x61, 0x74, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x11, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, - 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, - 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x6d, 0x61, 0x78, - 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, - 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x69, - 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x70, - 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, - 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc8, 0x01, 0x0a, 0x10, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x6a, 0x0a, - 0x0e, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x2e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0d, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x1a, 0x48, 0x0a, 
0x12, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xce, 0x01, 0x0a, 0x07, 0x46, 0x61, - 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x1a, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, - 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x3d, 0x0a, 0x05, 0x63, 0x61, 0x75, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x52, 0x05, 0x63, 0x61, 0x75, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x66, 0x61, - 0x69, 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xac, 0x06, 0x0a, 0x1b, 0x41, 0x64, 0x64, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, - 0x0a, 0x0a, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 
0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x91, 0x01, 0x0a, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, - 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x53, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x16, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x91, 0x01, 0x0a, 0x18, - 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x53, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x61, 0x72, - 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x73, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x62, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, - 0x6e, 
0x67, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x69, - 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x4d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, - 0x69, 0x6e, 0x67, 0x42, 0x02, 0x68, 0x00, 0x12, 0x74, 0x0a, 0x1b, 0x61, 0x64, 0x64, 0x5f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6c, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x18, 0x61, 0x64, 0x64, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, - 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x51, 0x0a, 0x1b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x1b, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x0c, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0x26, 0x5a, 0x24, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x63, 0x6c, 0x69, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6c, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_temporal_server_api_cli_v1_message_proto_rawDesc = "" + + "\n" + + "(temporal/server/api/cli/v1/message.proto\x12\x1atemporal.server.api.cli.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a$temporal/api/enums/v1/workflow.proto\x1a&temporal/api/workflow/v1/message.proto\"\x93\x04\n" + + "!DescribeWorkflowExecutionResponse\x12\\\n" + + "\x10execution_config\x18\x01 \x01(\v21.temporal.api.workflow.v1.WorkflowExecutionConfigR\x0fexecutionConfig\x12i\n" + + "\x17workflow_execution_info\x18\x02 \x01(\v21.temporal.server.api.cli.v1.WorkflowExecutionInfoR\x15workflowExecutionInfo\x12^\n" + + "\x12pending_activities\x18\x03 \x03(\v2/.temporal.server.api.cli.v1.PendingActivityInfoR\x11pendingActivities\x12^\n" + + "\x10pending_children\x18\x04 \x03(\v23.temporal.api.workflow.v1.PendingChildExecutionInfoR\x0fpendingChildren\x12e\n" + + "\x15pending_workflow_task\x18\x05 
\x01(\v21.temporal.api.workflow.v1.PendingWorkflowTaskInfoR\x13pendingWorkflowTask\"\x80\b\n" + + "\x15WorkflowExecutionInfo\x12G\n" + + "\texecution\x18\x01 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x128\n" + + "\x04type\x18\x02 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\x04type\x129\n" + + "\n" + + "start_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x129\n" + + "\n" + + "close_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x12F\n" + + "\x06status\x18\x05 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x12%\n" + + "\x0ehistory_length\x18\x06 \x01(\x03R\rhistoryLength\x12.\n" + + "\x13parent_namespace_id\x18\a \x01(\tR\x11parentNamespaceId\x12T\n" + + "\x10parent_execution\x18\b \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x0fparentExecution\x12A\n" + + "\x0eexecution_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\rexecutionTime\x120\n" + + "\x04memo\x18\n" + + " \x01(\v2\x1c.temporal.api.common.v1.MemoR\x04memo\x12Y\n" + + "\x11search_attributes\x18\v \x01(\v2,.temporal.server.api.cli.v1.SearchAttributesR\x10searchAttributes\x12Q\n" + + "\x11auto_reset_points\x18\f \x01(\v2%.temporal.api.workflow.v1.ResetPointsR\x0fautoResetPoints\x124\n" + + "\x16state_transition_count\x18\r \x01(\x03R\x14stateTransitionCount\x12,\n" + + "\x12history_size_bytes\x18\x0e \x01(\x03R\x10historySizeBytes\x12r\n" + + " most_recent_worker_version_stamp\x18\x0f \x01(\v2*.temporal.api.common.v1.WorkerVersionStampR\x1cmostRecentWorkerVersionStamp\"\xcc\x05\n" + + "\x13PendingActivityInfo\x12\x1f\n" + + "\vactivity_id\x18\x01 \x01(\tR\n" + + "activityId\x12I\n" + + "\ractivity_type\x18\x02 \x01(\v2$.temporal.api.common.v1.ActivityTypeR\factivityType\x12A\n" + + "\x05state\x18\x03 \x01(\x0e2+.temporal.api.enums.v1.PendingActivityStateR\x05state\x12+\n" + + "\x11heartbeat_details\x18\x04 \x01(\tR\x10heartbeatDetails\x12J\n" + + "\x13last_heartbeat_time\x18\x05 
\x01(\v2\x1a.google.protobuf.TimestampR\x11lastHeartbeatTime\x12F\n" + + "\x11last_started_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastStartedTime\x12\x18\n" + + "\aattempt\x18\a \x01(\x05R\aattempt\x12)\n" + + "\x10maximum_attempts\x18\b \x01(\x05R\x0fmaximumAttempts\x12A\n" + + "\x0escheduled_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12C\n" + + "\x0fexpiration_time\x18\n" + + " \x01(\v2\x1a.google.protobuf.TimestampR\x0eexpirationTime\x12F\n" + + "\flast_failure\x18\v \x01(\v2#.temporal.server.api.cli.v1.FailureR\vlastFailure\x120\n" + + "\x14last_worker_identity\x18\f \x01(\tR\x12lastWorkerIdentity\"\xbc\x01\n" + + "\x10SearchAttributes\x12f\n" + + "\x0eindexed_fields\x18\x01 \x03(\v2?.temporal.server.api.cli.v1.SearchAttributes.IndexedFieldsEntryR\rindexedFields\x1a@\n" + + "\x12IndexedFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xba\x01\n" + + "\aFailure\x12\x18\n" + + "\amessage\x18\x01 \x01(\tR\amessage\x12\x16\n" + + "\x06source\x18\x02 \x01(\tR\x06source\x12\x1f\n" + + "\vstack_trace\x18\x03 \x01(\tR\n" + + "stackTrace\x129\n" + + "\x05cause\x18\x04 \x01(\v2#.temporal.server.api.cli.v1.FailureR\x05cause\x12!\n" + + "\ffailure_type\x18\x05 \x01(\tR\vfailureType\"\x80\x06\n" + + "\x1bAddSearchAttributesResponse\x12\x1d\n" + + "\n" + + "index_name\x18\x01 \x01(\tR\tindexName\x12\x8d\x01\n" + + "\x18custom_search_attributes\x18\x02 \x03(\v2S.temporal.server.api.cli.v1.AddSearchAttributesResponse.CustomSearchAttributesEntryR\x16customSearchAttributes\x12\x8d\x01\n" + + "\x18system_search_attributes\x18\x03 \x03(\v2S.temporal.server.api.cli.v1.AddSearchAttributesResponse.SystemSearchAttributesEntryR\x16systemSearchAttributes\x12^\n" + + "\amapping\x18\x04 \x03(\v2D.temporal.server.api.cli.v1.AddSearchAttributesResponse.MappingEntryR\amapping\x12p\n" + + "\x1badd_workflow_execution_info\x18\x05 
\x01(\v21.temporal.server.api.cli.v1.WorkflowExecutionInfoR\x18addWorkflowExecutionInfo\x1aI\n" + + "\x1bCustomSearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1aI\n" + + "\x1bSystemSearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a:\n" + + "\fMappingEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B&Z$go.temporal.io/server/api/cli/v1;clib\x06proto3" var ( file_temporal_server_api_cli_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_cli_v1_message_proto_rawDescData = file_temporal_server_api_cli_v1_message_proto_rawDesc + file_temporal_server_api_cli_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_cli_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_cli_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_cli_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_cli_v1_message_proto_rawDescData) + file_temporal_server_api_cli_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_cli_v1_message_proto_rawDesc), len(file_temporal_server_api_cli_v1_message_proto_rawDesc))) }) return file_temporal_server_api_cli_v1_message_proto_rawDescData } var file_temporal_server_api_cli_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 10) -var file_temporal_server_api_cli_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_cli_v1_message_proto_goTypes = []any{ (*DescribeWorkflowExecutionResponse)(nil), // 0: temporal.server.api.cli.v1.DescribeWorkflowExecutionResponse (*WorkflowExecutionInfo)(nil), // 1: temporal.server.api.cli.v1.WorkflowExecutionInfo (*PendingActivityInfo)(nil), // 2: temporal.server.api.cli.v1.PendingActivityInfo @@ -941,85 +739,11 @@ func 
file_temporal_server_api_cli_v1_message_proto_init() { if File_temporal_server_api_cli_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_cli_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cli_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowExecutionInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cli_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PendingActivityInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cli_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SearchAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cli_v1_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Failure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cli_v1_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddSearchAttributesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - 
RawDescriptor: file_temporal_server_api_cli_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_cli_v1_message_proto_rawDesc), len(file_temporal_server_api_cli_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 10, NumExtensions: 0, @@ -1030,7 +754,6 @@ func file_temporal_server_api_cli_v1_message_proto_init() { MessageInfos: file_temporal_server_api_cli_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_cli_v1_message_proto = out.File - file_temporal_server_api_cli_v1_message_proto_rawDesc = nil file_temporal_server_api_cli_v1_message_proto_goTypes = nil file_temporal_server_api_cli_v1_message_proto_depIdxs = nil } diff --git a/api/clock/v1/message.go-helpers.pb.go b/api/clock/v1/message.go-helpers.pb.go index c119486c655..d0fb0a4b4f6 100644 --- a/api/clock/v1/message.go-helpers.pb.go +++ b/api/clock/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package clock diff --git a/api/clock/v1/message.pb.go b/api/clock/v1/message.pb.go index e2712a1c852..340726e177e 100644 --- a/api/clock/v1/message.pb.go +++ b/api/clock/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package clock import ( reflect "reflect" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -44,22 +23,19 @@ const ( ) type VectorClock struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Clock int64 `protobuf:"varint,2,opt,name=clock,proto3" json:"clock,omitempty"` + ClusterId int64 `protobuf:"varint,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Clock int64 `protobuf:"varint,2,opt,name=clock,proto3" json:"clock,omitempty"` - ClusterId int64 `protobuf:"varint,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VectorClock) Reset() { *x = VectorClock{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_clock_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_clock_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VectorClock) String() string { @@ -70,7 +46,7 @@ func (*VectorClock) ProtoMessage() {} func (x *VectorClock) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_clock_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -109,10 +85,7 @@ func (x *VectorClock) GetClusterId() int64 { // A Hybrid Logical Clock timestamp. 
// Guarantees strict total ordering for conflict resolution purposes. type HybridLogicalClock struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Wall clock - A single time source MUST guarantee that 2 consecutive timestamps are monotonically non-decreasing. // e.g. by storing the last wall clock and returning max(gettimeofday(), lastWallClock). WallClock int64 `protobuf:"varint,1,opt,name=wall_clock,json=wallClock,proto3" json:"wall_clock,omitempty"` @@ -121,16 +94,16 @@ type HybridLogicalClock struct { Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` // The cluster version ID as described in the XDC docs - used as a tie breaker. // See: https://github.com/uber/cadence/blob/master/docs/design/2290-cadence-ndc.md - ClusterId int64 `protobuf:"varint,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + ClusterId int64 `protobuf:"varint,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HybridLogicalClock) Reset() { *x = HybridLogicalClock{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_clock_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_clock_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HybridLogicalClock) String() string { @@ -141,7 +114,7 @@ func (*HybridLogicalClock) ProtoMessage() {} func (x *HybridLogicalClock) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_clock_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ 
-179,44 +152,35 @@ func (x *HybridLogicalClock) GetClusterId() int64 { var File_temporal_server_api_clock_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_clock_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x22, 0x69, 0x0a, 0x0b, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x18, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, - 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x78, 0x0a, 0x12, 0x48, 0x79, 0x62, 0x72, - 0x69, 0x64, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x21, 0x0a, - 0x0a, 0x77, 0x61, 0x6c, 0x6c, 0x5f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x77, 0x61, 0x6c, 0x6c, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 
0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x6f, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_clock_v1_message_proto_rawDesc = "" + + "\n" + + "*temporal/server/api/clock/v1/message.proto\x12\x1ctemporal.server.api.clock.v1\"]\n" + + "\vVectorClock\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x12\x14\n" + + "\x05clock\x18\x02 \x01(\x03R\x05clock\x12\x1d\n" + + "\n" + + "cluster_id\x18\x03 \x01(\x03R\tclusterId\"l\n" + + "\x12HybridLogicalClock\x12\x1d\n" + + "\n" + + "wall_clock\x18\x01 \x01(\x03R\twallClock\x12\x18\n" + + "\aversion\x18\x02 \x01(\x05R\aversion\x12\x1d\n" + + "\n" + + "cluster_id\x18\x03 \x01(\x03R\tclusterIdB*Z(go.temporal.io/server/api/clock/v1;clockb\x06proto3" var ( file_temporal_server_api_clock_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_clock_v1_message_proto_rawDescData = file_temporal_server_api_clock_v1_message_proto_rawDesc + file_temporal_server_api_clock_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_clock_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_clock_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_clock_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_clock_v1_message_proto_rawDescData) + file_temporal_server_api_clock_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_clock_v1_message_proto_rawDesc), len(file_temporal_server_api_clock_v1_message_proto_rawDesc))) }) return file_temporal_server_api_clock_v1_message_proto_rawDescData } var file_temporal_server_api_clock_v1_message_proto_msgTypes = 
make([]protoimpl.MessageInfo, 2) -var file_temporal_server_api_clock_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_clock_v1_message_proto_goTypes = []any{ (*VectorClock)(nil), // 0: temporal.server.api.clock.v1.VectorClock (*HybridLogicalClock)(nil), // 1: temporal.server.api.clock.v1.HybridLogicalClock } @@ -233,37 +197,11 @@ func file_temporal_server_api_clock_v1_message_proto_init() { if File_temporal_server_api_clock_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_clock_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VectorClock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_clock_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HybridLogicalClock); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_clock_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_clock_v1_message_proto_rawDesc), len(file_temporal_server_api_clock_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -274,7 +212,6 @@ func file_temporal_server_api_clock_v1_message_proto_init() { MessageInfos: file_temporal_server_api_clock_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_clock_v1_message_proto = out.File - file_temporal_server_api_clock_v1_message_proto_rawDesc = nil file_temporal_server_api_clock_v1_message_proto_goTypes = nil file_temporal_server_api_clock_v1_message_proto_depIdxs = nil } diff --git a/api/cluster/v1/message.go-helpers.pb.go 
b/api/cluster/v1/message.go-helpers.pb.go index 83ee430c825..3d3a56a000d 100644 --- a/api/cluster/v1/message.go-helpers.pb.go +++ b/api/cluster/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package cluster diff --git a/api/cluster/v1/message.pb.go b/api/cluster/v1/message.pb.go index 6c2ea178929..a815f4792e3 100644 --- a/api/cluster/v1/message.pb.go +++ b/api/cluster/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package cluster import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/server/api/enums/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -46,20 +25,17 @@ const ( ) type HostInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` unknownFields protoimpl.UnknownFields - - Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HostInfo) Reset() { *x = HostInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HostInfo) String() string { @@ -70,7 +46,7 @@ func (*HostInfo) ProtoMessage() {} func (x *HostInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -93,22 +69,19 @@ func (x *HostInfo) GetIdentity() string { } type RingInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` + MemberCount int32 `protobuf:"varint,2,opt,name=member_count,json=memberCount,proto3" json:"member_count,omitempty"` + Members []*HostInfo `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` unknownFields protoimpl.UnknownFields - - Role string 
`protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` - MemberCount int32 `protobuf:"varint,2,opt,name=member_count,json=memberCount,proto3" json:"member_count,omitempty"` - Members []*HostInfo `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RingInfo) Reset() { *x = RingInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RingInfo) String() string { @@ -119,7 +92,7 @@ func (*RingInfo) ProtoMessage() {} func (x *RingInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -156,22 +129,19 @@ func (x *RingInfo) GetMembers() []*HostInfo { } type MembershipInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CurrentHost *HostInfo `protobuf:"bytes,1,opt,name=current_host,json=currentHost,proto3" json:"current_host,omitempty"` - ReachableMembers []string `protobuf:"bytes,2,rep,name=reachable_members,json=reachableMembers,proto3" json:"reachable_members,omitempty"` - Rings []*RingInfo `protobuf:"bytes,3,rep,name=rings,proto3" json:"rings,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + CurrentHost *HostInfo `protobuf:"bytes,1,opt,name=current_host,json=currentHost,proto3" json:"current_host,omitempty"` + ReachableMembers []string `protobuf:"bytes,2,rep,name=reachable_members,json=reachableMembers,proto3" json:"reachable_members,omitempty"` + Rings []*RingInfo 
`protobuf:"bytes,3,rep,name=rings,proto3" json:"rings,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *MembershipInfo) Reset() { *x = MembershipInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MembershipInfo) String() string { @@ -182,7 +152,7 @@ func (*MembershipInfo) ProtoMessage() {} func (x *MembershipInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -219,10 +189,7 @@ func (x *MembershipInfo) GetRings() []*RingInfo { } type ClusterMember struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Role v1.ClusterMemberRole `protobuf:"varint,1,opt,name=role,proto3,enum=temporal.server.api.enums.v1.ClusterMemberRole" json:"role,omitempty"` HostId string `protobuf:"bytes,2,opt,name=host_id,json=hostId,proto3" json:"host_id,omitempty"` RpcAddress string `protobuf:"bytes,3,opt,name=rpc_address,json=rpcAddress,proto3" json:"rpc_address,omitempty"` @@ -230,15 +197,15 @@ type ClusterMember struct { SessionStartTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=session_start_time,json=sessionStartTime,proto3" json:"session_start_time,omitempty"` LastHeartbitTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_heartbit_time,json=lastHeartbitTime,proto3" json:"last_heartbit_time,omitempty"` RecordExpiryTime *timestamppb.Timestamp 
`protobuf:"bytes,7,opt,name=record_expiry_time,json=recordExpiryTime,proto3" json:"record_expiry_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClusterMember) Reset() { *x = ClusterMember{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClusterMember) String() string { @@ -249,7 +216,7 @@ func (*ClusterMember) ProtoMessage() {} func (x *ClusterMember) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_cluster_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -315,85 +282,43 @@ func (x *ClusterMember) GetRecordExpiryTime() *timestamppb.Timestamp { var File_temporal_server_api_cluster_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_cluster_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x2a, 0x0a, 0x08, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1e, 0x0a, - 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0x91, 0x01, 0x0a, - 0x08, 0x52, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x25, 0x0a, 0x0c, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x46, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x6d, 0x65, 0x6d, - 0x62, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xd6, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x6d, 0x62, - 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4f, 0x0a, 0x0c, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x6f, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, - 0x6f, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x11, 
0x72, 0x65, 0x61, 0x63, 0x68, 0x61, - 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x10, 0x72, 0x65, 0x61, 0x63, 0x68, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x42, 0x0a, 0x05, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x72, 0x69, 0x6e, 0x67, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa3, 0x03, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, - 0x6c, 0x65, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x68, - 0x6f, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x6f, 0x73, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x70, 0x63, 0x5f, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x70, 0x63, 0x41, - 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x72, 0x70, 0x63, - 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x72, 0x70, 0x63, 0x50, - 0x6f, 0x72, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x12, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x73, 0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x4c, 0x0a, 0x12, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x69, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x12, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x45, 0x78, 0x70, 0x69, 0x72, 0x79, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_cluster_v1_message_proto_rawDesc = "" + + "\n" + + ",temporal/server/api/cluster/v1/message.proto\x12\x1etemporal.server.api.cluster.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a*temporal/server/api/enums/v1/cluster.proto\"&\n" + + "\bHostInfo\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\"\x85\x01\n" + + "\bRingInfo\x12\x12\n" + + 
"\x04role\x18\x01 \x01(\tR\x04role\x12!\n" + + "\fmember_count\x18\x02 \x01(\x05R\vmemberCount\x12B\n" + + "\amembers\x18\x03 \x03(\v2(.temporal.server.api.cluster.v1.HostInfoR\amembers\"\xca\x01\n" + + "\x0eMembershipInfo\x12K\n" + + "\fcurrent_host\x18\x01 \x01(\v2(.temporal.server.api.cluster.v1.HostInfoR\vcurrentHost\x12+\n" + + "\x11reachable_members\x18\x02 \x03(\tR\x10reachableMembers\x12>\n" + + "\x05rings\x18\x03 \x03(\v2(.temporal.server.api.cluster.v1.RingInfoR\x05rings\"\x87\x03\n" + + "\rClusterMember\x12C\n" + + "\x04role\x18\x01 \x01(\x0e2/.temporal.server.api.enums.v1.ClusterMemberRoleR\x04role\x12\x17\n" + + "\ahost_id\x18\x02 \x01(\tR\x06hostId\x12\x1f\n" + + "\vrpc_address\x18\x03 \x01(\tR\n" + + "rpcAddress\x12\x19\n" + + "\brpc_port\x18\x04 \x01(\x05R\arpcPort\x12H\n" + + "\x12session_start_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x10sessionStartTime\x12H\n" + + "\x12last_heartbit_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x10lastHeartbitTime\x12H\n" + + "\x12record_expiry_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x10recordExpiryTimeB.Z,go.temporal.io/server/api/cluster/v1;clusterb\x06proto3" var ( file_temporal_server_api_cluster_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_cluster_v1_message_proto_rawDescData = file_temporal_server_api_cluster_v1_message_proto_rawDesc + file_temporal_server_api_cluster_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_cluster_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_cluster_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_cluster_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_cluster_v1_message_proto_rawDescData) + file_temporal_server_api_cluster_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_cluster_v1_message_proto_rawDesc), len(file_temporal_server_api_cluster_v1_message_proto_rawDesc))) }) return 
file_temporal_server_api_cluster_v1_message_proto_rawDescData } var file_temporal_server_api_cluster_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_temporal_server_api_cluster_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_cluster_v1_message_proto_goTypes = []any{ (*HostInfo)(nil), // 0: temporal.server.api.cluster.v1.HostInfo (*RingInfo)(nil), // 1: temporal.server.api.cluster.v1.RingInfo (*MembershipInfo)(nil), // 2: temporal.server.api.cluster.v1.MembershipInfo @@ -421,61 +346,11 @@ func file_temporal_server_api_cluster_v1_message_proto_init() { if File_temporal_server_api_cluster_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_cluster_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HostInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cluster_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RingInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cluster_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MembershipInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_cluster_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterMember); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_temporal_server_api_cluster_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_cluster_v1_message_proto_rawDesc), len(file_temporal_server_api_cluster_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 4, NumExtensions: 0, @@ -486,7 +361,6 @@ func file_temporal_server_api_cluster_v1_message_proto_init() { MessageInfos: file_temporal_server_api_cluster_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_cluster_v1_message_proto = out.File - file_temporal_server_api_cluster_v1_message_proto_rawDesc = nil file_temporal_server_api_cluster_v1_message_proto_goTypes = nil file_temporal_server_api_cluster_v1_message_proto_depIdxs = nil } diff --git a/api/common/v1/api_category.go-helpers.pb.go b/api/common/v1/api_category.go-helpers.pb.go new file mode 100644 index 00000000000..d105bd7080f --- /dev/null +++ b/api/common/v1/api_category.go-helpers.pb.go @@ -0,0 +1,65 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package commonspb + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ApiCategoryOptions to the protobuf v3 wire format +func (val *ApiCategoryOptions) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ApiCategoryOptions from the protobuf v3 wire format +func (val *ApiCategoryOptions) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ApiCategoryOptions) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ApiCategoryOptions values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ApiCategoryOptions) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ApiCategoryOptions + switch t := that.(type) { + case *ApiCategoryOptions: + that1 = t + case ApiCategoryOptions: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +var ( + ApiCategory_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Standard": 1, + "LongPoll": 2, + "System": 3, + } +) + +// ApiCategoryFromString parses a ApiCategory value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to ApiCategory +func ApiCategoryFromString(s string) (ApiCategory, error) { + if v, ok := ApiCategory_value[s]; ok { + return ApiCategory(v), nil + } else if v, ok := ApiCategory_shorthandValue[s]; ok { + return ApiCategory(v), nil + } + return ApiCategory(0), fmt.Errorf("%s is not a valid ApiCategory", s) +} diff --git a/api/common/v1/api_category.pb.go b/api/common/v1/api_category.pb.go new file mode 100644 index 00000000000..248dd81cee8 --- /dev/null +++ b/api/common/v1/api_category.pb.go @@ -0,0 +1,230 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/common/v1/api_category.proto + +package commonspb + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ApiCategory int32 + +const ( + // Unspecified API category. Treated as standard API. + API_CATEGORY_UNSPECIFIED ApiCategory = 0 + // Standard API with typical request/response patterns. + API_CATEGORY_STANDARD ApiCategory = 1 + // Long-polling API that intentionally waits for state changes or external events. + // These APIs should be excluded from health signal tracking because their latency + // reflects client wait times and event availability rather than server health. + // Including them in health metrics would skew the data and could cause healthy + // nodes to appear unhealthy. + // + // Examples: PollMutableState, PollWorkflowExecutionUpdate, QueryWorkflow + API_CATEGORY_LONG_POLL ApiCategory = 2 + API_CATEGORY_SYSTEM ApiCategory = 3 +) + +// Enum value maps for ApiCategory. +var ( + ApiCategory_name = map[int32]string{ + 0: "API_CATEGORY_UNSPECIFIED", + 1: "API_CATEGORY_STANDARD", + 2: "API_CATEGORY_LONG_POLL", + 3: "API_CATEGORY_SYSTEM", + } + ApiCategory_value = map[string]int32{ + "API_CATEGORY_UNSPECIFIED": 0, + "API_CATEGORY_STANDARD": 1, + "API_CATEGORY_LONG_POLL": 2, + "API_CATEGORY_SYSTEM": 3, + } +) + +func (x ApiCategory) Enum() *ApiCategory { + p := new(ApiCategory) + *p = x + return p +} + +func (x ApiCategory) String() string { + switch x { + case API_CATEGORY_UNSPECIFIED: + return "Unspecified" + case API_CATEGORY_STANDARD: + return "Standard" + case API_CATEGORY_LONG_POLL: + return "LongPoll" + case API_CATEGORY_SYSTEM: + return "System" + default: + return strconv.Itoa(int(x)) + } + +} + +func (ApiCategory) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_common_v1_api_category_proto_enumTypes[0].Descriptor() +} + +func (ApiCategory) Type() protoreflect.EnumType { + return &file_temporal_server_api_common_v1_api_category_proto_enumTypes[0] +} + +func (x ApiCategory) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// 
Deprecated: Use ApiCategory.Descriptor instead. +func (ApiCategory) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_common_v1_api_category_proto_rawDescGZIP(), []int{0} +} + +type ApiCategoryOptions struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The category of this API for health and observability purposes. + Category ApiCategory `protobuf:"varint,1,opt,name=category,proto3,enum=temporal.server.api.common.v1.ApiCategory" json:"category,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ApiCategoryOptions) Reset() { + *x = ApiCategoryOptions{} + mi := &file_temporal_server_api_common_v1_api_category_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ApiCategoryOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiCategoryOptions) ProtoMessage() {} + +func (x *ApiCategoryOptions) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_common_v1_api_category_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiCategoryOptions.ProtoReflect.Descriptor instead. 
+func (*ApiCategoryOptions) Descriptor() ([]byte, []int) { + return file_temporal_server_api_common_v1_api_category_proto_rawDescGZIP(), []int{0} +} + +func (x *ApiCategoryOptions) GetCategory() ApiCategory { + if x != nil { + return x.Category + } + return API_CATEGORY_UNSPECIFIED +} + +var file_temporal_server_api_common_v1_api_category_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*ApiCategoryOptions)(nil), + Field: 50001, + Name: "temporal.server.api.common.v1.api_category", + Tag: "bytes,50001,opt,name=api_category", + Filename: "temporal/server/api/common/v1/api_category.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional temporal.server.api.common.v1.ApiCategoryOptions api_category = 50001; + E_ApiCategory = &file_temporal_server_api_common_v1_api_category_proto_extTypes[0] +) + +var File_temporal_server_api_common_v1_api_category_proto protoreflect.FileDescriptor + +const file_temporal_server_api_common_v1_api_category_proto_rawDesc = "" + + "\n" + + "0temporal/server/api/common/v1/api_category.proto\x12\x1dtemporal.server.api.common.v1\x1a google/protobuf/descriptor.proto\"\\\n" + + "\x12ApiCategoryOptions\x12F\n" + + "\bcategory\x18\x01 \x01(\x0e2*.temporal.server.api.common.v1.ApiCategoryR\bcategory*{\n" + + "\vApiCategory\x12\x1c\n" + + "\x18API_CATEGORY_UNSPECIFIED\x10\x00\x12\x19\n" + + "\x15API_CATEGORY_STANDARD\x10\x01\x12\x1a\n" + + "\x16API_CATEGORY_LONG_POLL\x10\x02\x12\x17\n" + + "\x13API_CATEGORY_SYSTEM\x10\x03:y\n" + + "\fapi_category\x12\x1e.google.protobuf.MethodOptions\x18ц\x03 \x01(\v21.temporal.server.api.common.v1.ApiCategoryOptionsR\vapiCategory\x88\x01\x01B/Z-go.temporal.io/server/api/common/v1;commonspbb\x06proto3" + +var ( + file_temporal_server_api_common_v1_api_category_proto_rawDescOnce sync.Once + file_temporal_server_api_common_v1_api_category_proto_rawDescData []byte +) + +func 
file_temporal_server_api_common_v1_api_category_proto_rawDescGZIP() []byte { + file_temporal_server_api_common_v1_api_category_proto_rawDescOnce.Do(func() { + file_temporal_server_api_common_v1_api_category_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_common_v1_api_category_proto_rawDesc), len(file_temporal_server_api_common_v1_api_category_proto_rawDesc))) + }) + return file_temporal_server_api_common_v1_api_category_proto_rawDescData +} + +var file_temporal_server_api_common_v1_api_category_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_api_common_v1_api_category_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_temporal_server_api_common_v1_api_category_proto_goTypes = []any{ + (ApiCategory)(0), // 0: temporal.server.api.common.v1.ApiCategory + (*ApiCategoryOptions)(nil), // 1: temporal.server.api.common.v1.ApiCategoryOptions + (*descriptorpb.MethodOptions)(nil), // 2: google.protobuf.MethodOptions +} +var file_temporal_server_api_common_v1_api_category_proto_depIdxs = []int32{ + 0, // 0: temporal.server.api.common.v1.ApiCategoryOptions.category:type_name -> temporal.server.api.common.v1.ApiCategory + 2, // 1: temporal.server.api.common.v1.api_category:extendee -> google.protobuf.MethodOptions + 1, // 2: temporal.server.api.common.v1.api_category:type_name -> temporal.server.api.common.v1.ApiCategoryOptions + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_common_v1_api_category_proto_init() } +func file_temporal_server_api_common_v1_api_category_proto_init() { + if File_temporal_server_api_common_v1_api_category_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_common_v1_api_category_proto_rawDesc), len(file_temporal_server_api_common_v1_api_category_proto_rawDesc)), + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_common_v1_api_category_proto_goTypes, + DependencyIndexes: file_temporal_server_api_common_v1_api_category_proto_depIdxs, + EnumInfos: file_temporal_server_api_common_v1_api_category_proto_enumTypes, + MessageInfos: file_temporal_server_api_common_v1_api_category_proto_msgTypes, + ExtensionInfos: file_temporal_server_api_common_v1_api_category_proto_extTypes, + }.Build() + File_temporal_server_api_common_v1_api_category_proto = out.File + file_temporal_server_api_common_v1_api_category_proto_goTypes = nil + file_temporal_server_api_common_v1_api_category_proto_depIdxs = nil +} diff --git a/api/common/v1/dlq.go-helpers.pb.go b/api/common/v1/dlq.go-helpers.pb.go index 0e149281065..9e740118dbb 100644 --- a/api/common/v1/dlq.go-helpers.pb.go +++ b/api/common/v1/dlq.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package commonspb diff --git a/api/common/v1/dlq.pb.go b/api/common/v1/dlq.pb.go index 89db8cc771f..4b074cf318b 100644 --- a/api/common/v1/dlq.pb.go +++ b/api/common/v1/dlq.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2023 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package commonspb import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -45,22 +24,19 @@ const ( ) type HistoryTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // shard_id is included to avoid having to deserialize the task blob. - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - Blob *v1.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Blob *v1.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryTask) Reset() { *x = HistoryTask{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryTask) String() string { @@ -71,7 +47,7 @@ func (*HistoryTask) ProtoMessage() {} func (x *HistoryTask) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -101,21 +77,18 @@ func (x *HistoryTask) GetBlob() *v1.DataBlob { } type HistoryDLQTaskMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // 
message_id is the zero-indexed sequence number of the message in the queue that contains this history task. - MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + MessageId int64 `protobuf:"varint,1,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryDLQTaskMetadata) Reset() { *x = HistoryDLQTaskMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryDLQTaskMetadata) String() string { @@ -126,7 +99,7 @@ func (*HistoryDLQTaskMetadata) ProtoMessage() {} func (x *HistoryDLQTaskMetadata) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -151,22 +124,19 @@ func (x *HistoryDLQTaskMetadata) GetMessageId() int64 { // HistoryDLQTask is a history task that has been moved to the DLQ, so it also has a message ID (index within that // queue). type HistoryDLQTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Metadata *HistoryDLQTaskMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // This is named payload to prevent stuttering (e.g. task.Task). 
- Payload *HistoryTask `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + Payload *HistoryTask `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryDLQTask) Reset() { *x = HistoryDLQTask{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryDLQTask) String() string { @@ -177,7 +147,7 @@ func (*HistoryDLQTask) ProtoMessage() {} func (x *HistoryDLQTask) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,10 +178,7 @@ func (x *HistoryDLQTask) GetPayload() *HistoryTask { // HistoryDLQKey is a compound key that identifies a history DLQ. type HistoryDLQKey struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // task_category is the category of the task. The default values are defined in the TaskCategory enum. However, there // may also be other categories registered at runtime with the history/tasks package. As a result, the category here // is an integer instead of an enum to support both the default values and custom values. @@ -222,15 +189,15 @@ type HistoryDLQKey struct { // current cluster. 
SourceCluster string `protobuf:"bytes,2,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` TargetCluster string `protobuf:"bytes,3,opt,name=target_cluster,json=targetCluster,proto3" json:"target_cluster,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryDLQKey) Reset() { *x = HistoryDLQKey{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryDLQKey) String() string { @@ -241,7 +208,7 @@ func (*HistoryDLQKey) ProtoMessage() {} func (x *HistoryDLQKey) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_common_v1_dlq_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -279,63 +246,37 @@ func (x *HistoryDLQKey) GetTargetCluster() string { var File_temporal_server_api_common_v1_dlq_proto protoreflect.FileDescriptor -var file_temporal_server_api_common_v1_dlq_proto_rawDesc = []byte{ - 0x0a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6c, - 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x22, 0x66, 0x0a, 0x0b, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, - 0x6f, 0x62, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x68, 0x00, 0x22, 0x3b, 0x0a, 0x16, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb1, 0x01, 0x0a, 0x0e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x55, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, - 0x51, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x07, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 
0x72, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x8e, 0x01, 0x0a, 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x4b, - 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, - 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, - 0x65, 0x67, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x73, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_temporal_server_api_common_v1_dlq_proto_rawDesc = "" + + "\n" + + "'temporal/server/api/common/v1/dlq.proto\x12\x1dtemporal.server.api.common.v1\x1a$temporal/api/common/v1/message.proto\"^\n" + + "\vHistoryTask\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x124\n" + + "\x04blob\x18\x02 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04blob\"7\n" + + "\x16HistoryDLQTaskMetadata\x12\x1d\n" + + "\n" + + "message_id\x18\x01 \x01(\x03R\tmessageId\"\xa9\x01\n" + + "\x0eHistoryDLQTask\x12Q\n" + + "\bmetadata\x18\x01 \x01(\v25.temporal.server.api.common.v1.HistoryDLQTaskMetadataR\bmetadata\x12D\n" + + "\apayload\x18\x02 
\x01(\v2*.temporal.server.api.common.v1.HistoryTaskR\apayload\"\x82\x01\n" + + "\rHistoryDLQKey\x12#\n" + + "\rtask_category\x18\x01 \x01(\x05R\ftaskCategory\x12%\n" + + "\x0esource_cluster\x18\x02 \x01(\tR\rsourceCluster\x12%\n" + + "\x0etarget_cluster\x18\x03 \x01(\tR\rtargetClusterB/Z-go.temporal.io/server/api/common/v1;commonspbb\x06proto3" var ( file_temporal_server_api_common_v1_dlq_proto_rawDescOnce sync.Once - file_temporal_server_api_common_v1_dlq_proto_rawDescData = file_temporal_server_api_common_v1_dlq_proto_rawDesc + file_temporal_server_api_common_v1_dlq_proto_rawDescData []byte ) func file_temporal_server_api_common_v1_dlq_proto_rawDescGZIP() []byte { file_temporal_server_api_common_v1_dlq_proto_rawDescOnce.Do(func() { - file_temporal_server_api_common_v1_dlq_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_common_v1_dlq_proto_rawDescData) + file_temporal_server_api_common_v1_dlq_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_common_v1_dlq_proto_rawDesc), len(file_temporal_server_api_common_v1_dlq_proto_rawDesc))) }) return file_temporal_server_api_common_v1_dlq_proto_rawDescData } var file_temporal_server_api_common_v1_dlq_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_temporal_server_api_common_v1_dlq_proto_goTypes = []interface{}{ +var file_temporal_server_api_common_v1_dlq_proto_goTypes = []any{ (*HistoryTask)(nil), // 0: temporal.server.api.common.v1.HistoryTask (*HistoryDLQTaskMetadata)(nil), // 1: temporal.server.api.common.v1.HistoryDLQTaskMetadata (*HistoryDLQTask)(nil), // 2: temporal.server.api.common.v1.HistoryDLQTask @@ -358,61 +299,11 @@ func file_temporal_server_api_common_v1_dlq_proto_init() { if File_temporal_server_api_common_v1_dlq_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_common_v1_dlq_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryTask); i { - case 0: - 
return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_common_v1_dlq_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryDLQTaskMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_common_v1_dlq_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryDLQTask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_common_v1_dlq_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryDLQKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_common_v1_dlq_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_common_v1_dlq_proto_rawDesc), len(file_temporal_server_api_common_v1_dlq_proto_rawDesc)), NumEnums: 0, NumMessages: 4, NumExtensions: 0, @@ -423,7 +314,6 @@ func file_temporal_server_api_common_v1_dlq_proto_init() { MessageInfos: file_temporal_server_api_common_v1_dlq_proto_msgTypes, }.Build() File_temporal_server_api_common_v1_dlq_proto = out.File - file_temporal_server_api_common_v1_dlq_proto_rawDesc = nil file_temporal_server_api_common_v1_dlq_proto_goTypes = nil file_temporal_server_api_common_v1_dlq_proto_depIdxs = nil } diff --git a/api/deployment/v1/message.go-helpers.pb.go b/api/deployment/v1/message.go-helpers.pb.go new file mode 100644 index 00000000000..b54743600d1 --- /dev/null +++ b/api/deployment/v1/message.go-helpers.pb.go @@ -0,0 +1,2189 @@ +// 
Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package deployment + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type WorkerDeploymentVersion to the protobuf v3 wire format +func (val *WorkerDeploymentVersion) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentVersion from the protobuf v3 wire format +func (val *WorkerDeploymentVersion) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentVersion) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentVersion values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentVersion) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentVersion + switch t := that.(type) { + case *WorkerDeploymentVersion: + that1 = t + case WorkerDeploymentVersion: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeploymentVersionData to the protobuf v3 wire format +func (val *DeploymentVersionData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeploymentVersionData from the protobuf v3 wire format +func (val *DeploymentVersionData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeploymentVersionData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeploymentVersionData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeploymentVersionData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeploymentVersionData + switch t := that.(type) { + case *DeploymentVersionData: + that1 = t + case DeploymentVersionData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentVersionData to the protobuf v3 wire format +func (val *WorkerDeploymentVersionData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentVersionData from the protobuf v3 wire format +func (val *WorkerDeploymentVersionData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentVersionData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentVersionData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentVersionData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentVersionData + switch t := that.(type) { + case *WorkerDeploymentVersionData: + that1 = t + case WorkerDeploymentVersionData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type VersionLocalState to the protobuf v3 wire format +func (val *VersionLocalState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type VersionLocalState from the protobuf v3 wire format +func (val *VersionLocalState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *VersionLocalState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two VersionLocalState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *VersionLocalState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *VersionLocalState + switch t := that.(type) { + case *VersionLocalState: + that1 = t + case VersionLocalState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TaskQueueVersionData to the protobuf v3 wire format +func (val *TaskQueueVersionData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TaskQueueVersionData from the protobuf v3 wire format +func (val *TaskQueueVersionData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TaskQueueVersionData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TaskQueueVersionData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TaskQueueVersionData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TaskQueueVersionData + switch t := that.(type) { + case *TaskQueueVersionData: + that1 = t + case TaskQueueVersionData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentVersionWorkflowArgs to the protobuf v3 wire format +func (val *WorkerDeploymentVersionWorkflowArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentVersionWorkflowArgs from the protobuf v3 wire format +func (val *WorkerDeploymentVersionWorkflowArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentVersionWorkflowArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentVersionWorkflowArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentVersionWorkflowArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentVersionWorkflowArgs + switch t := that.(type) { + case *WorkerDeploymentVersionWorkflowArgs: + that1 = t + case WorkerDeploymentVersionWorkflowArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentWorkflowArgs to the protobuf v3 wire format +func (val *WorkerDeploymentWorkflowArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentWorkflowArgs from the protobuf v3 wire format +func (val *WorkerDeploymentWorkflowArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentWorkflowArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentWorkflowArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentWorkflowArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentWorkflowArgs + switch t := that.(type) { + case *WorkerDeploymentWorkflowArgs: + that1 = t + case WorkerDeploymentWorkflowArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentLocalState to the protobuf v3 wire format +func (val *WorkerDeploymentLocalState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentLocalState from the protobuf v3 wire format +func (val *WorkerDeploymentLocalState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentLocalState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentLocalState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentLocalState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentLocalState + switch t := that.(type) { + case *WorkerDeploymentLocalState: + that1 = t + case WorkerDeploymentLocalState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PropagatingRevisions to the protobuf v3 wire format +func (val *PropagatingRevisions) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PropagatingRevisions from the protobuf v3 wire format +func (val *PropagatingRevisions) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PropagatingRevisions) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PropagatingRevisions values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PropagatingRevisions) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PropagatingRevisions + switch t := that.(type) { + case *PropagatingRevisions: + that1 = t + case PropagatingRevisions: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentVersionSummary to the protobuf v3 wire format +func (val *WorkerDeploymentVersionSummary) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentVersionSummary from the protobuf v3 wire format +func (val *WorkerDeploymentVersionSummary) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentVersionSummary) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentVersionSummary values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentVersionSummary) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentVersionSummary + switch t := that.(type) { + case *WorkerDeploymentVersionSummary: + that1 = t + case WorkerDeploymentVersionSummary: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RegisterWorkerInVersionArgs to the protobuf v3 wire format +func (val *RegisterWorkerInVersionArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RegisterWorkerInVersionArgs from the protobuf v3 wire format +func (val *RegisterWorkerInVersionArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RegisterWorkerInVersionArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RegisterWorkerInVersionArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RegisterWorkerInVersionArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RegisterWorkerInVersionArgs + switch t := that.(type) { + case *RegisterWorkerInVersionArgs: + that1 = t + case RegisterWorkerInVersionArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RegisterWorkerInWorkerDeploymentArgs to the protobuf v3 wire format +func (val *RegisterWorkerInWorkerDeploymentArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RegisterWorkerInWorkerDeploymentArgs from the protobuf v3 wire format +func (val *RegisterWorkerInWorkerDeploymentArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RegisterWorkerInWorkerDeploymentArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RegisterWorkerInWorkerDeploymentArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RegisterWorkerInWorkerDeploymentArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RegisterWorkerInWorkerDeploymentArgs + switch t := that.(type) { + case *RegisterWorkerInWorkerDeploymentArgs: + that1 = t + case RegisterWorkerInWorkerDeploymentArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeVersionFromWorkerDeploymentActivityArgs to the protobuf v3 wire format +func (val *DescribeVersionFromWorkerDeploymentActivityArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeVersionFromWorkerDeploymentActivityArgs from the protobuf v3 wire format +func (val *DescribeVersionFromWorkerDeploymentActivityArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeVersionFromWorkerDeploymentActivityArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeVersionFromWorkerDeploymentActivityArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeVersionFromWorkerDeploymentActivityArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeVersionFromWorkerDeploymentActivityArgs + switch t := that.(type) { + case *DescribeVersionFromWorkerDeploymentActivityArgs: + that1 = t + case DescribeVersionFromWorkerDeploymentActivityArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeVersionFromWorkerDeploymentActivityResult to the protobuf v3 wire format +func (val *DescribeVersionFromWorkerDeploymentActivityResult) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeVersionFromWorkerDeploymentActivityResult from the protobuf v3 wire format +func (val *DescribeVersionFromWorkerDeploymentActivityResult) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeVersionFromWorkerDeploymentActivityResult) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeVersionFromWorkerDeploymentActivityResult values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeVersionFromWorkerDeploymentActivityResult) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeVersionFromWorkerDeploymentActivityResult + switch t := that.(type) { + case *DescribeVersionFromWorkerDeploymentActivityResult: + that1 = t + case DescribeVersionFromWorkerDeploymentActivityResult: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncVersionStateUpdateArgs to the protobuf v3 wire format +func (val *SyncVersionStateUpdateArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncVersionStateUpdateArgs from the protobuf v3 wire format +func (val *SyncVersionStateUpdateArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncVersionStateUpdateArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncVersionStateUpdateArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncVersionStateUpdateArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncVersionStateUpdateArgs + switch t := that.(type) { + case *SyncVersionStateUpdateArgs: + that1 = t + case SyncVersionStateUpdateArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncVersionStateResponse to the protobuf v3 wire format +func (val *SyncVersionStateResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncVersionStateResponse from the protobuf v3 wire format +func (val *SyncVersionStateResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncVersionStateResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncVersionStateResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncVersionStateResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncVersionStateResponse + switch t := that.(type) { + case *SyncVersionStateResponse: + that1 = t + case SyncVersionStateResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type AddVersionUpdateArgs to the protobuf v3 wire format +func (val *AddVersionUpdateArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type AddVersionUpdateArgs from the protobuf v3 wire format +func (val *AddVersionUpdateArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *AddVersionUpdateArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two AddVersionUpdateArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *AddVersionUpdateArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *AddVersionUpdateArgs + switch t := that.(type) { + case *AddVersionUpdateArgs: + that1 = t + case AddVersionUpdateArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncDrainageInfoSignalArgs to the protobuf v3 wire format +func (val *SyncDrainageInfoSignalArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncDrainageInfoSignalArgs from the protobuf v3 wire format +func (val *SyncDrainageInfoSignalArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncDrainageInfoSignalArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncDrainageInfoSignalArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncDrainageInfoSignalArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncDrainageInfoSignalArgs + switch t := that.(type) { + case *SyncDrainageInfoSignalArgs: + that1 = t + case SyncDrainageInfoSignalArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncDrainageStatusSignalArgs to the protobuf v3 wire format +func (val *SyncDrainageStatusSignalArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncDrainageStatusSignalArgs from the protobuf v3 wire format +func (val *SyncDrainageStatusSignalArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncDrainageStatusSignalArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncDrainageStatusSignalArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncDrainageStatusSignalArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncDrainageStatusSignalArgs + switch t := that.(type) { + case *SyncDrainageStatusSignalArgs: + that1 = t + case SyncDrainageStatusSignalArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PropagationCompletionInfo to the protobuf v3 wire format +func (val *PropagationCompletionInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PropagationCompletionInfo from the protobuf v3 wire format +func (val *PropagationCompletionInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PropagationCompletionInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PropagationCompletionInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PropagationCompletionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PropagationCompletionInfo + switch t := that.(type) { + case *PropagationCompletionInfo: + that1 = t + case PropagationCompletionInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type QueryDescribeVersionResponse to the protobuf v3 wire format +func (val *QueryDescribeVersionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type QueryDescribeVersionResponse from the protobuf v3 wire format +func (val *QueryDescribeVersionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *QueryDescribeVersionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two QueryDescribeVersionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *QueryDescribeVersionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *QueryDescribeVersionResponse + switch t := that.(type) { + case *QueryDescribeVersionResponse: + that1 = t + case QueryDescribeVersionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type QueryDescribeWorkerDeploymentResponse to the protobuf v3 wire format +func (val *QueryDescribeWorkerDeploymentResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type QueryDescribeWorkerDeploymentResponse from the protobuf v3 wire format +func (val *QueryDescribeWorkerDeploymentResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *QueryDescribeWorkerDeploymentResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two QueryDescribeWorkerDeploymentResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *QueryDescribeWorkerDeploymentResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *QueryDescribeWorkerDeploymentResponse + switch t := that.(type) { + case *QueryDescribeWorkerDeploymentResponse: + that1 = t + case QueryDescribeWorkerDeploymentResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateRequestIDQueryResponse to the protobuf v3 wire format +func (val *CreateRequestIDQueryResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateRequestIDQueryResponse from the protobuf v3 wire format +func (val *CreateRequestIDQueryResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateRequestIDQueryResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateRequestIDQueryResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateRequestIDQueryResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateRequestIDQueryResponse + switch t := that.(type) { + case *CreateRequestIDQueryResponse: + that1 = t + case CreateRequestIDQueryResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartWorkerDeploymentRequest to the protobuf v3 wire format +func (val *StartWorkerDeploymentRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartWorkerDeploymentRequest from the protobuf v3 wire format +func (val *StartWorkerDeploymentRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartWorkerDeploymentRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartWorkerDeploymentRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartWorkerDeploymentRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartWorkerDeploymentRequest + switch t := that.(type) { + case *StartWorkerDeploymentRequest: + that1 = t + case StartWorkerDeploymentRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartWorkerDeploymentVersionRequest to the protobuf v3 wire format +func (val *StartWorkerDeploymentVersionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartWorkerDeploymentVersionRequest from the protobuf v3 wire format +func (val *StartWorkerDeploymentVersionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartWorkerDeploymentVersionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartWorkerDeploymentVersionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartWorkerDeploymentVersionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartWorkerDeploymentVersionRequest + switch t := that.(type) { + case *StartWorkerDeploymentVersionRequest: + that1 = t + case StartWorkerDeploymentVersionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncDeploymentVersionUserDataRequest to the protobuf v3 wire format +func (val *SyncDeploymentVersionUserDataRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncDeploymentVersionUserDataRequest from the protobuf v3 wire format +func (val *SyncDeploymentVersionUserDataRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncDeploymentVersionUserDataRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncDeploymentVersionUserDataRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncDeploymentVersionUserDataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncDeploymentVersionUserDataRequest + switch t := that.(type) { + case *SyncDeploymentVersionUserDataRequest: + that1 = t + case SyncDeploymentVersionUserDataRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncDeploymentVersionUserDataResponse to the protobuf v3 wire format +func (val *SyncDeploymentVersionUserDataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncDeploymentVersionUserDataResponse from the protobuf v3 wire format +func (val *SyncDeploymentVersionUserDataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncDeploymentVersionUserDataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncDeploymentVersionUserDataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncDeploymentVersionUserDataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncDeploymentVersionUserDataResponse + switch t := that.(type) { + case *SyncDeploymentVersionUserDataResponse: + that1 = t + case SyncDeploymentVersionUserDataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CheckWorkerDeploymentUserDataPropagationRequest to the protobuf v3 wire format +func (val *CheckWorkerDeploymentUserDataPropagationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CheckWorkerDeploymentUserDataPropagationRequest from the protobuf v3 wire format +func (val *CheckWorkerDeploymentUserDataPropagationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CheckWorkerDeploymentUserDataPropagationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CheckWorkerDeploymentUserDataPropagationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CheckWorkerDeploymentUserDataPropagationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CheckWorkerDeploymentUserDataPropagationRequest + switch t := that.(type) { + case *CheckWorkerDeploymentUserDataPropagationRequest: + that1 = t + case CheckWorkerDeploymentUserDataPropagationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncUnversionedRampActivityArgs to the protobuf v3 wire format +func (val *SyncUnversionedRampActivityArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncUnversionedRampActivityArgs from the protobuf v3 wire format +func (val *SyncUnversionedRampActivityArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncUnversionedRampActivityArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncUnversionedRampActivityArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncUnversionedRampActivityArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncUnversionedRampActivityArgs + switch t := that.(type) { + case *SyncUnversionedRampActivityArgs: + that1 = t + case SyncUnversionedRampActivityArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncUnversionedRampActivityResponse to the protobuf v3 wire format +func (val *SyncUnversionedRampActivityResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncUnversionedRampActivityResponse from the protobuf v3 wire format +func (val *SyncUnversionedRampActivityResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncUnversionedRampActivityResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncUnversionedRampActivityResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncUnversionedRampActivityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncUnversionedRampActivityResponse + switch t := that.(type) { + case *SyncUnversionedRampActivityResponse: + that1 = t + case SyncUnversionedRampActivityResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateVersionMetadataArgs to the protobuf v3 wire format +func (val *UpdateVersionMetadataArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateVersionMetadataArgs from the protobuf v3 wire format +func (val *UpdateVersionMetadataArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateVersionMetadataArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateVersionMetadataArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateVersionMetadataArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateVersionMetadataArgs + switch t := that.(type) { + case *UpdateVersionMetadataArgs: + that1 = t + case UpdateVersionMetadataArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateVersionMetadataResponse to the protobuf v3 wire format +func (val *UpdateVersionMetadataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateVersionMetadataResponse from the protobuf v3 wire format +func (val *UpdateVersionMetadataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateVersionMetadataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateVersionMetadataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateVersionMetadataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateVersionMetadataResponse + switch t := that.(type) { + case *UpdateVersionMetadataResponse: + that1 = t + case UpdateVersionMetadataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetCurrentVersionArgs to the protobuf v3 wire format +func (val *SetCurrentVersionArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetCurrentVersionArgs from the protobuf v3 wire format +func (val *SetCurrentVersionArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetCurrentVersionArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetCurrentVersionArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetCurrentVersionArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetCurrentVersionArgs + switch t := that.(type) { + case *SetCurrentVersionArgs: + that1 = t + case SetCurrentVersionArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetCurrentVersionResponse to the protobuf v3 wire format +func (val *SetCurrentVersionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetCurrentVersionResponse from the protobuf v3 wire format +func (val *SetCurrentVersionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetCurrentVersionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetCurrentVersionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetCurrentVersionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetCurrentVersionResponse + switch t := that.(type) { + case *SetCurrentVersionResponse: + that1 = t + case SetCurrentVersionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateWorkerDeploymentArgs to the protobuf v3 wire format +func (val *CreateWorkerDeploymentArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateWorkerDeploymentArgs from the protobuf v3 wire format +func (val *CreateWorkerDeploymentArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateWorkerDeploymentArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateWorkerDeploymentArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateWorkerDeploymentArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateWorkerDeploymentArgs + switch t := that.(type) { + case *CreateWorkerDeploymentArgs: + that1 = t + case CreateWorkerDeploymentArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateWorkerDeploymentResponse to the protobuf v3 wire format +func (val *CreateWorkerDeploymentResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateWorkerDeploymentResponse from the protobuf v3 wire format +func (val *CreateWorkerDeploymentResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateWorkerDeploymentResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateWorkerDeploymentResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateWorkerDeploymentResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateWorkerDeploymentResponse + switch t := that.(type) { + case *CreateWorkerDeploymentResponse: + that1 = t + case CreateWorkerDeploymentResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateWorkerDeploymentVersionArgs to the protobuf v3 wire format +func (val *CreateWorkerDeploymentVersionArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateWorkerDeploymentVersionArgs from the protobuf v3 wire format +func (val *CreateWorkerDeploymentVersionArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateWorkerDeploymentVersionArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateWorkerDeploymentVersionArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateWorkerDeploymentVersionArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateWorkerDeploymentVersionArgs + switch t := that.(type) { + case *CreateWorkerDeploymentVersionArgs: + that1 = t + case CreateWorkerDeploymentVersionArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateWorkerDeploymentVersionResponse to the protobuf v3 wire format +func (val *CreateWorkerDeploymentVersionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateWorkerDeploymentVersionResponse from the protobuf v3 wire format +func (val *CreateWorkerDeploymentVersionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateWorkerDeploymentVersionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateWorkerDeploymentVersionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateWorkerDeploymentVersionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateWorkerDeploymentVersionResponse + switch t := that.(type) { + case *CreateWorkerDeploymentVersionResponse: + that1 = t + case CreateWorkerDeploymentVersionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteVersionArgs to the protobuf v3 wire format +func (val *DeleteVersionArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteVersionArgs from the protobuf v3 wire format +func (val *DeleteVersionArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteVersionArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteVersionArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteVersionArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteVersionArgs + switch t := that.(type) { + case *DeleteVersionArgs: + that1 = t + case DeleteVersionArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteVersionActivityArgs to the protobuf v3 wire format +func (val *DeleteVersionActivityArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteVersionActivityArgs from the protobuf v3 wire format +func (val *DeleteVersionActivityArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteVersionActivityArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteVersionActivityArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteVersionActivityArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteVersionActivityArgs + switch t := that.(type) { + case *DeleteVersionActivityArgs: + that1 = t + case DeleteVersionActivityArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CheckTaskQueuesHavePollersActivityArgs to the protobuf v3 wire format +func (val *CheckTaskQueuesHavePollersActivityArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CheckTaskQueuesHavePollersActivityArgs from the protobuf v3 wire format +func (val *CheckTaskQueuesHavePollersActivityArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CheckTaskQueuesHavePollersActivityArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CheckTaskQueuesHavePollersActivityArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CheckTaskQueuesHavePollersActivityArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CheckTaskQueuesHavePollersActivityArgs + switch t := that.(type) { + case *CheckTaskQueuesHavePollersActivityArgs: + that1 = t + case CheckTaskQueuesHavePollersActivityArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteDeploymentArgs to the protobuf v3 wire format +func (val *DeleteDeploymentArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteDeploymentArgs from the protobuf v3 wire format +func (val *DeleteDeploymentArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteDeploymentArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteDeploymentArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteDeploymentArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteDeploymentArgs + switch t := that.(type) { + case *DeleteDeploymentArgs: + that1 = t + case DeleteDeploymentArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetRampingVersionResponse to the protobuf v3 wire format +func (val *SetRampingVersionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetRampingVersionResponse from the protobuf v3 wire format +func (val *SetRampingVersionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetRampingVersionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetRampingVersionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetRampingVersionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetRampingVersionResponse + switch t := that.(type) { + case *SetRampingVersionResponse: + that1 = t + case SetRampingVersionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetRampingVersionArgs to the protobuf v3 wire format +func (val *SetRampingVersionArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetRampingVersionArgs from the protobuf v3 wire format +func (val *SetRampingVersionArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetRampingVersionArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetRampingVersionArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetRampingVersionArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetRampingVersionArgs + switch t := that.(type) { + case *SetRampingVersionArgs: + that1 = t + case SetRampingVersionArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetManagerIdentityArgs to the protobuf v3 wire format +func (val *SetManagerIdentityArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetManagerIdentityArgs from the protobuf v3 wire format +func (val *SetManagerIdentityArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetManagerIdentityArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetManagerIdentityArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetManagerIdentityArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetManagerIdentityArgs + switch t := that.(type) { + case *SetManagerIdentityArgs: + that1 = t + case SetManagerIdentityArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SetManagerIdentityResponse to the protobuf v3 wire format +func (val *SetManagerIdentityResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SetManagerIdentityResponse from the protobuf v3 wire format +func (val *SetManagerIdentityResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SetManagerIdentityResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SetManagerIdentityResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SetManagerIdentityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SetManagerIdentityResponse + switch t := that.(type) { + case *SetManagerIdentityResponse: + that1 = t + case SetManagerIdentityResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncVersionStateActivityArgs to the protobuf v3 wire format +func (val *SyncVersionStateActivityArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncVersionStateActivityArgs from the protobuf v3 wire format +func (val *SyncVersionStateActivityArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncVersionStateActivityArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncVersionStateActivityArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncVersionStateActivityArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncVersionStateActivityArgs + switch t := that.(type) { + case *SyncVersionStateActivityArgs: + that1 = t + case SyncVersionStateActivityArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncVersionStateActivityResult to the protobuf v3 wire format +func (val *SyncVersionStateActivityResult) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncVersionStateActivityResult from the protobuf v3 wire format +func (val *SyncVersionStateActivityResult) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncVersionStateActivityResult) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncVersionStateActivityResult values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncVersionStateActivityResult) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncVersionStateActivityResult + switch t := that.(type) { + case *SyncVersionStateActivityResult: + that1 = t + case SyncVersionStateActivityResult: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type IsVersionMissingTaskQueuesArgs to the protobuf v3 wire format +func (val *IsVersionMissingTaskQueuesArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type IsVersionMissingTaskQueuesArgs from the protobuf v3 wire format +func (val *IsVersionMissingTaskQueuesArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *IsVersionMissingTaskQueuesArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two IsVersionMissingTaskQueuesArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *IsVersionMissingTaskQueuesArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *IsVersionMissingTaskQueuesArgs + switch t := that.(type) { + case *IsVersionMissingTaskQueuesArgs: + that1 = t + case IsVersionMissingTaskQueuesArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type IsVersionMissingTaskQueuesResult to the protobuf v3 wire format +func (val *IsVersionMissingTaskQueuesResult) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type IsVersionMissingTaskQueuesResult from the protobuf v3 wire format +func (val *IsVersionMissingTaskQueuesResult) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *IsVersionMissingTaskQueuesResult) Size() int { + return proto.Size(val) +} + +// Equal returns whether two IsVersionMissingTaskQueuesResult values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *IsVersionMissingTaskQueuesResult) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *IsVersionMissingTaskQueuesResult + switch t := that.(type) { + case *IsVersionMissingTaskQueuesResult: + that1 = t + case IsVersionMissingTaskQueuesResult: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentWorkflowMemo to the protobuf v3 wire format +func (val *WorkerDeploymentWorkflowMemo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentWorkflowMemo from the protobuf v3 wire format +func (val *WorkerDeploymentWorkflowMemo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentWorkflowMemo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentWorkflowMemo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentWorkflowMemo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentWorkflowMemo + switch t := that.(type) { + case *WorkerDeploymentWorkflowMemo: + that1 = t + case WorkerDeploymentWorkflowMemo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentSummary to the protobuf v3 wire format +func (val *WorkerDeploymentSummary) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentSummary from the protobuf v3 wire format +func (val *WorkerDeploymentSummary) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentSummary) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentSummary values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentSummary) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentSummary + switch t := that.(type) { + case *WorkerDeploymentSummary: + that1 = t + case WorkerDeploymentSummary: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ValidateWorkerControllerInstanceSpecInput to the protobuf v3 wire format +func (val *ValidateWorkerControllerInstanceSpecInput) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ValidateWorkerControllerInstanceSpecInput from the protobuf v3 wire format +func (val *ValidateWorkerControllerInstanceSpecInput) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ValidateWorkerControllerInstanceSpecInput) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ValidateWorkerControllerInstanceSpecInput values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ValidateWorkerControllerInstanceSpecInput) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ValidateWorkerControllerInstanceSpecInput + switch t := that.(type) { + case *ValidateWorkerControllerInstanceSpecInput: + that1 = t + case ValidateWorkerControllerInstanceSpecInput: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateWorkerControllerInstanceInput to the protobuf v3 wire format +func (val *UpdateWorkerControllerInstanceInput) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateWorkerControllerInstanceInput from the protobuf v3 wire format +func (val *UpdateWorkerControllerInstanceInput) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateWorkerControllerInstanceInput) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateWorkerControllerInstanceInput values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateWorkerControllerInstanceInput) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateWorkerControllerInstanceInput + switch t := that.(type) { + case *UpdateWorkerControllerInstanceInput: + that1 = t + case UpdateWorkerControllerInstanceInput: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteWorkerControllerInstanceInput to the protobuf v3 wire format +func (val *DeleteWorkerControllerInstanceInput) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteWorkerControllerInstanceInput from the protobuf v3 wire format +func (val *DeleteWorkerControllerInstanceInput) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteWorkerControllerInstanceInput) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteWorkerControllerInstanceInput values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteWorkerControllerInstanceInput) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteWorkerControllerInstanceInput + switch t := that.(type) { + case *DeleteWorkerControllerInstanceInput: + that1 = t + case DeleteWorkerControllerInstanceInput: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateComputeConfigArgs to the protobuf v3 wire format +func (val *UpdateComputeConfigArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateComputeConfigArgs from the protobuf v3 wire format +func (val *UpdateComputeConfigArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateComputeConfigArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateComputeConfigArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateComputeConfigArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateComputeConfigArgs + switch t := that.(type) { + case *UpdateComputeConfigArgs: + that1 = t + case UpdateComputeConfigArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateComputeConfigResponse to the protobuf v3 wire format +func (val *UpdateComputeConfigResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateComputeConfigResponse from the protobuf v3 wire format +func (val *UpdateComputeConfigResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateComputeConfigResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateComputeConfigResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateComputeConfigResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateComputeConfigResponse + switch t := that.(type) { + case *UpdateComputeConfigResponse: + that1 = t + case UpdateComputeConfigResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceCANDeploymentSignalArgs to the protobuf v3 wire format +func (val *ForceCANDeploymentSignalArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceCANDeploymentSignalArgs from the protobuf v3 wire format +func (val *ForceCANDeploymentSignalArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceCANDeploymentSignalArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceCANDeploymentSignalArgs values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceCANDeploymentSignalArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceCANDeploymentSignalArgs + switch t := that.(type) { + case *ForceCANDeploymentSignalArgs: + that1 = t + case ForceCANDeploymentSignalArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceCANVersionSignalArgs to the protobuf v3 wire format +func (val *ForceCANVersionSignalArgs) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceCANVersionSignalArgs from the protobuf v3 wire format +func (val *ForceCANVersionSignalArgs) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceCANVersionSignalArgs) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceCANVersionSignalArgs values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceCANVersionSignalArgs) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceCANVersionSignalArgs + switch t := that.(type) { + case *ForceCANVersionSignalArgs: + that1 = t + case ForceCANVersionSignalArgs: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/deployment/v1/message.pb.go b/api/deployment/v1/message.pb.go new file mode 100644 index 00000000000..2a65b099db2 --- /dev/null +++ b/api/deployment/v1/message.pb.go @@ -0,0 +1,4576 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/deployment/v1/message.proto + +package deployment + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v13 "go.temporal.io/api/common/v1" + v12 "go.temporal.io/api/compute/v1" + v11 "go.temporal.io/api/deployment/v1" + v1 "go.temporal.io/api/enums/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Identifies a Worker Deployment Version. The combination of `deployment_name` and `build_id` +// serve as the identifier. +type WorkerDeploymentVersion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The name of the Deployment this version belongs too. + DeploymentName string `protobuf:"bytes,1,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + // Build ID uniquely identifies the Deployment Version within a Deployment, but the same Build + // ID can be used in multiple Deployments. 
+ BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerDeploymentVersion) Reset() { + *x = WorkerDeploymentVersion{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerDeploymentVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerDeploymentVersion) ProtoMessage() {} + +func (x *WorkerDeploymentVersion) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerDeploymentVersion.ProtoReflect.Descriptor instead. +func (*WorkerDeploymentVersion) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{0} +} + +func (x *WorkerDeploymentVersion) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *WorkerDeploymentVersion) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +// The source of truth for this data is in the WorkerDeployment entity workflows, which is +// synced to all TQs whenever the source changes. +// Deprecated. +type DeploymentVersionData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Nil means unversioned. + Version *WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Last time `current_since_time`, `ramping_since_time, or `ramp_percentage` of this version changed. 
+ RoutingUpdateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=routing_update_time,json=routingUpdateTime,proto3" json:"routing_update_time,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --) + // + // Nil if not current. + CurrentSinceTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=current_since_time,json=currentSinceTime,proto3" json:"current_since_time,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --) + // + // Nil if not ramping. Updated when the version first starts ramping, not on each ramp change. + RampingSinceTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=ramping_since_time,json=rampingSinceTime,proto3" json:"ramping_since_time,omitempty"` + // Range: [0, 100]. Must be zero if the version is not ramping (i.e. `ramping_since_time` is nil). + // Can be in the range [0, 100] if the version is ramping. + RampPercentage float32 `protobuf:"fixed32,5,opt,name=ramp_percentage,json=rampPercentage,proto3" json:"ramp_percentage,omitempty"` + // Status of the Worker Deployment Version. 
+ Status v1.WorkerDeploymentVersionStatus `protobuf:"varint,6,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkerDeploymentVersionStatus" json:"status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeploymentVersionData) Reset() { + *x = DeploymentVersionData{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeploymentVersionData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeploymentVersionData) ProtoMessage() {} + +func (x *DeploymentVersionData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeploymentVersionData.ProtoReflect.Descriptor instead. 
+func (*DeploymentVersionData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *DeploymentVersionData) GetVersion() *WorkerDeploymentVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *DeploymentVersionData) GetRoutingUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.RoutingUpdateTime + } + return nil +} + +func (x *DeploymentVersionData) GetCurrentSinceTime() *timestamppb.Timestamp { + if x != nil { + return x.CurrentSinceTime + } + return nil +} + +func (x *DeploymentVersionData) GetRampingSinceTime() *timestamppb.Timestamp { + if x != nil { + return x.RampingSinceTime + } + return nil +} + +func (x *DeploymentVersionData) GetRampPercentage() float32 { + if x != nil { + return x.RampPercentage + } + return 0 +} + +func (x *DeploymentVersionData) GetStatus() v1.WorkerDeploymentVersionStatus { + if x != nil { + return x.Status + } + return v1.WorkerDeploymentVersionStatus(0) +} + +// Information that a TQ should know about a particular Deployment Version. This info is not part of +// RoutingConfig and hence not protected by the routing config revision number. +// As of Workflow Version `VersionDataRevisionNumber`, version specific data has its own revision +// number, which makes async propagations safer and allows async registration. +type WorkerDeploymentVersionData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Incremented everytime version data changes. Updates with lower revision number than what is + // already in the TQ will be ignored to avoid stale writes. + RevisionNumber int64 `protobuf:"varint,1,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // Last update time. Used for garbage collecting deleted versions from TQ user data. 
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + // In order to protect against deletes being overwritten by delayed stale writes, we can't + // immediately delete the version data from task queues. instead, we mark them as deleted while + // keeping the revision number. + // Old enough deleted versions are GCed based on update_time. + // Deprecated. This mechanism is not safe against reactivation of versions after delete. + // Use forget_version flag for synchronous deletion of the version data from TQ. + Deleted bool `protobuf:"varint,3,opt,name=deleted,proto3" json:"deleted,omitempty"` + Status v1.WorkerDeploymentVersionStatus `protobuf:"varint,6,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkerDeploymentVersionStatus" json:"status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerDeploymentVersionData) Reset() { + *x = WorkerDeploymentVersionData{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerDeploymentVersionData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerDeploymentVersionData) ProtoMessage() {} + +func (x *WorkerDeploymentVersionData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerDeploymentVersionData.ProtoReflect.Descriptor instead. 
// NOTE(review): protoc-gen-go generated code — regenerate from
// temporal/server/api/deployment/v1/message.proto rather than hand-editing.

// Deprecated: Use WorkerDeploymentVersionData.ProtoReflect.Descriptor instead.
func (*WorkerDeploymentVersionData) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{2}
}

// Nil-safe generated accessors: each GetX returns the field value, or the
// field's zero value when the receiver is nil.

func (x *WorkerDeploymentVersionData) GetRevisionNumber() int64 {
	if x != nil {
		return x.RevisionNumber
	}
	return 0
}

func (x *WorkerDeploymentVersionData) GetUpdateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.UpdateTime
	}
	return nil
}

func (x *WorkerDeploymentVersionData) GetDeleted() bool {
	if x != nil {
		return x.Deleted
	}
	return false
}

func (x *WorkerDeploymentVersionData) GetStatus() v1.WorkerDeploymentVersionStatus {
	if x != nil {
		return x.Status
	}
	return v1.WorkerDeploymentVersionStatus(0)
}

// Local state for Worker Deployment Version
type VersionLocalState struct {
	state      protoimpl.MessageState `protogen:"open.v1"`
	Version    *WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
	CreateTime *timestamppb.Timestamp  `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Last time `current_since_time`, `ramping_since_time`, or `ramp_percentage` of this version changed.
	RoutingUpdateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=routing_update_time,json=routingUpdateTime,proto3" json:"routing_update_time,omitempty"`
	// (-- api-linter: core::0140::prepositions=disabled
	//
	// aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --)
	//
	// Nil if not current.
	CurrentSinceTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=current_since_time,json=currentSinceTime,proto3" json:"current_since_time,omitempty"`
	// (-- api-linter: core::0140::prepositions=disabled
	//
	// aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --)
	//
	// Nil if not ramping. Updated when the version first starts ramping, not on each ramp change.
	RampingSinceTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=ramping_since_time,json=rampingSinceTime,proto3" json:"ramping_since_time,omitempty"`
	// Range: [0, 100]. Must be zero if the version is not ramping (i.e. `ramping_since_time` is nil).
	// Can be in the range [0, 100] if the version is ramping.
	RampPercentage float32 `protobuf:"fixed32,6,opt,name=ramp_percentage,json=rampPercentage,proto3" json:"ramp_percentage,omitempty"`
	// Timestamp when this version first became current or ramping.
	FirstActivationTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=first_activation_time,json=firstActivationTime,proto3" json:"first_activation_time,omitempty"`
	// Timestamp when this version last became current.
	// Can be used to determine whether a version has ever been Current.
	LastCurrentTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=last_current_time,json=lastCurrentTime,proto3" json:"last_current_time,omitempty"`
	// Timestamp when this version last stopped being current or ramping.
	LastDeactivationTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=last_deactivation_time,json=lastDeactivationTime,proto3" json:"last_deactivation_time,omitempty"`
	// Helps user determine when it is safe to decommission the workers of this
	// Version. Not present when version is current or ramping.
	// Current limitations:
	//   - Not supported for Unversioned mode.
	//   - Periodically refreshed, may have delays up to few minutes (consult the
	//     last_checked_time value).
	//   - Refreshed only when version is not current or ramping AND the status is not
	//     "drained" yet.
	//   - Once the status is changed to "drained", it is not changed until the Version
	//     becomes Current or Ramping again, at which time the drainage info is cleared.
	//     This means if the Version is "drained" but new workflows are sent to it via
	//     Pinned Versioning Override, the status does not account for those Pinned-override
	//     executions and remains "drained".
	DrainageInfo *v11.VersionDrainageInfo `protobuf:"bytes,7,opt,name=drainage_info,json=drainageInfo,proto3" json:"drainage_info,omitempty"`
	// Arbitrary user-provided metadata attached to this version.
	Metadata *v11.VersionMetadata `protobuf:"bytes,8,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// Deployment workflow should always be running before starting the version workflow.
	// We should not start the deployment workflow. If we cannot find the deployment workflow when signaling, it means a bug and we should fix it.
	// Deprecated.
	//
	// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
	StartedDeploymentWorkflow bool `protobuf:"varint,9,opt,name=started_deployment_workflow,json=startedDeploymentWorkflow,proto3" json:"started_deployment_workflow,omitempty"`
	// Key: Task Queue Name
	TaskQueueFamilies map[string]*VersionLocalState_TaskQueueFamilyData `protobuf:"bytes,10,rep,name=task_queue_families,json=taskQueueFamilies,proto3" json:"task_queue_families,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	// Number of task queues which will be synced in a single batch.
	SyncBatchSize int32 `protobuf:"varint,11,opt,name=sync_batch_size,json=syncBatchSize,proto3" json:"sync_batch_size,omitempty"`
	// Status of the Worker Deployment Version.
	Status v1.WorkerDeploymentVersionStatus `protobuf:"varint,14,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkerDeploymentVersionStatus" json:"status,omitempty"`
	// Incremented everytime version data synced to TQ changes. Updates with lower revision number
	// than what is already in the TQ will be ignored to avoid stale writes during async operations.
	RevisionNumber int64 `protobuf:"varint,15,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"`
	// Identity of the last client who modified the configuration of this Version.
	// Covers changes through: CreateWorkerDeploymentVersion, UpdateWorkerDeploymentVersionComputeConfig,
	// UpdateWorkerDeploymentVersionMetadata.
	LastModifierIdentity string `protobuf:"bytes,17,opt,name=last_modifier_identity,json=lastModifierIdentity,proto3" json:"last_modifier_identity,omitempty"`
	// Cached compute config summary, kept in sync with the WCI on each compute config update.
	ComputeConfig *v12.ComputeConfigSummary `protobuf:"bytes,18,opt,name=compute_config,json=computeConfig,proto3" json:"compute_config,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

// Reset clears the message to its zero value and reattaches the generated message info.
func (x *VersionLocalState) Reset() {
	*x = VersionLocalState{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[3]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *VersionLocalState) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*VersionLocalState) ProtoMessage() {}

// ProtoReflect implements protoreflect.ProtoMessage, lazily storing the
// message info on first use.
func (x *VersionLocalState) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[3]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use VersionLocalState.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated accessors for VersionLocalState —
// each GetX is nil-safe, returning the field's zero value on a nil receiver.
func (*VersionLocalState) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{3}
}

func (x *VersionLocalState) GetVersion() *WorkerDeploymentVersion {
	if x != nil {
		return x.Version
	}
	return nil
}

func (x *VersionLocalState) GetCreateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CreateTime
	}
	return nil
}

func (x *VersionLocalState) GetRoutingUpdateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.RoutingUpdateTime
	}
	return nil
}

func (x *VersionLocalState) GetCurrentSinceTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CurrentSinceTime
	}
	return nil
}

func (x *VersionLocalState) GetRampingSinceTime() *timestamppb.Timestamp {
	if x != nil {
		return x.RampingSinceTime
	}
	return nil
}

func (x *VersionLocalState) GetRampPercentage() float32 {
	if x != nil {
		return x.RampPercentage
	}
	return 0
}

func (x *VersionLocalState) GetFirstActivationTime() *timestamppb.Timestamp {
	if x != nil {
		return x.FirstActivationTime
	}
	return nil
}

func (x *VersionLocalState) GetLastCurrentTime() *timestamppb.Timestamp {
	if x != nil {
		return x.LastCurrentTime
	}
	return nil
}

func (x *VersionLocalState) GetLastDeactivationTime() *timestamppb.Timestamp {
	if x != nil {
		return x.LastDeactivationTime
	}
	return nil
}

func (x *VersionLocalState) GetDrainageInfo() *v11.VersionDrainageInfo {
	if x != nil {
		return x.DrainageInfo
	}
	return nil
}

func (x *VersionLocalState) GetMetadata() *v11.VersionMetadata {
	if x != nil {
		return x.Metadata
	}
	return nil
}

// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
// NOTE(review): remaining protoc-gen-go generated nil-safe accessors for
// VersionLocalState.
func (x *VersionLocalState) GetStartedDeploymentWorkflow() bool {
	if x != nil {
		return x.StartedDeploymentWorkflow
	}
	return false
}

func (x *VersionLocalState) GetTaskQueueFamilies() map[string]*VersionLocalState_TaskQueueFamilyData {
	if x != nil {
		return x.TaskQueueFamilies
	}
	return nil
}

func (x *VersionLocalState) GetSyncBatchSize() int32 {
	if x != nil {
		return x.SyncBatchSize
	}
	return 0
}

func (x *VersionLocalState) GetStatus() v1.WorkerDeploymentVersionStatus {
	if x != nil {
		return x.Status
	}
	return v1.WorkerDeploymentVersionStatus(0)
}

func (x *VersionLocalState) GetRevisionNumber() int64 {
	if x != nil {
		return x.RevisionNumber
	}
	return 0
}

func (x *VersionLocalState) GetLastModifierIdentity() string {
	if x != nil {
		return x.LastModifierIdentity
	}
	return ""
}

func (x *VersionLocalState) GetComputeConfig() *v12.ComputeConfigSummary {
	if x != nil {
		return x.ComputeConfig
	}
	return nil
}

// Data specific to a task queue, from the perspective of a worker deployment version.
// NOTE(review): protoc-gen-go generated messages — regenerate from
// temporal/server/api/deployment/v1/message.proto rather than hand-editing.
// TaskQueueVersionData currently carries no fields (placeholder message).
type TaskQueueVersionData struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *TaskQueueVersionData) Reset() {
	*x = TaskQueueVersionData{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[4]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *TaskQueueVersionData) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TaskQueueVersionData) ProtoMessage() {}

func (x *TaskQueueVersionData) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[4]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TaskQueueVersionData.ProtoReflect.Descriptor instead.
func (*TaskQueueVersionData) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{4}
}

// used as Worker Deployment Version workflow input:
type WorkerDeploymentVersionWorkflowArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	NamespaceName string                 `protobuf:"bytes,1,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
	NamespaceId   string                 `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"`
	VersionState  *VersionLocalState     `protobuf:"bytes,3,opt,name=version_state,json=versionState,proto3" json:"version_state,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WorkerDeploymentVersionWorkflowArgs) Reset() {
	*x = WorkerDeploymentVersionWorkflowArgs{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[5]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WorkerDeploymentVersionWorkflowArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerDeploymentVersionWorkflowArgs) ProtoMessage() {}

func (x *WorkerDeploymentVersionWorkflowArgs) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[5]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerDeploymentVersionWorkflowArgs.ProtoReflect.Descriptor instead.
func (*WorkerDeploymentVersionWorkflowArgs) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{5}
}

// Nil-safe generated accessors.

func (x *WorkerDeploymentVersionWorkflowArgs) GetNamespaceName() string {
	if x != nil {
		return x.NamespaceName
	}
	return ""
}

func (x *WorkerDeploymentVersionWorkflowArgs) GetNamespaceId() string {
	if x != nil {
		return x.NamespaceId
	}
	return ""
}

func (x *WorkerDeploymentVersionWorkflowArgs) GetVersionState() *VersionLocalState {
	if x != nil {
		return x.VersionState
	}
	return nil
}

// used as Worker Deployment workflow input:
type WorkerDeploymentWorkflowArgs struct {
	state          protoimpl.MessageState      `protogen:"open.v1"`
	NamespaceName  string                      `protobuf:"bytes,1,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
	NamespaceId    string                      `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"`
	DeploymentName string                      `protobuf:"bytes,3,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"`
	State          *WorkerDeploymentLocalState `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"`
	unknownFields  protoimpl.UnknownFields
	sizeCache      protoimpl.SizeCache
}

func (x *WorkerDeploymentWorkflowArgs) Reset() {
	*x = WorkerDeploymentWorkflowArgs{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[6]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WorkerDeploymentWorkflowArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerDeploymentWorkflowArgs) ProtoMessage() {}

func (x *WorkerDeploymentWorkflowArgs) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[6]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerDeploymentWorkflowArgs.ProtoReflect.Descriptor instead.
func (*WorkerDeploymentWorkflowArgs) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{6}
}

func (x *WorkerDeploymentWorkflowArgs) GetNamespaceName() string {
	if x != nil {
		return x.NamespaceName
	}
	return ""
}

func (x *WorkerDeploymentWorkflowArgs) GetNamespaceId() string {
	if x != nil {
		return x.NamespaceId
	}
	return ""
}

func (x *WorkerDeploymentWorkflowArgs) GetDeploymentName() string {
	if x != nil {
		return x.DeploymentName
	}
	return ""
}

func (x *WorkerDeploymentWorkflowArgs) GetState() *WorkerDeploymentLocalState {
	if x != nil {
		return x.State
	}
	return nil
}

// Local state for Worker Deployment
type WorkerDeploymentLocalState struct {
	state      protoimpl.MessageState `protogen:"open.v1"`
	CreateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Encapsulates task routing information for this deployment.
	RoutingConfig        *v11.RoutingConfig                         `protobuf:"bytes,2,opt,name=routing_config,json=routingConfig,proto3" json:"routing_config,omitempty"`
	Versions             map[string]*WorkerDeploymentVersionSummary `protobuf:"bytes,3,rep,name=versions,proto3" json:"versions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	ConflictToken        []byte                                     `protobuf:"bytes,4,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"`
	LastModifierIdentity string                                     `protobuf:"bytes,5,opt,name=last_modifier_identity,json=lastModifierIdentity,proto3" json:"last_modifier_identity,omitempty"`
	// Number of task queues which will be synced in a single batch.
	SyncBatchSize   int32  `protobuf:"varint,6,opt,name=sync_batch_size,json=syncBatchSize,proto3" json:"sync_batch_size,omitempty"`
	ManagerIdentity string `protobuf:"bytes,7,opt,name=manager_identity,json=managerIdentity,proto3" json:"manager_identity,omitempty"`
	// Track async propagations in progress per build ID. Map: build_id -> revision numbers.
	// Used to track which propagations are still pending across continue-as-new.
	PropagatingRevisions map[string]*PropagatingRevisions `protobuf:"bytes,8,rep,name=propagating_revisions,json=propagatingRevisions,proto3" json:"propagating_revisions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
	// Request ID used to create this worker deployment.
	CreateRequestId string `protobuf:"bytes,9,opt,name=create_request_id,json=createRequestId,proto3" json:"create_request_id,omitempty"`
	unknownFields   protoimpl.UnknownFields
	sizeCache       protoimpl.SizeCache
}

func (x *WorkerDeploymentLocalState) Reset() {
	*x = WorkerDeploymentLocalState{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[7]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WorkerDeploymentLocalState) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerDeploymentLocalState) ProtoMessage() {}

func (x *WorkerDeploymentLocalState) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[7]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerDeploymentLocalState.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated code — nil-safe accessors for
// WorkerDeploymentLocalState, followed by the PropagatingRevisions and
// WorkerDeploymentVersionSummary generated messages.
func (*WorkerDeploymentLocalState) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{7}
}

func (x *WorkerDeploymentLocalState) GetCreateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CreateTime
	}
	return nil
}

func (x *WorkerDeploymentLocalState) GetRoutingConfig() *v11.RoutingConfig {
	if x != nil {
		return x.RoutingConfig
	}
	return nil
}

func (x *WorkerDeploymentLocalState) GetVersions() map[string]*WorkerDeploymentVersionSummary {
	if x != nil {
		return x.Versions
	}
	return nil
}

func (x *WorkerDeploymentLocalState) GetConflictToken() []byte {
	if x != nil {
		return x.ConflictToken
	}
	return nil
}

func (x *WorkerDeploymentLocalState) GetLastModifierIdentity() string {
	if x != nil {
		return x.LastModifierIdentity
	}
	return ""
}

func (x *WorkerDeploymentLocalState) GetSyncBatchSize() int32 {
	if x != nil {
		return x.SyncBatchSize
	}
	return 0
}

func (x *WorkerDeploymentLocalState) GetManagerIdentity() string {
	if x != nil {
		return x.ManagerIdentity
	}
	return ""
}

func (x *WorkerDeploymentLocalState) GetPropagatingRevisions() map[string]*PropagatingRevisions {
	if x != nil {
		return x.PropagatingRevisions
	}
	return nil
}

func (x *WorkerDeploymentLocalState) GetCreateRequestId() string {
	if x != nil {
		return x.CreateRequestId
	}
	return ""
}

// Tracks revision numbers that are currently propagating for a specific build ID
type PropagatingRevisions struct {
	state           protoimpl.MessageState `protogen:"open.v1"`
	RevisionNumbers []int64                `protobuf:"varint,1,rep,packed,name=revision_numbers,json=revisionNumbers,proto3" json:"revision_numbers,omitempty"`
	unknownFields   protoimpl.UnknownFields
	sizeCache       protoimpl.SizeCache
}

func (x *PropagatingRevisions) Reset() {
	*x = PropagatingRevisions{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[8]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *PropagatingRevisions) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*PropagatingRevisions) ProtoMessage() {}

func (x *PropagatingRevisions) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[8]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PropagatingRevisions.ProtoReflect.Descriptor instead.
func (*PropagatingRevisions) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{8}
}

func (x *PropagatingRevisions) GetRevisionNumbers() []int64 {
	if x != nil {
		return x.RevisionNumbers
	}
	return nil
}

type WorkerDeploymentVersionSummary struct {
	state      protoimpl.MessageState `protogen:"open.v1"`
	Version    string                 `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
	CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
	DrainageStatus v1.VersionDrainageStatus `protobuf:"varint,3,opt,name=drainage_status,json=drainageStatus,proto3,enum=temporal.api.enums.v1.VersionDrainageStatus" json:"drainage_status,omitempty"`
	// Information about workflow drainage to help the user determine when it is safe
	// to decommission a Version. Not present while version is current or ramping.
	DrainageInfo *v11.VersionDrainageInfo `protobuf:"bytes,4,opt,name=drainage_info,json=drainageInfo,proto3" json:"drainage_info,omitempty"`
	// Last time `current_since_time`, `ramping_since_time`, or `ramp_percentage` of this version changed.
	RoutingUpdateTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=routing_update_time,json=routingUpdateTime,proto3" json:"routing_update_time,omitempty"`
	// (-- api-linter: core::0140::prepositions=disabled
	//
	// aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --)
	//
	// Nil if not current.
	CurrentSinceTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=current_since_time,json=currentSinceTime,proto3" json:"current_since_time,omitempty"`
	// (-- api-linter: core::0140::prepositions=disabled
	//
	// aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --)
	//
	// Nil if not ramping. Updated when the version first starts ramping, not on each ramp change.
	RampingSinceTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=ramping_since_time,json=rampingSinceTime,proto3" json:"ramping_since_time,omitempty"`
	// Timestamp when this version first became current or ramping.
	FirstActivationTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=first_activation_time,json=firstActivationTime,proto3" json:"first_activation_time,omitempty"`
	// Timestamp when this version last became current.
	// Can be used to determine whether a version has ever been Current.
	LastCurrentTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=last_current_time,json=lastCurrentTime,proto3" json:"last_current_time,omitempty"`
	// Timestamp when this version last stopped being current or ramping.
	LastDeactivationTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_deactivation_time,json=lastDeactivationTime,proto3" json:"last_deactivation_time,omitempty"`
	// Status of the Worker Deployment Version.
	Status v1.WorkerDeploymentVersionStatus `protobuf:"varint,10,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkerDeploymentVersionStatus" json:"status,omitempty"`
	// Request ID used to create this version. Used for idempotency.
	// Not synced from the version workflow; only set by the deployment workflow.
	CreateRequestId string `protobuf:"bytes,12,opt,name=create_request_id,json=createRequestId,proto3" json:"create_request_id,omitempty"`
	// Compute config summary for this version. Synced from the version workflow on each compute config update.
	// Also set by the deployment workflow at version creation time if a compute config was provided.
	ComputeConfig *v12.ComputeConfigSummary `protobuf:"bytes,13,opt,name=compute_config,json=computeConfig,proto3" json:"compute_config,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *WorkerDeploymentVersionSummary) Reset() {
	*x = WorkerDeploymentVersionSummary{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[9]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *WorkerDeploymentVersionSummary) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*WorkerDeploymentVersionSummary) ProtoMessage() {}

func (x *WorkerDeploymentVersionSummary) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[9]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use WorkerDeploymentVersionSummary.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated nil-safe accessors for
// WorkerDeploymentVersionSummary, followed by the RegisterWorkerInVersionArgs
// generated message.
func (*WorkerDeploymentVersionSummary) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{9}
}

func (x *WorkerDeploymentVersionSummary) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

func (x *WorkerDeploymentVersionSummary) GetCreateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CreateTime
	}
	return nil
}

// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
func (x *WorkerDeploymentVersionSummary) GetDrainageStatus() v1.VersionDrainageStatus {
	if x != nil {
		return x.DrainageStatus
	}
	return v1.VersionDrainageStatus(0)
}

func (x *WorkerDeploymentVersionSummary) GetDrainageInfo() *v11.VersionDrainageInfo {
	if x != nil {
		return x.DrainageInfo
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetRoutingUpdateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.RoutingUpdateTime
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetCurrentSinceTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CurrentSinceTime
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetRampingSinceTime() *timestamppb.Timestamp {
	if x != nil {
		return x.RampingSinceTime
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetFirstActivationTime() *timestamppb.Timestamp {
	if x != nil {
		return x.FirstActivationTime
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetLastCurrentTime() *timestamppb.Timestamp {
	if x != nil {
		return x.LastCurrentTime
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetLastDeactivationTime() *timestamppb.Timestamp {
	if x != nil {
		return x.LastDeactivationTime
	}
	return nil
}

func (x *WorkerDeploymentVersionSummary) GetStatus() v1.WorkerDeploymentVersionStatus {
	if x != nil {
		return x.Status
	}
	return v1.WorkerDeploymentVersionStatus(0)
}

func (x *WorkerDeploymentVersionSummary) GetCreateRequestId() string {
	if x != nil {
		return x.CreateRequestId
	}
	return ""
}

func (x *WorkerDeploymentVersionSummary) GetComputeConfig() *v12.ComputeConfigSummary {
	if x != nil {
		return x.ComputeConfig
	}
	return nil
}

// used as Worker Deployment Version workflow update input:
type RegisterWorkerInVersionArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	TaskQueueName string                 `protobuf:"bytes,1,opt,name=task_queue_name,json=taskQueueName,proto3" json:"task_queue_name,omitempty"`
	TaskQueueType v1.TaskQueueType       `protobuf:"varint,2,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"`
	MaxTaskQueues int32                  `protobuf:"varint,3,opt,name=max_task_queues,json=maxTaskQueues,proto3" json:"max_task_queues,omitempty"`
	Version       string                 `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
	RoutingConfig *v11.RoutingConfig     `protobuf:"bytes,5,opt,name=routing_config,json=routingConfig,proto3" json:"routing_config,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *RegisterWorkerInVersionArgs) Reset() {
	*x = RegisterWorkerInVersionArgs{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[10]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *RegisterWorkerInVersionArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegisterWorkerInVersionArgs) ProtoMessage() {}

func (x *RegisterWorkerInVersionArgs) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[10]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegisterWorkerInVersionArgs.ProtoReflect.Descriptor instead.
// NOTE(review): protoc-gen-go generated code — accessors for
// RegisterWorkerInVersionArgs plus the remaining generated messages:
// RegisterWorkerInWorkerDeploymentArgs, the DescribeVersion... activity
// args/result pair, and SyncVersionStateUpdateArgs.
func (*RegisterWorkerInVersionArgs) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{10}
}

func (x *RegisterWorkerInVersionArgs) GetTaskQueueName() string {
	if x != nil {
		return x.TaskQueueName
	}
	return ""
}

func (x *RegisterWorkerInVersionArgs) GetTaskQueueType() v1.TaskQueueType {
	if x != nil {
		return x.TaskQueueType
	}
	return v1.TaskQueueType(0)
}

func (x *RegisterWorkerInVersionArgs) GetMaxTaskQueues() int32 {
	if x != nil {
		return x.MaxTaskQueues
	}
	return 0
}

func (x *RegisterWorkerInVersionArgs) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

func (x *RegisterWorkerInVersionArgs) GetRoutingConfig() *v11.RoutingConfig {
	if x != nil {
		return x.RoutingConfig
	}
	return nil
}

// used as Worker Deployment workflow update input:
type RegisterWorkerInWorkerDeploymentArgs struct {
	state         protoimpl.MessageState   `protogen:"open.v1"`
	TaskQueueName string                   `protobuf:"bytes,1,opt,name=task_queue_name,json=taskQueueName,proto3" json:"task_queue_name,omitempty"`
	TaskQueueType v1.TaskQueueType         `protobuf:"varint,2,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"`
	MaxTaskQueues int32                    `protobuf:"varint,3,opt,name=max_task_queues,json=maxTaskQueues,proto3" json:"max_task_queues,omitempty"`
	Version       *WorkerDeploymentVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *RegisterWorkerInWorkerDeploymentArgs) Reset() {
	*x = RegisterWorkerInWorkerDeploymentArgs{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[11]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *RegisterWorkerInWorkerDeploymentArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RegisterWorkerInWorkerDeploymentArgs) ProtoMessage() {}

func (x *RegisterWorkerInWorkerDeploymentArgs) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[11]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RegisterWorkerInWorkerDeploymentArgs.ProtoReflect.Descriptor instead.
func (*RegisterWorkerInWorkerDeploymentArgs) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{11}
}

func (x *RegisterWorkerInWorkerDeploymentArgs) GetTaskQueueName() string {
	if x != nil {
		return x.TaskQueueName
	}
	return ""
}

func (x *RegisterWorkerInWorkerDeploymentArgs) GetTaskQueueType() v1.TaskQueueType {
	if x != nil {
		return x.TaskQueueType
	}
	return v1.TaskQueueType(0)
}

func (x *RegisterWorkerInWorkerDeploymentArgs) GetMaxTaskQueues() int32 {
	if x != nil {
		return x.MaxTaskQueues
	}
	return 0
}

func (x *RegisterWorkerInWorkerDeploymentArgs) GetVersion() *WorkerDeploymentVersion {
	if x != nil {
		return x.Version
	}
	return nil
}

// used as Worker Deployment workflow activity input:
type DescribeVersionFromWorkerDeploymentActivityArgs struct {
	state         protoimpl.MessageState `protogen:"open.v1"`
	Version       string                 `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *DescribeVersionFromWorkerDeploymentActivityArgs) Reset() {
	*x = DescribeVersionFromWorkerDeploymentActivityArgs{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[12]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DescribeVersionFromWorkerDeploymentActivityArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DescribeVersionFromWorkerDeploymentActivityArgs) ProtoMessage() {}

func (x *DescribeVersionFromWorkerDeploymentActivityArgs) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[12]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DescribeVersionFromWorkerDeploymentActivityArgs.ProtoReflect.Descriptor instead.
func (*DescribeVersionFromWorkerDeploymentActivityArgs) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{12}
}

func (x *DescribeVersionFromWorkerDeploymentActivityArgs) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

type DescribeVersionFromWorkerDeploymentActivityResult struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// All the Task Queues that have ever polled from this Deployment version.
	TaskQueueInfos []*v11.WorkerDeploymentVersionInfo_VersionTaskQueueInfo `protobuf:"bytes,1,rep,name=task_queue_infos,json=taskQueueInfos,proto3" json:"task_queue_infos,omitempty"`
	unknownFields  protoimpl.UnknownFields
	sizeCache      protoimpl.SizeCache
}

func (x *DescribeVersionFromWorkerDeploymentActivityResult) Reset() {
	*x = DescribeVersionFromWorkerDeploymentActivityResult{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[13]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *DescribeVersionFromWorkerDeploymentActivityResult) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*DescribeVersionFromWorkerDeploymentActivityResult) ProtoMessage() {}

func (x *DescribeVersionFromWorkerDeploymentActivityResult) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[13]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use DescribeVersionFromWorkerDeploymentActivityResult.ProtoReflect.Descriptor instead.
func (*DescribeVersionFromWorkerDeploymentActivityResult) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{13}
}

func (x *DescribeVersionFromWorkerDeploymentActivityResult) GetTaskQueueInfos() []*v11.WorkerDeploymentVersionInfo_VersionTaskQueueInfo {
	if x != nil {
		return x.TaskQueueInfos
	}
	return nil
}

// used as Worker Deployment workflow update input (sent from Worker Deployment workflow):
type SyncVersionStateUpdateArgs struct {
	state protoimpl.MessageState `protogen:"open.v1"`
	// Last time `current_since_time`, `ramping_since_time`, or `ramp_percentage` of this version changed.
	//
	// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
	RoutingUpdateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=routing_update_time,json=routingUpdateTime,proto3" json:"routing_update_time,omitempty"`
	// (-- api-linter: core::0140::prepositions=disabled
	//
	// aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --)
	//
	// Nil if not current.
	//
	// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
	CurrentSinceTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=current_since_time,json=currentSinceTime,proto3" json:"current_since_time,omitempty"`
	// (-- api-linter: core::0140::prepositions=disabled
	//
	// aip.dev/not-precedent: 'Since' captures the field semantics despite being a preposition. --)
	//
	// Nil if not ramping. Updated when the version first starts ramping, not on each ramp change.
	//
	// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
	RampingSinceTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=ramping_since_time,json=rampingSinceTime,proto3" json:"ramping_since_time,omitempty"`
	// Range: [0, 100]. Must be zero if the version is not ramping (i.e. `ramping_since_time` is nil).
	// Can be in the range [0, 100] if the version is ramping.
	//
	// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
	RampPercentage float32 `protobuf:"fixed32,4,opt,name=ramp_percentage,json=rampPercentage,proto3" json:"ramp_percentage,omitempty"`
	// Full routing config for async propagation mode. When present, the version workflow
	// will propagate the entire routing config asynchronously. When absent, sync mode is used.
	RoutingConfig *v11.RoutingConfig `protobuf:"bytes,5,opt,name=routing_config,json=routingConfig,proto3" json:"routing_config,omitempty"`
	unknownFields protoimpl.UnknownFields
	sizeCache     protoimpl.SizeCache
}

func (x *SyncVersionStateUpdateArgs) Reset() {
	*x = SyncVersionStateUpdateArgs{}
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[14]
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	ms.StoreMessageInfo(mi)
}

func (x *SyncVersionStateUpdateArgs) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SyncVersionStateUpdateArgs) ProtoMessage() {}

func (x *SyncVersionStateUpdateArgs) ProtoReflect() protoreflect.Message {
	mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[14]
	if x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SyncVersionStateUpdateArgs.ProtoReflect.Descriptor instead.
func (*SyncVersionStateUpdateArgs) Descriptor() ([]byte, []int) {
	return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{14}
}

// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
func (x *SyncVersionStateUpdateArgs) GetRoutingUpdateTime() *timestamppb.Timestamp {
	if x != nil {
		return x.RoutingUpdateTime
	}
	return nil
}

// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
func (x *SyncVersionStateUpdateArgs) GetCurrentSinceTime() *timestamppb.Timestamp {
	if x != nil {
		return x.CurrentSinceTime
	}
	return nil
}

// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto.
+func (x *SyncVersionStateUpdateArgs) GetRampingSinceTime() *timestamppb.Timestamp { + if x != nil { + return x.RampingSinceTime + } + return nil +} + +// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto. +func (x *SyncVersionStateUpdateArgs) GetRampPercentage() float32 { + if x != nil { + return x.RampPercentage + } + return 0 +} + +func (x *SyncVersionStateUpdateArgs) GetRoutingConfig() *v11.RoutingConfig { + if x != nil { + return x.RoutingConfig + } + return nil +} + +// used as Worker Deployment workflow update response (sent from Worker Deployment workflow): +type SyncVersionStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Deprecated. State could be so large, no need to send it to the deployment workflow. + // + // Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto. + VersionState *VersionLocalState `protobuf:"bytes,1,opt,name=version_state,json=versionState,proto3" json:"version_state,omitempty"` + Summary *WorkerDeploymentVersionSummary `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncVersionStateResponse) Reset() { + *x = SyncVersionStateResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncVersionStateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncVersionStateResponse) ProtoMessage() {} + +func (x *SyncVersionStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
SyncVersionStateResponse.ProtoReflect.Descriptor instead. +func (*SyncVersionStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{15} +} + +// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto. +func (x *SyncVersionStateResponse) GetVersionState() *VersionLocalState { + if x != nil { + return x.VersionState + } + return nil +} + +func (x *SyncVersionStateResponse) GetSummary() *WorkerDeploymentVersionSummary { + if x != nil { + return x.Summary + } + return nil +} + +// Sent from Version workflow to Worker Deployment workflow +type AddVersionUpdateArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AddVersionUpdateArgs) Reset() { + *x = AddVersionUpdateArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AddVersionUpdateArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddVersionUpdateArgs) ProtoMessage() {} + +func (x *AddVersionUpdateArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddVersionUpdateArgs.ProtoReflect.Descriptor instead. 
+func (*AddVersionUpdateArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{16} +} + +func (x *AddVersionUpdateArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *AddVersionUpdateArgs) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +// Sent from Drainage child workflow to Version parent +type SyncDrainageInfoSignalArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + DrainageInfo *v11.VersionDrainageInfo `protobuf:"bytes,1,opt,name=drainage_info,json=drainageInfo,proto3" json:"drainage_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDrainageInfoSignalArgs) Reset() { + *x = SyncDrainageInfoSignalArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDrainageInfoSignalArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDrainageInfoSignalArgs) ProtoMessage() {} + +func (x *SyncDrainageInfoSignalArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDrainageInfoSignalArgs.ProtoReflect.Descriptor instead. 
+func (*SyncDrainageInfoSignalArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{17} +} + +func (x *SyncDrainageInfoSignalArgs) GetDrainageInfo() *v11.VersionDrainageInfo { + if x != nil { + return x.DrainageInfo + } + return nil +} + +// Sent from Version workflow to Worker Deployment workflow +type SyncDrainageStatusSignalArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + DrainageStatus v1.VersionDrainageStatus `protobuf:"varint,2,opt,name=drainage_status,json=drainageStatus,proto3,enum=temporal.api.enums.v1.VersionDrainageStatus" json:"drainage_status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDrainageStatusSignalArgs) Reset() { + *x = SyncDrainageStatusSignalArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDrainageStatusSignalArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDrainageStatusSignalArgs) ProtoMessage() {} + +func (x *SyncDrainageStatusSignalArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDrainageStatusSignalArgs.ProtoReflect.Descriptor instead. 
+func (*SyncDrainageStatusSignalArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{18} +} + +func (x *SyncDrainageStatusSignalArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *SyncDrainageStatusSignalArgs) GetDrainageStatus() v1.VersionDrainageStatus { + if x != nil { + return x.DrainageStatus + } + return v1.VersionDrainageStatus(0) +} + +// Sent from Version workflow to Worker Deployment workflow when async propagation completes +type PropagationCompletionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + RevisionNumber int64 `protobuf:"varint,1,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PropagationCompletionInfo) Reset() { + *x = PropagationCompletionInfo{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PropagationCompletionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PropagationCompletionInfo) ProtoMessage() {} + +func (x *PropagationCompletionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PropagationCompletionInfo.ProtoReflect.Descriptor instead. 
+func (*PropagationCompletionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{19} +} + +func (x *PropagationCompletionInfo) GetRevisionNumber() int64 { + if x != nil { + return x.RevisionNumber + } + return 0 +} + +func (x *PropagationCompletionInfo) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +// used as Worker Deployment Version workflow query response: +type QueryDescribeVersionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + VersionState *VersionLocalState `protobuf:"bytes,1,opt,name=version_state,json=versionState,proto3" json:"version_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QueryDescribeVersionResponse) Reset() { + *x = QueryDescribeVersionResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QueryDescribeVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryDescribeVersionResponse) ProtoMessage() {} + +func (x *QueryDescribeVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryDescribeVersionResponse.ProtoReflect.Descriptor instead. 
+func (*QueryDescribeVersionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{20} +} + +func (x *QueryDescribeVersionResponse) GetVersionState() *VersionLocalState { + if x != nil { + return x.VersionState + } + return nil +} + +// used as Worker Deployment Version workflow query response: +type QueryDescribeWorkerDeploymentResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + State *WorkerDeploymentLocalState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QueryDescribeWorkerDeploymentResponse) Reset() { + *x = QueryDescribeWorkerDeploymentResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QueryDescribeWorkerDeploymentResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryDescribeWorkerDeploymentResponse) ProtoMessage() {} + +func (x *QueryDescribeWorkerDeploymentResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryDescribeWorkerDeploymentResponse.ProtoReflect.Descriptor instead. 
+func (*QueryDescribeWorkerDeploymentResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{21} +} + +func (x *QueryDescribeWorkerDeploymentResponse) GetState() *WorkerDeploymentLocalState { + if x != nil { + return x.State + } + return nil +} + +// used as Worker Deployment workflow query response: +type CreateRequestIDQueryResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + ConflictToken []byte `protobuf:"bytes,2,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateRequestIDQueryResponse) Reset() { + *x = CreateRequestIDQueryResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateRequestIDQueryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateRequestIDQueryResponse) ProtoMessage() {} + +func (x *CreateRequestIDQueryResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateRequestIDQueryResponse.ProtoReflect.Descriptor instead. 
+func (*CreateRequestIDQueryResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{22} +} + +func (x *CreateRequestIDQueryResponse) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *CreateRequestIDQueryResponse) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +// used as Worker Deployment Version workflow activity input: +type StartWorkerDeploymentRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentName string `protobuf:"bytes,1,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartWorkerDeploymentRequest) Reset() { + *x = StartWorkerDeploymentRequest{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartWorkerDeploymentRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartWorkerDeploymentRequest) ProtoMessage() {} + +func (x *StartWorkerDeploymentRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartWorkerDeploymentRequest.ProtoReflect.Descriptor instead. 
+func (*StartWorkerDeploymentRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{23} +} + +func (x *StartWorkerDeploymentRequest) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *StartWorkerDeploymentRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +// used as Worker Deployment workflow activity input: +type StartWorkerDeploymentVersionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentName string `protobuf:"bytes,1,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` + ComputeConfig *v12.ComputeConfigSummary `protobuf:"bytes,5,opt,name=compute_config,json=computeConfig,proto3" json:"compute_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartWorkerDeploymentVersionRequest) Reset() { + *x = StartWorkerDeploymentVersionRequest{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartWorkerDeploymentVersionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartWorkerDeploymentVersionRequest) ProtoMessage() {} + +func (x *StartWorkerDeploymentVersionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use StartWorkerDeploymentVersionRequest.ProtoReflect.Descriptor instead. +func (*StartWorkerDeploymentVersionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{24} +} + +func (x *StartWorkerDeploymentVersionRequest) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *StartWorkerDeploymentVersionRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +func (x *StartWorkerDeploymentVersionRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *StartWorkerDeploymentVersionRequest) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *StartWorkerDeploymentVersionRequest) GetComputeConfig() *v12.ComputeConfigSummary { + if x != nil { + return x.ComputeConfig + } + return nil +} + +// used as Worker Deployment Version workflow activity input: +type SyncDeploymentVersionUserDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentName string `protobuf:"bytes,4,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + Version *WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Sync []*SyncDeploymentVersionUserDataRequest_SyncUserData `protobuf:"bytes,2,rep,name=sync,proto3" json:"sync,omitempty"` + // if true, the version will be forgotten from the task queue user data. 
+ ForgetVersion bool `protobuf:"varint,3,opt,name=forget_version,json=forgetVersion,proto3" json:"forget_version,omitempty"` + // Async mode: full routing config to propagate (includes revision_number) + UpdateRoutingConfig *v11.RoutingConfig `protobuf:"bytes,5,opt,name=update_routing_config,json=updateRoutingConfig,proto3" json:"update_routing_config,omitempty"` + // Async mode: version-specific data to upsert + UpsertVersionData *WorkerDeploymentVersionData `protobuf:"bytes,6,opt,name=upsert_version_data,json=upsertVersionData,proto3" json:"upsert_version_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDeploymentVersionUserDataRequest) Reset() { + *x = SyncDeploymentVersionUserDataRequest{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDeploymentVersionUserDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDeploymentVersionUserDataRequest) ProtoMessage() {} + +func (x *SyncDeploymentVersionUserDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDeploymentVersionUserDataRequest.ProtoReflect.Descriptor instead. 
+func (*SyncDeploymentVersionUserDataRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{25} +} + +func (x *SyncDeploymentVersionUserDataRequest) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *SyncDeploymentVersionUserDataRequest) GetVersion() *WorkerDeploymentVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *SyncDeploymentVersionUserDataRequest) GetSync() []*SyncDeploymentVersionUserDataRequest_SyncUserData { + if x != nil { + return x.Sync + } + return nil +} + +func (x *SyncDeploymentVersionUserDataRequest) GetForgetVersion() bool { + if x != nil { + return x.ForgetVersion + } + return false +} + +func (x *SyncDeploymentVersionUserDataRequest) GetUpdateRoutingConfig() *v11.RoutingConfig { + if x != nil { + return x.UpdateRoutingConfig + } + return nil +} + +func (x *SyncDeploymentVersionUserDataRequest) GetUpsertVersionData() *WorkerDeploymentVersionData { + if x != nil { + return x.UpsertVersionData + } + return nil +} + +// used as Worker Deployment Version workflow activity output: +type SyncDeploymentVersionUserDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskQueueMaxVersions map[string]int64 `protobuf:"bytes,1,rep,name=task_queue_max_versions,json=taskQueueMaxVersions,proto3" json:"task_queue_max_versions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDeploymentVersionUserDataResponse) Reset() { + *x = SyncDeploymentVersionUserDataResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDeploymentVersionUserDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*SyncDeploymentVersionUserDataResponse) ProtoMessage() {} + +func (x *SyncDeploymentVersionUserDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDeploymentVersionUserDataResponse.ProtoReflect.Descriptor instead. +func (*SyncDeploymentVersionUserDataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{26} +} + +func (x *SyncDeploymentVersionUserDataResponse) GetTaskQueueMaxVersions() map[string]int64 { + if x != nil { + return x.TaskQueueMaxVersions + } + return nil +} + +// used as Worker Deployment Version workflow activity input: +type CheckWorkerDeploymentUserDataPropagationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskQueueMaxVersions map[string]int64 `protobuf:"bytes,1,rep,name=task_queue_max_versions,json=taskQueueMaxVersions,proto3" json:"task_queue_max_versions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CheckWorkerDeploymentUserDataPropagationRequest) Reset() { + *x = CheckWorkerDeploymentUserDataPropagationRequest{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CheckWorkerDeploymentUserDataPropagationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckWorkerDeploymentUserDataPropagationRequest) ProtoMessage() {} + +func (x *CheckWorkerDeploymentUserDataPropagationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[27] + if x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckWorkerDeploymentUserDataPropagationRequest.ProtoReflect.Descriptor instead. +func (*CheckWorkerDeploymentUserDataPropagationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{27} +} + +func (x *CheckWorkerDeploymentUserDataPropagationRequest) GetTaskQueueMaxVersions() map[string]int64 { + if x != nil { + return x.TaskQueueMaxVersions + } + return nil +} + +// used as Worker Deployment workflow activity input: +type SyncUnversionedRampActivityArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + CurrentVersion string `protobuf:"bytes,1,opt,name=current_version,json=currentVersion,proto3" json:"current_version,omitempty"` + UpdateArgs *SyncVersionStateUpdateArgs `protobuf:"bytes,2,opt,name=update_args,json=updateArgs,proto3" json:"update_args,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncUnversionedRampActivityArgs) Reset() { + *x = SyncUnversionedRampActivityArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncUnversionedRampActivityArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncUnversionedRampActivityArgs) ProtoMessage() {} + +func (x *SyncUnversionedRampActivityArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncUnversionedRampActivityArgs.ProtoReflect.Descriptor instead. 
+func (*SyncUnversionedRampActivityArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{28} +} + +func (x *SyncUnversionedRampActivityArgs) GetCurrentVersion() string { + if x != nil { + return x.CurrentVersion + } + return "" +} + +func (x *SyncUnversionedRampActivityArgs) GetUpdateArgs() *SyncVersionStateUpdateArgs { + if x != nil { + return x.UpdateArgs + } + return nil +} + +// used as Worker Deployment workflow activity output: +type SyncUnversionedRampActivityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskQueueMaxVersions map[string]int64 `protobuf:"bytes,1,rep,name=task_queue_max_versions,json=taskQueueMaxVersions,proto3" json:"task_queue_max_versions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncUnversionedRampActivityResponse) Reset() { + *x = SyncUnversionedRampActivityResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncUnversionedRampActivityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncUnversionedRampActivityResponse) ProtoMessage() {} + +func (x *SyncUnversionedRampActivityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncUnversionedRampActivityResponse.ProtoReflect.Descriptor instead. 
+func (*SyncUnversionedRampActivityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{29} +} + +func (x *SyncUnversionedRampActivityResponse) GetTaskQueueMaxVersions() map[string]int64 { + if x != nil { + return x.TaskQueueMaxVersions + } + return nil +} + +// used as Worker Deployment Version workflow update input: +type UpdateVersionMetadataArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + UpsertEntries map[string]*v13.Payload `protobuf:"bytes,1,rep,name=upsert_entries,json=upsertEntries,proto3" json:"upsert_entries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + RemoveEntries []string `protobuf:"bytes,2,rep,name=remove_entries,json=removeEntries,proto3" json:"remove_entries,omitempty"` + Identity string `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateVersionMetadataArgs) Reset() { + *x = UpdateVersionMetadataArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateVersionMetadataArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVersionMetadataArgs) ProtoMessage() {} + +func (x *UpdateVersionMetadataArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVersionMetadataArgs.ProtoReflect.Descriptor instead. 
+func (*UpdateVersionMetadataArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{30} +} + +func (x *UpdateVersionMetadataArgs) GetUpsertEntries() map[string]*v13.Payload { + if x != nil { + return x.UpsertEntries + } + return nil +} + +func (x *UpdateVersionMetadataArgs) GetRemoveEntries() []string { + if x != nil { + return x.RemoveEntries + } + return nil +} + +func (x *UpdateVersionMetadataArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +// used as Worker Deployment Version workflow update response: +type UpdateVersionMetadataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Metadata *v11.VersionMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateVersionMetadataResponse) Reset() { + *x = UpdateVersionMetadataResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateVersionMetadataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateVersionMetadataResponse) ProtoMessage() {} + +func (x *UpdateVersionMetadataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateVersionMetadataResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateVersionMetadataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{31} +} + +func (x *UpdateVersionMetadataResponse) GetMetadata() *v11.VersionMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +// used as Worker Deployment workflow update input: +type SetCurrentVersionArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + IgnoreMissingTaskQueues bool `protobuf:"varint,3,opt,name=ignore_missing_task_queues,json=ignoreMissingTaskQueues,proto3" json:"ignore_missing_task_queues,omitempty"` + ConflictToken []byte `protobuf:"bytes,4,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + AllowNoPollers bool `protobuf:"varint,5,opt,name=allow_no_pollers,json=allowNoPollers,proto3" json:"allow_no_pollers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetCurrentVersionArgs) Reset() { + *x = SetCurrentVersionArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetCurrentVersionArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetCurrentVersionArgs) ProtoMessage() {} + +func (x *SetCurrentVersionArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetCurrentVersionArgs.ProtoReflect.Descriptor instead. 
+func (*SetCurrentVersionArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{32} +} + +func (x *SetCurrentVersionArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *SetCurrentVersionArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *SetCurrentVersionArgs) GetIgnoreMissingTaskQueues() bool { + if x != nil { + return x.IgnoreMissingTaskQueues + } + return false +} + +func (x *SetCurrentVersionArgs) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +func (x *SetCurrentVersionArgs) GetAllowNoPollers() bool { + if x != nil { + return x.AllowNoPollers + } + return false +} + +// used as Worker Deployment update response: +type SetCurrentVersionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + PreviousVersion string `protobuf:"bytes,1,opt,name=previous_version,json=previousVersion,proto3" json:"previous_version,omitempty"` + ConflictToken []byte `protobuf:"bytes,2,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetCurrentVersionResponse) Reset() { + *x = SetCurrentVersionResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetCurrentVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetCurrentVersionResponse) ProtoMessage() {} + +func (x *SetCurrentVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use SetCurrentVersionResponse.ProtoReflect.Descriptor instead. +func (*SetCurrentVersionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{33} +} + +func (x *SetCurrentVersionResponse) GetPreviousVersion() string { + if x != nil { + return x.PreviousVersion + } + return "" +} + +func (x *SetCurrentVersionResponse) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +// used as Worker Deployment workflow update input: +type CreateWorkerDeploymentArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Retrying with same request id is a successful no-op. + // Retrying with different request id is an error. + // One deployment is deleted, same or different request id will re-create it. + RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateWorkerDeploymentArgs) Reset() { + *x = CreateWorkerDeploymentArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateWorkerDeploymentArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkerDeploymentArgs) ProtoMessage() {} + +func (x *CreateWorkerDeploymentArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkerDeploymentArgs.ProtoReflect.Descriptor instead. 
+func (*CreateWorkerDeploymentArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{34} +} + +func (x *CreateWorkerDeploymentArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *CreateWorkerDeploymentArgs) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +// used as Worker Deployment update response: +type CreateWorkerDeploymentResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ConflictToken []byte `protobuf:"bytes,1,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateWorkerDeploymentResponse) Reset() { + *x = CreateWorkerDeploymentResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateWorkerDeploymentResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkerDeploymentResponse) ProtoMessage() {} + +func (x *CreateWorkerDeploymentResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkerDeploymentResponse.ProtoReflect.Descriptor instead. 
+func (*CreateWorkerDeploymentResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{35} +} + +func (x *CreateWorkerDeploymentResponse) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +// used as Worker Deployment workflow update input: +type CreateWorkerDeploymentVersionArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Retrying with same request id is a successful no-op. + // Retrying with different request id (including auto-created) is an error. + RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Version string (.) + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + ComputeConfig *v12.ComputeConfig `protobuf:"bytes,4,opt,name=compute_config,json=computeConfig,proto3" json:"compute_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateWorkerDeploymentVersionArgs) Reset() { + *x = CreateWorkerDeploymentVersionArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateWorkerDeploymentVersionArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkerDeploymentVersionArgs) ProtoMessage() {} + +func (x *CreateWorkerDeploymentVersionArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkerDeploymentVersionArgs.ProtoReflect.Descriptor instead. 
+func (*CreateWorkerDeploymentVersionArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{36} +} + +func (x *CreateWorkerDeploymentVersionArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *CreateWorkerDeploymentVersionArgs) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *CreateWorkerDeploymentVersionArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *CreateWorkerDeploymentVersionArgs) GetComputeConfig() *v12.ComputeConfig { + if x != nil { + return x.ComputeConfig + } + return nil +} + +// used as Worker Deployment update response: +type CreateWorkerDeploymentVersionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateWorkerDeploymentVersionResponse) Reset() { + *x = CreateWorkerDeploymentVersionResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateWorkerDeploymentVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateWorkerDeploymentVersionResponse) ProtoMessage() {} + +func (x *CreateWorkerDeploymentVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateWorkerDeploymentVersionResponse.ProtoReflect.Descriptor instead. 
+func (*CreateWorkerDeploymentVersionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{37} +} + +// used as Worker Deployment workflow update input: +type DeleteVersionArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + SkipDrainage bool `protobuf:"varint,3,opt,name=skip_drainage,json=skipDrainage,proto3" json:"skip_drainage,omitempty"` + // If true, it would mean that the delete operation is initiated by the server internally. This is done on the + // event that the addition of a version exceeds the max number of versions allowed in a worker-deployment (defaultMaxVersions). + // False elsewhere. + ServerDelete bool `protobuf:"varint,4,opt,name=server_delete,json=serverDelete,proto3" json:"server_delete,omitempty"` + // version workflow does not block the update for tq propagation + AsyncPropagation bool `protobuf:"varint,5,opt,name=async_propagation,json=asyncPropagation,proto3" json:"async_propagation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteVersionArgs) Reset() { + *x = DeleteVersionArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteVersionArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteVersionArgs) ProtoMessage() {} + +func (x *DeleteVersionArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
DeleteVersionArgs.ProtoReflect.Descriptor instead. +func (*DeleteVersionArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{38} +} + +func (x *DeleteVersionArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *DeleteVersionArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *DeleteVersionArgs) GetSkipDrainage() bool { + if x != nil { + return x.SkipDrainage + } + return false +} + +func (x *DeleteVersionArgs) GetServerDelete() bool { + if x != nil { + return x.ServerDelete + } + return false +} + +func (x *DeleteVersionArgs) GetAsyncPropagation() bool { + if x != nil { + return x.AsyncPropagation + } + return false +} + +// used as Worker Deployment Activity input: +type DeleteVersionActivityArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + DeploymentName string `protobuf:"bytes,2,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` + RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + SkipDrainage bool `protobuf:"varint,5,opt,name=skip_drainage,json=skipDrainage,proto3" json:"skip_drainage,omitempty"` + AsyncPropagation bool `protobuf:"varint,6,opt,name=async_propagation,json=asyncPropagation,proto3" json:"async_propagation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteVersionActivityArgs) Reset() { + *x = DeleteVersionActivityArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteVersionActivityArgs) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*DeleteVersionActivityArgs) ProtoMessage() {} + +func (x *DeleteVersionActivityArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteVersionActivityArgs.ProtoReflect.Descriptor instead. +func (*DeleteVersionActivityArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{39} +} + +func (x *DeleteVersionActivityArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *DeleteVersionActivityArgs) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *DeleteVersionActivityArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *DeleteVersionActivityArgs) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *DeleteVersionActivityArgs) GetSkipDrainage() bool { + if x != nil { + return x.SkipDrainage + } + return false +} + +func (x *DeleteVersionActivityArgs) GetAsyncPropagation() bool { + if x != nil { + return x.AsyncPropagation + } + return false +} + +// used as Worker Deployment Activity input: +type CheckTaskQueuesHavePollersActivityArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Key: Task Queue Name + TaskQueuesAndTypes map[string]*CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes `protobuf:"bytes,1,rep,name=task_queues_and_types,json=taskQueuesAndTypes,proto3" json:"task_queues_and_types,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + WorkerDeploymentVersion *WorkerDeploymentVersion 
`protobuf:"bytes,2,opt,name=worker_deployment_version,json=workerDeploymentVersion,proto3" json:"worker_deployment_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CheckTaskQueuesHavePollersActivityArgs) Reset() { + *x = CheckTaskQueuesHavePollersActivityArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CheckTaskQueuesHavePollersActivityArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckTaskQueuesHavePollersActivityArgs) ProtoMessage() {} + +func (x *CheckTaskQueuesHavePollersActivityArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckTaskQueuesHavePollersActivityArgs.ProtoReflect.Descriptor instead. 
+func (*CheckTaskQueuesHavePollersActivityArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{40} +} + +func (x *CheckTaskQueuesHavePollersActivityArgs) GetTaskQueuesAndTypes() map[string]*CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes { + if x != nil { + return x.TaskQueuesAndTypes + } + return nil +} + +func (x *CheckTaskQueuesHavePollersActivityArgs) GetWorkerDeploymentVersion() *WorkerDeploymentVersion { + if x != nil { + return x.WorkerDeploymentVersion + } + return nil +} + +// used as Worker Deployment workflow update input: +type DeleteDeploymentArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteDeploymentArgs) Reset() { + *x = DeleteDeploymentArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteDeploymentArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteDeploymentArgs) ProtoMessage() {} + +func (x *DeleteDeploymentArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[41] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteDeploymentArgs.ProtoReflect.Descriptor instead. 
+func (*DeleteDeploymentArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{41} +} + +func (x *DeleteDeploymentArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +// used as Worker Deployment update response: +type SetRampingVersionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + PreviousVersion string `protobuf:"bytes,1,opt,name=previous_version,json=previousVersion,proto3" json:"previous_version,omitempty"` + PreviousPercentage float32 `protobuf:"fixed32,2,opt,name=previous_percentage,json=previousPercentage,proto3" json:"previous_percentage,omitempty"` + ConflictToken []byte `protobuf:"bytes,3,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetRampingVersionResponse) Reset() { + *x = SetRampingVersionResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetRampingVersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRampingVersionResponse) ProtoMessage() {} + +func (x *SetRampingVersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[42] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRampingVersionResponse.ProtoReflect.Descriptor instead. 
+func (*SetRampingVersionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{42} +} + +func (x *SetRampingVersionResponse) GetPreviousVersion() string { + if x != nil { + return x.PreviousVersion + } + return "" +} + +func (x *SetRampingVersionResponse) GetPreviousPercentage() float32 { + if x != nil { + return x.PreviousPercentage + } + return 0 +} + +func (x *SetRampingVersionResponse) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +// used as Worker Deployment workflow update input: +type SetRampingVersionArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Percentage float32 `protobuf:"fixed32,3,opt,name=percentage,proto3" json:"percentage,omitempty"` + IgnoreMissingTaskQueues bool `protobuf:"varint,4,opt,name=ignore_missing_task_queues,json=ignoreMissingTaskQueues,proto3" json:"ignore_missing_task_queues,omitempty"` + ConflictToken []byte `protobuf:"bytes,5,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + AllowNoPollers bool `protobuf:"varint,6,opt,name=allow_no_pollers,json=allowNoPollers,proto3" json:"allow_no_pollers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetRampingVersionArgs) Reset() { + *x = SetRampingVersionArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetRampingVersionArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetRampingVersionArgs) ProtoMessage() {} + +func (x *SetRampingVersionArgs) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_deployment_v1_message_proto_msgTypes[43] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetRampingVersionArgs.ProtoReflect.Descriptor instead. +func (*SetRampingVersionArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{43} +} + +func (x *SetRampingVersionArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *SetRampingVersionArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *SetRampingVersionArgs) GetPercentage() float32 { + if x != nil { + return x.Percentage + } + return 0 +} + +func (x *SetRampingVersionArgs) GetIgnoreMissingTaskQueues() bool { + if x != nil { + return x.IgnoreMissingTaskQueues + } + return false +} + +func (x *SetRampingVersionArgs) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +func (x *SetRampingVersionArgs) GetAllowNoPollers() bool { + if x != nil { + return x.AllowNoPollers + } + return false +} + +// used as Worker Deployment workflow update input: +type SetManagerIdentityArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + // identity is the client's identity, as usual + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // manager_identity is the new manager_identity. 
+ ManagerIdentity string `protobuf:"bytes,2,opt,name=manager_identity,json=managerIdentity,proto3" json:"manager_identity,omitempty"` + ConflictToken []byte `protobuf:"bytes,5,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetManagerIdentityArgs) Reset() { + *x = SetManagerIdentityArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetManagerIdentityArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetManagerIdentityArgs) ProtoMessage() {} + +func (x *SetManagerIdentityArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[44] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetManagerIdentityArgs.ProtoReflect.Descriptor instead. 
+func (*SetManagerIdentityArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{44} +} + +func (x *SetManagerIdentityArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *SetManagerIdentityArgs) GetManagerIdentity() string { + if x != nil { + return x.ManagerIdentity + } + return "" +} + +func (x *SetManagerIdentityArgs) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +// used as Worker Deployment update response: +type SetManagerIdentityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + PreviousManagerIdentity string `protobuf:"bytes,1,opt,name=previous_manager_identity,json=previousManagerIdentity,proto3" json:"previous_manager_identity,omitempty"` + ConflictToken []byte `protobuf:"bytes,2,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetManagerIdentityResponse) Reset() { + *x = SetManagerIdentityResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetManagerIdentityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetManagerIdentityResponse) ProtoMessage() {} + +func (x *SetManagerIdentityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[45] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetManagerIdentityResponse.ProtoReflect.Descriptor instead. 
+func (*SetManagerIdentityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{45} +} + +func (x *SetManagerIdentityResponse) GetPreviousManagerIdentity() string { + if x != nil { + return x.PreviousManagerIdentity + } + return "" +} + +func (x *SetManagerIdentityResponse) GetConflictToken() []byte { + if x != nil { + return x.ConflictToken + } + return nil +} + +// used as Worker Deployment activity input: +type SyncVersionStateActivityArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentName string `protobuf:"bytes,1,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + // . or possibly just in the future + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + UpdateArgs *SyncVersionStateUpdateArgs `protobuf:"bytes,3,opt,name=update_args,json=updateArgs,proto3" json:"update_args,omitempty"` + RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncVersionStateActivityArgs) Reset() { + *x = SyncVersionStateActivityArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncVersionStateActivityArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncVersionStateActivityArgs) ProtoMessage() {} + +func (x *SyncVersionStateActivityArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[46] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncVersionStateActivityArgs.ProtoReflect.Descriptor instead. 
+func (*SyncVersionStateActivityArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{46} +} + +func (x *SyncVersionStateActivityArgs) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *SyncVersionStateActivityArgs) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *SyncVersionStateActivityArgs) GetUpdateArgs() *SyncVersionStateUpdateArgs { + if x != nil { + return x.UpdateArgs + } + return nil +} + +func (x *SyncVersionStateActivityArgs) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +// used as Worker Deployment activity result: +type SyncVersionStateActivityResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto. + VersionState *VersionLocalState `protobuf:"bytes,1,opt,name=version_state,json=versionState,proto3" json:"version_state,omitempty"` + Summary *WorkerDeploymentVersionSummary `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncVersionStateActivityResult) Reset() { + *x = SyncVersionStateActivityResult{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncVersionStateActivityResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncVersionStateActivityResult) ProtoMessage() {} + +func (x *SyncVersionStateActivityResult) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[47] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use SyncVersionStateActivityResult.ProtoReflect.Descriptor instead. +func (*SyncVersionStateActivityResult) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{47} +} + +// Deprecated: Marked as deprecated in temporal/server/api/deployment/v1/message.proto. +func (x *SyncVersionStateActivityResult) GetVersionState() *VersionLocalState { + if x != nil { + return x.VersionState + } + return nil +} + +func (x *SyncVersionStateActivityResult) GetSummary() *WorkerDeploymentVersionSummary { + if x != nil { + return x.Summary + } + return nil +} + +// used as Worker Deployment activity input: +type IsVersionMissingTaskQueuesArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + PrevCurrentVersion string `protobuf:"bytes,1,opt,name=prev_current_version,json=prevCurrentVersion,proto3" json:"prev_current_version,omitempty"` + NewCurrentVersion string `protobuf:"bytes,2,opt,name=new_current_version,json=newCurrentVersion,proto3" json:"new_current_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IsVersionMissingTaskQueuesArgs) Reset() { + *x = IsVersionMissingTaskQueuesArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IsVersionMissingTaskQueuesArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsVersionMissingTaskQueuesArgs) ProtoMessage() {} + +func (x *IsVersionMissingTaskQueuesArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[48] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IsVersionMissingTaskQueuesArgs.ProtoReflect.Descriptor instead. 
+func (*IsVersionMissingTaskQueuesArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{48} +} + +func (x *IsVersionMissingTaskQueuesArgs) GetPrevCurrentVersion() string { + if x != nil { + return x.PrevCurrentVersion + } + return "" +} + +func (x *IsVersionMissingTaskQueuesArgs) GetNewCurrentVersion() string { + if x != nil { + return x.NewCurrentVersion + } + return "" +} + +// used as Worker Deployment activity output: +type IsVersionMissingTaskQueuesResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + IsMissingTaskQueues bool `protobuf:"varint,1,opt,name=is_missing_task_queues,json=isMissingTaskQueues,proto3" json:"is_missing_task_queues,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IsVersionMissingTaskQueuesResult) Reset() { + *x = IsVersionMissingTaskQueuesResult{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IsVersionMissingTaskQueuesResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsVersionMissingTaskQueuesResult) ProtoMessage() {} + +func (x *IsVersionMissingTaskQueuesResult) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[49] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IsVersionMissingTaskQueuesResult.ProtoReflect.Descriptor instead. 
+func (*IsVersionMissingTaskQueuesResult) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{49} +} + +func (x *IsVersionMissingTaskQueuesResult) GetIsMissingTaskQueues() bool { + if x != nil { + return x.IsMissingTaskQueues + } + return false +} + +// used as Worker Deployment workflow memo: +type WorkerDeploymentWorkflowMemo struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentName string `protobuf:"bytes,1,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + RoutingConfig *v11.RoutingConfig `protobuf:"bytes,3,opt,name=routing_config,json=routingConfig,proto3" json:"routing_config,omitempty"` + LatestVersionSummary *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary `protobuf:"bytes,4,opt,name=latest_version_summary,json=latestVersionSummary,proto3" json:"latest_version_summary,omitempty"` + CurrentVersionSummary *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary `protobuf:"bytes,5,opt,name=current_version_summary,json=currentVersionSummary,proto3" json:"current_version_summary,omitempty"` + RampingVersionSummary *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary `protobuf:"bytes,6,opt,name=ramping_version_summary,json=rampingVersionSummary,proto3" json:"ramping_version_summary,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerDeploymentWorkflowMemo) Reset() { + *x = WorkerDeploymentWorkflowMemo{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerDeploymentWorkflowMemo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerDeploymentWorkflowMemo) ProtoMessage() {} + +func (x 
*WorkerDeploymentWorkflowMemo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[50] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerDeploymentWorkflowMemo.ProtoReflect.Descriptor instead. +func (*WorkerDeploymentWorkflowMemo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{50} +} + +func (x *WorkerDeploymentWorkflowMemo) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *WorkerDeploymentWorkflowMemo) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *WorkerDeploymentWorkflowMemo) GetRoutingConfig() *v11.RoutingConfig { + if x != nil { + return x.RoutingConfig + } + return nil +} + +func (x *WorkerDeploymentWorkflowMemo) GetLatestVersionSummary() *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary { + if x != nil { + return x.LatestVersionSummary + } + return nil +} + +func (x *WorkerDeploymentWorkflowMemo) GetCurrentVersionSummary() *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary { + if x != nil { + return x.CurrentVersionSummary + } + return nil +} + +func (x *WorkerDeploymentWorkflowMemo) GetRampingVersionSummary() *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary { + if x != nil { + return x.RampingVersionSummary + } + return nil +} + +// Subset of fields of WorkerDeploymentInfo returned in ListWorkerDeploymentsResponse +type WorkerDeploymentSummary struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + RoutingConfig *v11.RoutingConfig 
`protobuf:"bytes,3,opt,name=routing_config,json=routingConfig,proto3" json:"routing_config,omitempty"` + LatestVersionSummary *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary `protobuf:"bytes,4,opt,name=latest_version_summary,json=latestVersionSummary,proto3" json:"latest_version_summary,omitempty"` + CurrentVersionSummary *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary `protobuf:"bytes,5,opt,name=current_version_summary,json=currentVersionSummary,proto3" json:"current_version_summary,omitempty"` + RampingVersionSummary *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary `protobuf:"bytes,6,opt,name=ramping_version_summary,json=rampingVersionSummary,proto3" json:"ramping_version_summary,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerDeploymentSummary) Reset() { + *x = WorkerDeploymentSummary{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerDeploymentSummary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerDeploymentSummary) ProtoMessage() {} + +func (x *WorkerDeploymentSummary) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[51] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerDeploymentSummary.ProtoReflect.Descriptor instead. 
+func (*WorkerDeploymentSummary) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{51} +} + +func (x *WorkerDeploymentSummary) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *WorkerDeploymentSummary) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *WorkerDeploymentSummary) GetRoutingConfig() *v11.RoutingConfig { + if x != nil { + return x.RoutingConfig + } + return nil +} + +func (x *WorkerDeploymentSummary) GetLatestVersionSummary() *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary { + if x != nil { + return x.LatestVersionSummary + } + return nil +} + +func (x *WorkerDeploymentSummary) GetCurrentVersionSummary() *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary { + if x != nil { + return x.CurrentVersionSummary + } + return nil +} + +func (x *WorkerDeploymentSummary) GetRampingVersionSummary() *v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary { + if x != nil { + return x.RampingVersionSummary + } + return nil +} + +// Input for the activity that validates compute config scaling groups via +// the Worker Controller Instance client. 
+type ValidateWorkerControllerInstanceSpecInput struct { + state protoimpl.MessageState `protogen:"open.v1"` + ScalingGroups map[string]*v12.ComputeConfigScalingGroup `protobuf:"bytes,1,rep,name=scaling_groups,json=scalingGroups,proto3" json:"scaling_groups,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateWorkerControllerInstanceSpecInput) Reset() { + *x = ValidateWorkerControllerInstanceSpecInput{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateWorkerControllerInstanceSpecInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateWorkerControllerInstanceSpecInput) ProtoMessage() {} + +func (x *ValidateWorkerControllerInstanceSpecInput) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[52] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateWorkerControllerInstanceSpecInput.ProtoReflect.Descriptor instead. +func (*ValidateWorkerControllerInstanceSpecInput) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{52} +} + +func (x *ValidateWorkerControllerInstanceSpecInput) GetScalingGroups() map[string]*v12.ComputeConfigScalingGroup { + if x != nil { + return x.ScalingGroups + } + return nil +} + +// used as activity input for creating or updating a Worker Controller Instance +// via the WCI client. 
+type UpdateWorkerControllerInstanceInput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version *v11.WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Identity string `protobuf:"bytes,2,opt,name=identity,proto3" json:"identity,omitempty"` + UpsertScalingGroups map[string]*v12.ComputeConfigScalingGroupUpdate `protobuf:"bytes,3,rep,name=upsert_scaling_groups,json=upsertScalingGroups,proto3" json:"upsert_scaling_groups,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + RemoveScalingGroups []string `protobuf:"bytes,4,rep,name=remove_scaling_groups,json=removeScalingGroups,proto3" json:"remove_scaling_groups,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateWorkerControllerInstanceInput) Reset() { + *x = UpdateWorkerControllerInstanceInput{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateWorkerControllerInstanceInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateWorkerControllerInstanceInput) ProtoMessage() {} + +func (x *UpdateWorkerControllerInstanceInput) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[53] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateWorkerControllerInstanceInput.ProtoReflect.Descriptor instead. 
+func (*UpdateWorkerControllerInstanceInput) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{53} +} + +func (x *UpdateWorkerControllerInstanceInput) GetVersion() *v11.WorkerDeploymentVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *UpdateWorkerControllerInstanceInput) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *UpdateWorkerControllerInstanceInput) GetUpsertScalingGroups() map[string]*v12.ComputeConfigScalingGroupUpdate { + if x != nil { + return x.UpsertScalingGroups + } + return nil +} + +func (x *UpdateWorkerControllerInstanceInput) GetRemoveScalingGroups() []string { + if x != nil { + return x.RemoveScalingGroups + } + return nil +} + +// used as activity input for deleting a Worker Controller Instance +// via the WCI client. +type DeleteWorkerControllerInstanceInput struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version *v11.WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Identity string `protobuf:"bytes,2,opt,name=identity,proto3" json:"identity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteWorkerControllerInstanceInput) Reset() { + *x = DeleteWorkerControllerInstanceInput{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteWorkerControllerInstanceInput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteWorkerControllerInstanceInput) ProtoMessage() {} + +func (x *DeleteWorkerControllerInstanceInput) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[54] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + 
return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteWorkerControllerInstanceInput.ProtoReflect.Descriptor instead. +func (*DeleteWorkerControllerInstanceInput) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{54} +} + +func (x *DeleteWorkerControllerInstanceInput) GetVersion() *v11.WorkerDeploymentVersion { + if x != nil { + return x.Version + } + return nil +} + +func (x *DeleteWorkerControllerInstanceInput) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +// Input for the UpdateComputeConfig workflow update on a version workflow. +type UpdateComputeConfigArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Scaling groups to add or update. + UpsertScalingGroups map[string]*v12.ComputeConfigScalingGroupUpdate `protobuf:"bytes,3,rep,name=upsert_scaling_groups,json=upsertScalingGroups,proto3" json:"upsert_scaling_groups,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Names of scaling groups to remove. Names that don't match an existing group are ignored. 
+ RemoveScalingGroups []string `protobuf:"bytes,4,rep,name=remove_scaling_groups,json=removeScalingGroups,proto3" json:"remove_scaling_groups,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateComputeConfigArgs) Reset() { + *x = UpdateComputeConfigArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateComputeConfigArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateComputeConfigArgs) ProtoMessage() {} + +func (x *UpdateComputeConfigArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[55] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateComputeConfigArgs.ProtoReflect.Descriptor instead. +func (*UpdateComputeConfigArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{55} +} + +func (x *UpdateComputeConfigArgs) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *UpdateComputeConfigArgs) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *UpdateComputeConfigArgs) GetUpsertScalingGroups() map[string]*v12.ComputeConfigScalingGroupUpdate { + if x != nil { + return x.UpsertScalingGroups + } + return nil +} + +func (x *UpdateComputeConfigArgs) GetRemoveScalingGroups() []string { + if x != nil { + return x.RemoveScalingGroups + } + return nil +} + +// Response for the UpdateComputeConfig workflow update on a version workflow. 
+type UpdateComputeConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateComputeConfigResponse) Reset() { + *x = UpdateComputeConfigResponse{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateComputeConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateComputeConfigResponse) ProtoMessage() {} + +func (x *UpdateComputeConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[56] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateComputeConfigResponse.ProtoReflect.Descriptor instead. +func (*UpdateComputeConfigResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{56} +} + +// Signal input for force-continue-as-new on Deployment workflow +type ForceCANDeploymentSignalArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + // If provided, this state will be used instead of the current state + // when performing continue-as-new. 
+ OverrideState *WorkerDeploymentLocalState `protobuf:"bytes,1,opt,name=override_state,json=overrideState,proto3" json:"override_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceCANDeploymentSignalArgs) Reset() { + *x = ForceCANDeploymentSignalArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceCANDeploymentSignalArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceCANDeploymentSignalArgs) ProtoMessage() {} + +func (x *ForceCANDeploymentSignalArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[57] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceCANDeploymentSignalArgs.ProtoReflect.Descriptor instead. +func (*ForceCANDeploymentSignalArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{57} +} + +func (x *ForceCANDeploymentSignalArgs) GetOverrideState() *WorkerDeploymentLocalState { + if x != nil { + return x.OverrideState + } + return nil +} + +// Signal input for force-continue-as-new on Version workflow +type ForceCANVersionSignalArgs struct { + state protoimpl.MessageState `protogen:"open.v1"` + // If provided, this state will be used instead of the current state + // when performing continue-as-new. 
+ OverrideState *VersionLocalState `protobuf:"bytes,1,opt,name=override_state,json=overrideState,proto3" json:"override_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceCANVersionSignalArgs) Reset() { + *x = ForceCANVersionSignalArgs{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceCANVersionSignalArgs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceCANVersionSignalArgs) ProtoMessage() {} + +func (x *ForceCANVersionSignalArgs) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[58] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceCANVersionSignalArgs.ProtoReflect.Descriptor instead. 
+func (*ForceCANVersionSignalArgs) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{58} +} + +func (x *ForceCANVersionSignalArgs) GetOverrideState() *VersionLocalState { + if x != nil { + return x.OverrideState + } + return nil +} + +type VersionLocalState_TaskQueueFamilyData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Key: Task Queue Type + TaskQueues map[int32]*TaskQueueVersionData `protobuf:"bytes,1,rep,name=task_queues,json=taskQueues,proto3" json:"task_queues,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VersionLocalState_TaskQueueFamilyData) Reset() { + *x = VersionLocalState_TaskQueueFamilyData{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VersionLocalState_TaskQueueFamilyData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionLocalState_TaskQueueFamilyData) ProtoMessage() {} + +func (x *VersionLocalState_TaskQueueFamilyData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[60] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionLocalState_TaskQueueFamilyData.ProtoReflect.Descriptor instead. 
+func (*VersionLocalState_TaskQueueFamilyData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *VersionLocalState_TaskQueueFamilyData) GetTaskQueues() map[int32]*TaskQueueVersionData { + if x != nil { + return x.TaskQueues + } + return nil +} + +type SyncDeploymentVersionUserDataRequest_SyncUserData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Types []v1.TaskQueueType `protobuf:"varint,2,rep,packed,name=types,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"types,omitempty"` + Data *DeploymentVersionData `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDeploymentVersionUserDataRequest_SyncUserData) Reset() { + *x = SyncDeploymentVersionUserDataRequest_SyncUserData{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDeploymentVersionUserDataRequest_SyncUserData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDeploymentVersionUserDataRequest_SyncUserData) ProtoMessage() {} + +func (x *SyncDeploymentVersionUserDataRequest_SyncUserData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[64] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDeploymentVersionUserDataRequest_SyncUserData.ProtoReflect.Descriptor instead. 
+func (*SyncDeploymentVersionUserDataRequest_SyncUserData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{25, 0} +} + +func (x *SyncDeploymentVersionUserDataRequest_SyncUserData) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SyncDeploymentVersionUserDataRequest_SyncUserData) GetTypes() []v1.TaskQueueType { + if x != nil { + return x.Types + } + return nil +} + +func (x *SyncDeploymentVersionUserDataRequest_SyncUserData) GetData() *DeploymentVersionData { + if x != nil { + return x.Data + } + return nil +} + +type CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes struct { + state protoimpl.MessageState `protogen:"open.v1"` + Types []v1.TaskQueueType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"types,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes) Reset() { + *x = CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes{} + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes) ProtoMessage() {} + +func (x *CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_deployment_v1_message_proto_msgTypes[70] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes.ProtoReflect.Descriptor instead. 
+func (*CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP(), []int{40, 1} +} + +func (x *CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes) GetTypes() []v1.TaskQueueType { + if x != nil { + return x.Types + } + return nil +} + +var File_temporal_server_api_deployment_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_deployment_v1_message_proto_rawDesc = "" + + "\n" + + "/temporal/server/api/deployment/v1/message.proto\x12!temporal.server.api.deployment.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a$temporal/api/compute/v1/config.proto\x1a(temporal/api/deployment/v1/message.proto\x1a&temporal/api/enums/v1/deployment.proto\x1a&temporal/api/enums/v1/task_queue.proto\"]\n" + + "\x17WorkerDeploymentVersion\x12'\n" + + "\x0fdeployment_name\x18\x01 \x01(\tR\x0edeploymentName\x12\x19\n" + + "\bbuild_id\x18\x02 \x01(\tR\abuildId\"\xc4\x03\n" + + "\x15DeploymentVersionData\x12T\n" + + "\aversion\x18\x01 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\x12J\n" + + "\x13routing_update_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x11routingUpdateTime\x12H\n" + + "\x12current_since_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x10currentSinceTime\x12H\n" + + "\x12ramping_since_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x10rampingSinceTime\x12'\n" + + "\x0framp_percentage\x18\x05 \x01(\x02R\x0erampPercentage\x12L\n" + + "\x06status\x18\x06 \x01(\x0e24.temporal.api.enums.v1.WorkerDeploymentVersionStatusR\x06status\"\xeb\x01\n" + + "\x1bWorkerDeploymentVersionData\x12'\n" + + "\x0frevision_number\x18\x01 \x01(\x03R\x0erevisionNumber\x12;\n" + + "\vupdate_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "updateTime\x12\x18\n" + + "\adeleted\x18\x03 \x01(\bR\adeleted\x12L\n" + + "\x06status\x18\x06 
\x01(\x0e24.temporal.api.enums.v1.WorkerDeploymentVersionStatusR\x06status\"\xc0\r\n" + + "\x11VersionLocalState\x12T\n" + + "\aversion\x18\x01 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\x12;\n" + + "\vcreate_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\x12J\n" + + "\x13routing_update_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x11routingUpdateTime\x12H\n" + + "\x12current_since_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x10currentSinceTime\x12H\n" + + "\x12ramping_since_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x10rampingSinceTime\x12'\n" + + "\x0framp_percentage\x18\x06 \x01(\x02R\x0erampPercentage\x12N\n" + + "\x15first_activation_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\x13firstActivationTime\x12F\n" + + "\x11last_current_time\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\x0flastCurrentTime\x12P\n" + + "\x16last_deactivation_time\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\x14lastDeactivationTime\x12T\n" + + "\rdrainage_info\x18\a \x01(\v2/.temporal.api.deployment.v1.VersionDrainageInfoR\fdrainageInfo\x12G\n" + + "\bmetadata\x18\b \x01(\v2+.temporal.api.deployment.v1.VersionMetadataR\bmetadata\x12B\n" + + "\x1bstarted_deployment_workflow\x18\t \x01(\bB\x02\x18\x01R\x19startedDeploymentWorkflow\x12{\n" + + "\x13task_queue_families\x18\n" + + " \x03(\v2K.temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamiliesEntryR\x11taskQueueFamilies\x12&\n" + + "\x0fsync_batch_size\x18\v \x01(\x05R\rsyncBatchSize\x12L\n" + + "\x06status\x18\x0e \x01(\x0e24.temporal.api.enums.v1.WorkerDeploymentVersionStatusR\x06status\x12'\n" + + "\x0frevision_number\x18\x0f \x01(\x03R\x0erevisionNumber\x124\n" + + "\x16last_modifier_identity\x18\x11 \x01(\tR\x14lastModifierIdentity\x12T\n" + + "\x0ecompute_config\x18\x12 \x01(\v2-.temporal.api.compute.v1.ComputeConfigSummaryR\rcomputeConfig\x1a\x8e\x01\n" + + "\x16TaskQueueFamiliesEntry\x12\x10\n" + + "\x03key\x18\x01 
\x01(\tR\x03key\x12^\n" + + "\x05value\x18\x02 \x01(\v2H.temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyDataR\x05value:\x028\x01\x1a\x88\x02\n" + + "\x13TaskQueueFamilyData\x12y\n" + + "\vtask_queues\x18\x01 \x03(\v2X.temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData.TaskQueuesEntryR\n" + + "taskQueues\x1av\n" + + "\x0fTaskQueuesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12M\n" + + "\x05value\x18\x02 \x01(\v27.temporal.server.api.deployment.v1.TaskQueueVersionDataR\x05value:\x028\x01\"\x16\n" + + "\x14TaskQueueVersionData\"\xca\x01\n" + + "#WorkerDeploymentVersionWorkflowArgs\x12%\n" + + "\x0enamespace_name\x18\x01 \x01(\tR\rnamespaceName\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12Y\n" + + "\rversion_state\x18\x03 \x01(\v24.temporal.server.api.deployment.v1.VersionLocalStateR\fversionState\"\xe6\x01\n" + + "\x1cWorkerDeploymentWorkflowArgs\x12%\n" + + "\x0enamespace_name\x18\x01 \x01(\tR\rnamespaceName\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12'\n" + + "\x0fdeployment_name\x18\x03 \x01(\tR\x0edeploymentName\x12S\n" + + "\x05state\x18\x04 \x01(\v2=.temporal.server.api.deployment.v1.WorkerDeploymentLocalStateR\x05state\"\x82\a\n" + + "\x1aWorkerDeploymentLocalState\x12;\n" + + "\vcreate_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\x12P\n" + + "\x0erouting_config\x18\x02 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\rroutingConfig\x12g\n" + + "\bversions\x18\x03 \x03(\v2K.temporal.server.api.deployment.v1.WorkerDeploymentLocalState.VersionsEntryR\bversions\x12%\n" + + "\x0econflict_token\x18\x04 \x01(\fR\rconflictToken\x124\n" + + "\x16last_modifier_identity\x18\x05 \x01(\tR\x14lastModifierIdentity\x12&\n" + + "\x0fsync_batch_size\x18\x06 \x01(\x05R\rsyncBatchSize\x12)\n" + + "\x10manager_identity\x18\a \x01(\tR\x0fmanagerIdentity\x12\x8c\x01\n" + + "\x15propagating_revisions\x18\b 
\x03(\v2W.temporal.server.api.deployment.v1.WorkerDeploymentLocalState.PropagatingRevisionsEntryR\x14propagatingRevisions\x12*\n" + + "\x11create_request_id\x18\t \x01(\tR\x0fcreateRequestId\x1a~\n" + + "\rVersionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12W\n" + + "\x05value\x18\x02 \x01(\v2A.temporal.server.api.deployment.v1.WorkerDeploymentVersionSummaryR\x05value:\x028\x01\x1a\x80\x01\n" + + "\x19PropagatingRevisionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12M\n" + + "\x05value\x18\x02 \x01(\v27.temporal.server.api.deployment.v1.PropagatingRevisionsR\x05value:\x028\x01\"A\n" + + "\x14PropagatingRevisions\x12)\n" + + "\x10revision_numbers\x18\x01 \x03(\x03R\x0frevisionNumbers\"\xc2\a\n" + + "\x1eWorkerDeploymentVersionSummary\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x12;\n" + + "\vcreate_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\x12Y\n" + + "\x0fdrainage_status\x18\x03 \x01(\x0e2,.temporal.api.enums.v1.VersionDrainageStatusB\x02\x18\x01R\x0edrainageStatus\x12T\n" + + "\rdrainage_info\x18\x04 \x01(\v2/.temporal.api.deployment.v1.VersionDrainageInfoR\fdrainageInfo\x12J\n" + + "\x13routing_update_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x11routingUpdateTime\x12H\n" + + "\x12current_since_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x10currentSinceTime\x12H\n" + + "\x12ramping_since_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x10rampingSinceTime\x12N\n" + + "\x15first_activation_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x13firstActivationTime\x12F\n" + + "\x11last_current_time\x18\v \x01(\v2\x1a.google.protobuf.TimestampR\x0flastCurrentTime\x12P\n" + + "\x16last_deactivation_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\x14lastDeactivationTime\x12L\n" + + "\x06status\x18\n" + + " \x01(\x0e24.temporal.api.enums.v1.WorkerDeploymentVersionStatusR\x06status\x12*\n" + + "\x11create_request_id\x18\f \x01(\tR\x0fcreateRequestId\x12T\n" + + "\x0ecompute_config\x18\r 
\x01(\v2-.temporal.api.compute.v1.ComputeConfigSummaryR\rcomputeConfig\"\xa7\x02\n" + + "\x1bRegisterWorkerInVersionArgs\x12&\n" + + "\x0ftask_queue_name\x18\x01 \x01(\tR\rtaskQueueName\x12L\n" + + "\x0ftask_queue_type\x18\x02 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12&\n" + + "\x0fmax_task_queues\x18\x03 \x01(\x05R\rmaxTaskQueues\x12\x18\n" + + "\aversion\x18\x04 \x01(\tR\aversion\x12P\n" + + "\x0erouting_config\x18\x05 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\rroutingConfig\"\x9a\x02\n" + + "$RegisterWorkerInWorkerDeploymentArgs\x12&\n" + + "\x0ftask_queue_name\x18\x01 \x01(\tR\rtaskQueueName\x12L\n" + + "\x0ftask_queue_type\x18\x02 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12&\n" + + "\x0fmax_task_queues\x18\x03 \x01(\x05R\rmaxTaskQueues\x12T\n" + + "\aversion\x18\x04 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\"K\n" + + "/DescribeVersionFromWorkerDeploymentActivityArgs\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\"\xab\x01\n" + + "1DescribeVersionFromWorkerDeploymentActivityResult\x12v\n" + + "\x10task_queue_infos\x18\x01 \x03(\v2L.temporal.api.deployment.v1.WorkerDeploymentVersionInfo.VersionTaskQueueInfoR\x0etaskQueueInfos\"\x87\x03\n" + + "\x1aSyncVersionStateUpdateArgs\x12N\n" + + "\x13routing_update_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampB\x02\x18\x01R\x11routingUpdateTime\x12L\n" + + "\x12current_since_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampB\x02\x18\x01R\x10currentSinceTime\x12L\n" + + "\x12ramping_since_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampB\x02\x18\x01R\x10rampingSinceTime\x12+\n" + + "\x0framp_percentage\x18\x04 \x01(\x02B\x02\x18\x01R\x0erampPercentage\x12P\n" + + "\x0erouting_config\x18\x05 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\rroutingConfig\"\xd6\x01\n" + + "\x18SyncVersionStateResponse\x12]\n" + + "\rversion_state\x18\x01 
\x01(\v24.temporal.server.api.deployment.v1.VersionLocalStateB\x02\x18\x01R\fversionState\x12[\n" + + "\asummary\x18\x02 \x01(\v2A.temporal.server.api.deployment.v1.WorkerDeploymentVersionSummaryR\asummary\"m\n" + + "\x14AddVersionUpdateArgs\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x12;\n" + + "\vcreate_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\"r\n" + + "\x1aSyncDrainageInfoSignalArgs\x12T\n" + + "\rdrainage_info\x18\x01 \x01(\v2/.temporal.api.deployment.v1.VersionDrainageInfoR\fdrainageInfo\"\x8f\x01\n" + + "\x1cSyncDrainageStatusSignalArgs\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x12U\n" + + "\x0fdrainage_status\x18\x02 \x01(\x0e2,.temporal.api.enums.v1.VersionDrainageStatusR\x0edrainageStatus\"_\n" + + "\x19PropagationCompletionInfo\x12'\n" + + "\x0frevision_number\x18\x01 \x01(\x03R\x0erevisionNumber\x12\x19\n" + + "\bbuild_id\x18\x02 \x01(\tR\abuildId\"y\n" + + "\x1cQueryDescribeVersionResponse\x12Y\n" + + "\rversion_state\x18\x01 \x01(\v24.temporal.server.api.deployment.v1.VersionLocalStateR\fversionState\"|\n" + + "%QueryDescribeWorkerDeploymentResponse\x12S\n" + + "\x05state\x18\x01 \x01(\v2=.temporal.server.api.deployment.v1.WorkerDeploymentLocalStateR\x05state\"d\n" + + "\x1cCreateRequestIDQueryResponse\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\x12%\n" + + "\x0econflict_token\x18\x02 \x01(\fR\rconflictToken\"f\n" + + "\x1cStartWorkerDeploymentRequest\x12'\n" + + "\x0fdeployment_name\x18\x01 \x01(\tR\x0edeploymentName\x12\x1d\n" + + "\n" + + "request_id\x18\x02 \x01(\tR\trequestId\"\xfa\x01\n" + + "#StartWorkerDeploymentVersionRequest\x12'\n" + + "\x0fdeployment_name\x18\x01 \x01(\tR\x0edeploymentName\x12\x19\n" + + "\bbuild_id\x18\x02 \x01(\tR\abuildId\x12\x1d\n" + + "\n" + + "request_id\x18\x03 \x01(\tR\trequestId\x12\x1a\n" + + "\bidentity\x18\x04 \x01(\tR\bidentity\x12T\n" + + "\x0ecompute_config\x18\x05 
\x01(\v2-.temporal.api.compute.v1.ComputeConfigSummaryR\rcomputeConfig\"\xb4\x05\n" + + "$SyncDeploymentVersionUserDataRequest\x12'\n" + + "\x0fdeployment_name\x18\x04 \x01(\tR\x0edeploymentName\x12T\n" + + "\aversion\x18\x01 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\x12h\n" + + "\x04sync\x18\x02 \x03(\v2T.temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.SyncUserDataR\x04sync\x12%\n" + + "\x0eforget_version\x18\x03 \x01(\bR\rforgetVersion\x12]\n" + + "\x15update_routing_config\x18\x05 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\x13updateRoutingConfig\x12n\n" + + "\x13upsert_version_data\x18\x06 \x01(\v2>.temporal.server.api.deployment.v1.WorkerDeploymentVersionDataR\x11upsertVersionData\x1a\xac\x01\n" + + "\fSyncUserData\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12:\n" + + "\x05types\x18\x02 \x03(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\x05types\x12L\n" + + "\x04data\x18\x03 \x01(\v28.temporal.server.api.deployment.v1.DeploymentVersionDataR\x04data\"\x8c\x02\n" + + "%SyncDeploymentVersionUserDataResponse\x12\x99\x01\n" + + "\x17task_queue_max_versions\x18\x01 \x03(\v2b.temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataResponse.TaskQueueMaxVersionsEntryR\x14taskQueueMaxVersions\x1aG\n" + + "\x19TaskQueueMaxVersionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01\"\xa0\x02\n" + + "/CheckWorkerDeploymentUserDataPropagationRequest\x12\xa3\x01\n" + + "\x17task_queue_max_versions\x18\x01 \x03(\v2l.temporal.server.api.deployment.v1.CheckWorkerDeploymentUserDataPropagationRequest.TaskQueueMaxVersionsEntryR\x14taskQueueMaxVersions\x1aG\n" + + "\x19TaskQueueMaxVersionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01\"\xaa\x01\n" + + "\x1fSyncUnversionedRampActivityArgs\x12'\n" + + "\x0fcurrent_version\x18\x01 \x01(\tR\x0ecurrentVersion\x12^\n" + + 
"\vupdate_args\x18\x02 \x01(\v2=.temporal.server.api.deployment.v1.SyncVersionStateUpdateArgsR\n" + + "updateArgs\"\x88\x02\n" + + "#SyncUnversionedRampActivityResponse\x12\x97\x01\n" + + "\x17task_queue_max_versions\x18\x01 \x03(\v2`.temporal.server.api.deployment.v1.SyncUnversionedRampActivityResponse.TaskQueueMaxVersionsEntryR\x14taskQueueMaxVersions\x1aG\n" + + "\x19TaskQueueMaxVersionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01\"\xb9\x02\n" + + "\x19UpdateVersionMetadataArgs\x12v\n" + + "\x0eupsert_entries\x18\x01 \x03(\v2O.temporal.server.api.deployment.v1.UpdateVersionMetadataArgs.UpsertEntriesEntryR\rupsertEntries\x12%\n" + + "\x0eremove_entries\x18\x02 \x03(\tR\rremoveEntries\x12\x1a\n" + + "\bidentity\x18\x03 \x01(\tR\bidentity\x1aa\n" + + "\x12UpsertEntriesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x125\n" + + "\x05value\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x05value:\x028\x01\"h\n" + + "\x1dUpdateVersionMetadataResponse\x12G\n" + + "\bmetadata\x18\x01 \x01(\v2+.temporal.api.deployment.v1.VersionMetadataR\bmetadata\"\xdb\x01\n" + + "\x15SetCurrentVersionArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12;\n" + + "\x1aignore_missing_task_queues\x18\x03 \x01(\bR\x17ignoreMissingTaskQueues\x12%\n" + + "\x0econflict_token\x18\x04 \x01(\fR\rconflictToken\x12(\n" + + "\x10allow_no_pollers\x18\x05 \x01(\bR\x0eallowNoPollers\"m\n" + + "\x19SetCurrentVersionResponse\x12)\n" + + "\x10previous_version\x18\x01 \x01(\tR\x0fpreviousVersion\x12%\n" + + "\x0econflict_token\x18\x02 \x01(\fR\rconflictToken\"W\n" + + "\x1aCreateWorkerDeploymentArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x1d\n" + + "\n" + + "request_id\x18\x02 \x01(\tR\trequestId\"G\n" + + "\x1eCreateWorkerDeploymentResponse\x12%\n" + + "\x0econflict_token\x18\x01 \x01(\fR\rconflictToken\"\xc7\x01\n" + + 
"!CreateWorkerDeploymentVersionArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x1d\n" + + "\n" + + "request_id\x18\x02 \x01(\tR\trequestId\x12\x18\n" + + "\aversion\x18\x03 \x01(\tR\aversion\x12M\n" + + "\x0ecompute_config\x18\x04 \x01(\v2&.temporal.api.compute.v1.ComputeConfigR\rcomputeConfig\"'\n" + + "%CreateWorkerDeploymentVersionResponse\"\xc0\x01\n" + + "\x11DeleteVersionArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12#\n" + + "\rskip_drainage\x18\x03 \x01(\bR\fskipDrainage\x12#\n" + + "\rserver_delete\x18\x04 \x01(\bR\fserverDelete\x12+\n" + + "\x11async_propagation\x18\x05 \x01(\bR\x10asyncPropagation\"\xeb\x01\n" + + "\x19DeleteVersionActivityArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12'\n" + + "\x0fdeployment_name\x18\x02 \x01(\tR\x0edeploymentName\x12\x18\n" + + "\aversion\x18\x03 \x01(\tR\aversion\x12\x1d\n" + + "\n" + + "request_id\x18\x04 \x01(\tR\trequestId\x12#\n" + + "\rskip_drainage\x18\x05 \x01(\bR\fskipDrainage\x12+\n" + + "\x11async_propagation\x18\x06 \x01(\bR\x10asyncPropagation\"\xa7\x04\n" + + "&CheckTaskQueuesHavePollersActivityArgs\x12\x94\x01\n" + + "\x15task_queues_and_types\x18\x01 \x03(\v2a.temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueuesAndTypesEntryR\x12taskQueuesAndTypes\x12v\n" + + "\x19worker_deployment_version\x18\x02 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\x17workerDeploymentVersion\x1a\x9f\x01\n" + + "\x17TaskQueuesAndTypesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12n\n" + + "\x05value\x18\x02 \x01(\v2X.temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueueTypesR\x05value:\x028\x01\x1aL\n" + + "\x0eTaskQueueTypes\x12:\n" + + "\x05types\x18\x01 \x03(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\x05types\"2\n" + + "\x14DeleteDeploymentArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\"\x9e\x01\n" + + 
"\x19SetRampingVersionResponse\x12)\n" + + "\x10previous_version\x18\x01 \x01(\tR\x0fpreviousVersion\x12/\n" + + "\x13previous_percentage\x18\x02 \x01(\x02R\x12previousPercentage\x12%\n" + + "\x0econflict_token\x18\x03 \x01(\fR\rconflictToken\"\xfb\x01\n" + + "\x15SetRampingVersionArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12\x1e\n" + + "\n" + + "percentage\x18\x03 \x01(\x02R\n" + + "percentage\x12;\n" + + "\x1aignore_missing_task_queues\x18\x04 \x01(\bR\x17ignoreMissingTaskQueues\x12%\n" + + "\x0econflict_token\x18\x05 \x01(\fR\rconflictToken\x12(\n" + + "\x10allow_no_pollers\x18\x06 \x01(\bR\x0eallowNoPollers\"\x86\x01\n" + + "\x16SetManagerIdentityArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12)\n" + + "\x10manager_identity\x18\x02 \x01(\tR\x0fmanagerIdentity\x12%\n" + + "\x0econflict_token\x18\x05 \x01(\fR\rconflictToken\"\x7f\n" + + "\x1aSetManagerIdentityResponse\x12:\n" + + "\x19previous_manager_identity\x18\x01 \x01(\tR\x17previousManagerIdentity\x12%\n" + + "\x0econflict_token\x18\x02 \x01(\fR\rconflictToken\"\xe0\x01\n" + + "\x1cSyncVersionStateActivityArgs\x12'\n" + + "\x0fdeployment_name\x18\x01 \x01(\tR\x0edeploymentName\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12^\n" + + "\vupdate_args\x18\x03 \x01(\v2=.temporal.server.api.deployment.v1.SyncVersionStateUpdateArgsR\n" + + "updateArgs\x12\x1d\n" + + "\n" + + "request_id\x18\x04 \x01(\tR\trequestId\"\xdc\x01\n" + + "\x1eSyncVersionStateActivityResult\x12]\n" + + "\rversion_state\x18\x01 \x01(\v24.temporal.server.api.deployment.v1.VersionLocalStateB\x02\x18\x01R\fversionState\x12[\n" + + "\asummary\x18\x02 \x01(\v2A.temporal.server.api.deployment.v1.WorkerDeploymentVersionSummaryR\asummary\"\x82\x01\n" + + "\x1eIsVersionMissingTaskQueuesArgs\x120\n" + + "\x14prev_current_version\x18\x01 \x01(\tR\x12prevCurrentVersion\x12.\n" + + "\x13new_current_version\x18\x02 \x01(\tR\x11newCurrentVersion\"W\n" + + " 
IsVersionMissingTaskQueuesResult\x123\n" + + "\x16is_missing_task_queues\x18\x01 \x01(\bR\x13isMissingTaskQueues\"\xf2\x04\n" + + "\x1cWorkerDeploymentWorkflowMemo\x12'\n" + + "\x0fdeployment_name\x18\x01 \x01(\tR\x0edeploymentName\x12;\n" + + "\vcreate_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\x12P\n" + + "\x0erouting_config\x18\x03 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\rroutingConfig\x12\x85\x01\n" + + "\x16latest_version_summary\x18\x04 \x01(\v2O.temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummaryR\x14latestVersionSummary\x12\x87\x01\n" + + "\x17current_version_summary\x18\x05 \x01(\v2O.temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummaryR\x15currentVersionSummary\x12\x87\x01\n" + + "\x17ramping_version_summary\x18\x06 \x01(\v2O.temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummaryR\x15rampingVersionSummary\"\xd8\x04\n" + + "\x17WorkerDeploymentSummary\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\vcreate_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\x12P\n" + + "\x0erouting_config\x18\x03 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\rroutingConfig\x12\x85\x01\n" + + "\x16latest_version_summary\x18\x04 \x01(\v2O.temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummaryR\x14latestVersionSummary\x12\x87\x01\n" + + "\x17current_version_summary\x18\x05 \x01(\v2O.temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummaryR\x15currentVersionSummary\x12\x87\x01\n" + + "\x17ramping_version_summary\x18\x06 \x01(\v2O.temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummaryR\x15rampingVersionSummary\"\xaa\x02\n" + + ")ValidateWorkerControllerInstanceSpecInput\x12\x86\x01\n" + + "\x0escaling_groups\x18\x01 \x03(\v2_.temporal.server.api.deployment.v1.ValidateWorkerControllerInstanceSpecInput.ScalingGroupsEntryR\rscalingGroups\x1at\n" + + 
"\x12ScalingGroupsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12H\n" + + "\x05value\x18\x02 \x01(\v22.temporal.api.compute.v1.ComputeConfigScalingGroupR\x05value:\x028\x01\"\xdd\x03\n" + + "#UpdateWorkerControllerInstanceInput\x12M\n" + + "\aversion\x18\x01 \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\aversion\x12\x1a\n" + + "\bidentity\x18\x02 \x01(\tR\bidentity\x12\x93\x01\n" + + "\x15upsert_scaling_groups\x18\x03 \x03(\v2_.temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput.UpsertScalingGroupsEntryR\x13upsertScalingGroups\x122\n" + + "\x15remove_scaling_groups\x18\x04 \x03(\tR\x13removeScalingGroups\x1a\x80\x01\n" + + "\x18UpsertScalingGroupsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12N\n" + + "\x05value\x18\x02 \x01(\v28.temporal.api.compute.v1.ComputeConfigScalingGroupUpdateR\x05value:\x028\x01\"\x90\x01\n" + + "#DeleteWorkerControllerInstanceInput\x12M\n" + + "\aversion\x18\x01 \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\aversion\x12\x1a\n" + + "\bidentity\x18\x02 \x01(\tR\bidentity\"\x95\x03\n" + + "\x17UpdateComputeConfigArgs\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x1d\n" + + "\n" + + "request_id\x18\x02 \x01(\tR\trequestId\x12\x87\x01\n" + + "\x15upsert_scaling_groups\x18\x03 \x03(\v2S.temporal.server.api.deployment.v1.UpdateComputeConfigArgs.UpsertScalingGroupsEntryR\x13upsertScalingGroups\x122\n" + + "\x15remove_scaling_groups\x18\x04 \x03(\tR\x13removeScalingGroups\x1a\x80\x01\n" + + "\x18UpsertScalingGroupsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12N\n" + + "\x05value\x18\x02 \x01(\v28.temporal.api.compute.v1.ComputeConfigScalingGroupUpdateR\x05value:\x028\x01\"\x1d\n" + + "\x1bUpdateComputeConfigResponse\"\x84\x01\n" + + "\x1cForceCANDeploymentSignalArgs\x12d\n" + + "\x0eoverride_state\x18\x01 \x01(\v2=.temporal.server.api.deployment.v1.WorkerDeploymentLocalStateR\roverrideState\"x\n" + + "\x19ForceCANVersionSignalArgs\x12[\n" + + 
"\x0eoverride_state\x18\x01 \x01(\v24.temporal.server.api.deployment.v1.VersionLocalStateR\roverrideStateB4Z2go.temporal.io/server/api/deployment/v1;deploymentb\x06proto3" + +var ( + file_temporal_server_api_deployment_v1_message_proto_rawDescOnce sync.Once + file_temporal_server_api_deployment_v1_message_proto_rawDescData []byte +) + +func file_temporal_server_api_deployment_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_api_deployment_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_api_deployment_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_deployment_v1_message_proto_rawDesc), len(file_temporal_server_api_deployment_v1_message_proto_rawDesc))) + }) + return file_temporal_server_api_deployment_v1_message_proto_rawDescData +} + +var file_temporal_server_api_deployment_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 74) +var file_temporal_server_api_deployment_v1_message_proto_goTypes = []any{ + (*WorkerDeploymentVersion)(nil), // 0: temporal.server.api.deployment.v1.WorkerDeploymentVersion + (*DeploymentVersionData)(nil), // 1: temporal.server.api.deployment.v1.DeploymentVersionData + (*WorkerDeploymentVersionData)(nil), // 2: temporal.server.api.deployment.v1.WorkerDeploymentVersionData + (*VersionLocalState)(nil), // 3: temporal.server.api.deployment.v1.VersionLocalState + (*TaskQueueVersionData)(nil), // 4: temporal.server.api.deployment.v1.TaskQueueVersionData + (*WorkerDeploymentVersionWorkflowArgs)(nil), // 5: temporal.server.api.deployment.v1.WorkerDeploymentVersionWorkflowArgs + (*WorkerDeploymentWorkflowArgs)(nil), // 6: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowArgs + (*WorkerDeploymentLocalState)(nil), // 7: temporal.server.api.deployment.v1.WorkerDeploymentLocalState + (*PropagatingRevisions)(nil), // 8: temporal.server.api.deployment.v1.PropagatingRevisions + (*WorkerDeploymentVersionSummary)(nil), // 9: 
temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary + (*RegisterWorkerInVersionArgs)(nil), // 10: temporal.server.api.deployment.v1.RegisterWorkerInVersionArgs + (*RegisterWorkerInWorkerDeploymentArgs)(nil), // 11: temporal.server.api.deployment.v1.RegisterWorkerInWorkerDeploymentArgs + (*DescribeVersionFromWorkerDeploymentActivityArgs)(nil), // 12: temporal.server.api.deployment.v1.DescribeVersionFromWorkerDeploymentActivityArgs + (*DescribeVersionFromWorkerDeploymentActivityResult)(nil), // 13: temporal.server.api.deployment.v1.DescribeVersionFromWorkerDeploymentActivityResult + (*SyncVersionStateUpdateArgs)(nil), // 14: temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs + (*SyncVersionStateResponse)(nil), // 15: temporal.server.api.deployment.v1.SyncVersionStateResponse + (*AddVersionUpdateArgs)(nil), // 16: temporal.server.api.deployment.v1.AddVersionUpdateArgs + (*SyncDrainageInfoSignalArgs)(nil), // 17: temporal.server.api.deployment.v1.SyncDrainageInfoSignalArgs + (*SyncDrainageStatusSignalArgs)(nil), // 18: temporal.server.api.deployment.v1.SyncDrainageStatusSignalArgs + (*PropagationCompletionInfo)(nil), // 19: temporal.server.api.deployment.v1.PropagationCompletionInfo + (*QueryDescribeVersionResponse)(nil), // 20: temporal.server.api.deployment.v1.QueryDescribeVersionResponse + (*QueryDescribeWorkerDeploymentResponse)(nil), // 21: temporal.server.api.deployment.v1.QueryDescribeWorkerDeploymentResponse + (*CreateRequestIDQueryResponse)(nil), // 22: temporal.server.api.deployment.v1.CreateRequestIDQueryResponse + (*StartWorkerDeploymentRequest)(nil), // 23: temporal.server.api.deployment.v1.StartWorkerDeploymentRequest + (*StartWorkerDeploymentVersionRequest)(nil), // 24: temporal.server.api.deployment.v1.StartWorkerDeploymentVersionRequest + (*SyncDeploymentVersionUserDataRequest)(nil), // 25: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest + (*SyncDeploymentVersionUserDataResponse)(nil), // 26: 
temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataResponse + (*CheckWorkerDeploymentUserDataPropagationRequest)(nil), // 27: temporal.server.api.deployment.v1.CheckWorkerDeploymentUserDataPropagationRequest + (*SyncUnversionedRampActivityArgs)(nil), // 28: temporal.server.api.deployment.v1.SyncUnversionedRampActivityArgs + (*SyncUnversionedRampActivityResponse)(nil), // 29: temporal.server.api.deployment.v1.SyncUnversionedRampActivityResponse + (*UpdateVersionMetadataArgs)(nil), // 30: temporal.server.api.deployment.v1.UpdateVersionMetadataArgs + (*UpdateVersionMetadataResponse)(nil), // 31: temporal.server.api.deployment.v1.UpdateVersionMetadataResponse + (*SetCurrentVersionArgs)(nil), // 32: temporal.server.api.deployment.v1.SetCurrentVersionArgs + (*SetCurrentVersionResponse)(nil), // 33: temporal.server.api.deployment.v1.SetCurrentVersionResponse + (*CreateWorkerDeploymentArgs)(nil), // 34: temporal.server.api.deployment.v1.CreateWorkerDeploymentArgs + (*CreateWorkerDeploymentResponse)(nil), // 35: temporal.server.api.deployment.v1.CreateWorkerDeploymentResponse + (*CreateWorkerDeploymentVersionArgs)(nil), // 36: temporal.server.api.deployment.v1.CreateWorkerDeploymentVersionArgs + (*CreateWorkerDeploymentVersionResponse)(nil), // 37: temporal.server.api.deployment.v1.CreateWorkerDeploymentVersionResponse + (*DeleteVersionArgs)(nil), // 38: temporal.server.api.deployment.v1.DeleteVersionArgs + (*DeleteVersionActivityArgs)(nil), // 39: temporal.server.api.deployment.v1.DeleteVersionActivityArgs + (*CheckTaskQueuesHavePollersActivityArgs)(nil), // 40: temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs + (*DeleteDeploymentArgs)(nil), // 41: temporal.server.api.deployment.v1.DeleteDeploymentArgs + (*SetRampingVersionResponse)(nil), // 42: temporal.server.api.deployment.v1.SetRampingVersionResponse + (*SetRampingVersionArgs)(nil), // 43: temporal.server.api.deployment.v1.SetRampingVersionArgs + (*SetManagerIdentityArgs)(nil), // 
44: temporal.server.api.deployment.v1.SetManagerIdentityArgs + (*SetManagerIdentityResponse)(nil), // 45: temporal.server.api.deployment.v1.SetManagerIdentityResponse + (*SyncVersionStateActivityArgs)(nil), // 46: temporal.server.api.deployment.v1.SyncVersionStateActivityArgs + (*SyncVersionStateActivityResult)(nil), // 47: temporal.server.api.deployment.v1.SyncVersionStateActivityResult + (*IsVersionMissingTaskQueuesArgs)(nil), // 48: temporal.server.api.deployment.v1.IsVersionMissingTaskQueuesArgs + (*IsVersionMissingTaskQueuesResult)(nil), // 49: temporal.server.api.deployment.v1.IsVersionMissingTaskQueuesResult + (*WorkerDeploymentWorkflowMemo)(nil), // 50: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowMemo + (*WorkerDeploymentSummary)(nil), // 51: temporal.server.api.deployment.v1.WorkerDeploymentSummary + (*ValidateWorkerControllerInstanceSpecInput)(nil), // 52: temporal.server.api.deployment.v1.ValidateWorkerControllerInstanceSpecInput + (*UpdateWorkerControllerInstanceInput)(nil), // 53: temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput + (*DeleteWorkerControllerInstanceInput)(nil), // 54: temporal.server.api.deployment.v1.DeleteWorkerControllerInstanceInput + (*UpdateComputeConfigArgs)(nil), // 55: temporal.server.api.deployment.v1.UpdateComputeConfigArgs + (*UpdateComputeConfigResponse)(nil), // 56: temporal.server.api.deployment.v1.UpdateComputeConfigResponse + (*ForceCANDeploymentSignalArgs)(nil), // 57: temporal.server.api.deployment.v1.ForceCANDeploymentSignalArgs + (*ForceCANVersionSignalArgs)(nil), // 58: temporal.server.api.deployment.v1.ForceCANVersionSignalArgs + nil, // 59: temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamiliesEntry + (*VersionLocalState_TaskQueueFamilyData)(nil), // 60: temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData + nil, // 61: temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData.TaskQueuesEntry + nil, // 62: 
temporal.server.api.deployment.v1.WorkerDeploymentLocalState.VersionsEntry + nil, // 63: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.PropagatingRevisionsEntry + (*SyncDeploymentVersionUserDataRequest_SyncUserData)(nil), // 64: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.SyncUserData + nil, // 65: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataResponse.TaskQueueMaxVersionsEntry + nil, // 66: temporal.server.api.deployment.v1.CheckWorkerDeploymentUserDataPropagationRequest.TaskQueueMaxVersionsEntry + nil, // 67: temporal.server.api.deployment.v1.SyncUnversionedRampActivityResponse.TaskQueueMaxVersionsEntry + nil, // 68: temporal.server.api.deployment.v1.UpdateVersionMetadataArgs.UpsertEntriesEntry + nil, // 69: temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueuesAndTypesEntry + (*CheckTaskQueuesHavePollersActivityArgs_TaskQueueTypes)(nil), // 70: temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueueTypes + nil, // 71: temporal.server.api.deployment.v1.ValidateWorkerControllerInstanceSpecInput.ScalingGroupsEntry + nil, // 72: temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput.UpsertScalingGroupsEntry + nil, // 73: temporal.server.api.deployment.v1.UpdateComputeConfigArgs.UpsertScalingGroupsEntry + (*timestamppb.Timestamp)(nil), // 74: google.protobuf.Timestamp + (v1.WorkerDeploymentVersionStatus)(0), // 75: temporal.api.enums.v1.WorkerDeploymentVersionStatus + (*v11.VersionDrainageInfo)(nil), // 76: temporal.api.deployment.v1.VersionDrainageInfo + (*v11.VersionMetadata)(nil), // 77: temporal.api.deployment.v1.VersionMetadata + (*v12.ComputeConfigSummary)(nil), // 78: temporal.api.compute.v1.ComputeConfigSummary + (*v11.RoutingConfig)(nil), // 79: temporal.api.deployment.v1.RoutingConfig + (v1.VersionDrainageStatus)(0), // 80: temporal.api.enums.v1.VersionDrainageStatus + (v1.TaskQueueType)(0), // 81: 
temporal.api.enums.v1.TaskQueueType + (*v11.WorkerDeploymentVersionInfo_VersionTaskQueueInfo)(nil), // 82: temporal.api.deployment.v1.WorkerDeploymentVersionInfo.VersionTaskQueueInfo + (*v12.ComputeConfig)(nil), // 83: temporal.api.compute.v1.ComputeConfig + (*v11.WorkerDeploymentInfo_WorkerDeploymentVersionSummary)(nil), // 84: temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + (*v11.WorkerDeploymentVersion)(nil), // 85: temporal.api.deployment.v1.WorkerDeploymentVersion + (*v13.Payload)(nil), // 86: temporal.api.common.v1.Payload + (*v12.ComputeConfigScalingGroup)(nil), // 87: temporal.api.compute.v1.ComputeConfigScalingGroup + (*v12.ComputeConfigScalingGroupUpdate)(nil), // 88: temporal.api.compute.v1.ComputeConfigScalingGroupUpdate +} +var file_temporal_server_api_deployment_v1_message_proto_depIdxs = []int32{ + 0, // 0: temporal.server.api.deployment.v1.DeploymentVersionData.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 74, // 1: temporal.server.api.deployment.v1.DeploymentVersionData.routing_update_time:type_name -> google.protobuf.Timestamp + 74, // 2: temporal.server.api.deployment.v1.DeploymentVersionData.current_since_time:type_name -> google.protobuf.Timestamp + 74, // 3: temporal.server.api.deployment.v1.DeploymentVersionData.ramping_since_time:type_name -> google.protobuf.Timestamp + 75, // 4: temporal.server.api.deployment.v1.DeploymentVersionData.status:type_name -> temporal.api.enums.v1.WorkerDeploymentVersionStatus + 74, // 5: temporal.server.api.deployment.v1.WorkerDeploymentVersionData.update_time:type_name -> google.protobuf.Timestamp + 75, // 6: temporal.server.api.deployment.v1.WorkerDeploymentVersionData.status:type_name -> temporal.api.enums.v1.WorkerDeploymentVersionStatus + 0, // 7: temporal.server.api.deployment.v1.VersionLocalState.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 74, // 8: 
temporal.server.api.deployment.v1.VersionLocalState.create_time:type_name -> google.protobuf.Timestamp + 74, // 9: temporal.server.api.deployment.v1.VersionLocalState.routing_update_time:type_name -> google.protobuf.Timestamp + 74, // 10: temporal.server.api.deployment.v1.VersionLocalState.current_since_time:type_name -> google.protobuf.Timestamp + 74, // 11: temporal.server.api.deployment.v1.VersionLocalState.ramping_since_time:type_name -> google.protobuf.Timestamp + 74, // 12: temporal.server.api.deployment.v1.VersionLocalState.first_activation_time:type_name -> google.protobuf.Timestamp + 74, // 13: temporal.server.api.deployment.v1.VersionLocalState.last_current_time:type_name -> google.protobuf.Timestamp + 74, // 14: temporal.server.api.deployment.v1.VersionLocalState.last_deactivation_time:type_name -> google.protobuf.Timestamp + 76, // 15: temporal.server.api.deployment.v1.VersionLocalState.drainage_info:type_name -> temporal.api.deployment.v1.VersionDrainageInfo + 77, // 16: temporal.server.api.deployment.v1.VersionLocalState.metadata:type_name -> temporal.api.deployment.v1.VersionMetadata + 59, // 17: temporal.server.api.deployment.v1.VersionLocalState.task_queue_families:type_name -> temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamiliesEntry + 75, // 18: temporal.server.api.deployment.v1.VersionLocalState.status:type_name -> temporal.api.enums.v1.WorkerDeploymentVersionStatus + 78, // 19: temporal.server.api.deployment.v1.VersionLocalState.compute_config:type_name -> temporal.api.compute.v1.ComputeConfigSummary + 3, // 20: temporal.server.api.deployment.v1.WorkerDeploymentVersionWorkflowArgs.version_state:type_name -> temporal.server.api.deployment.v1.VersionLocalState + 7, // 21: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowArgs.state:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentLocalState + 74, // 22: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.create_time:type_name -> 
google.protobuf.Timestamp + 79, // 23: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 62, // 24: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.versions:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentLocalState.VersionsEntry + 63, // 25: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.propagating_revisions:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentLocalState.PropagatingRevisionsEntry + 74, // 26: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.create_time:type_name -> google.protobuf.Timestamp + 80, // 27: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.drainage_status:type_name -> temporal.api.enums.v1.VersionDrainageStatus + 76, // 28: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.drainage_info:type_name -> temporal.api.deployment.v1.VersionDrainageInfo + 74, // 29: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.routing_update_time:type_name -> google.protobuf.Timestamp + 74, // 30: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.current_since_time:type_name -> google.protobuf.Timestamp + 74, // 31: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.ramping_since_time:type_name -> google.protobuf.Timestamp + 74, // 32: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.first_activation_time:type_name -> google.protobuf.Timestamp + 74, // 33: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.last_current_time:type_name -> google.protobuf.Timestamp + 74, // 34: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.last_deactivation_time:type_name -> google.protobuf.Timestamp + 75, // 35: temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.status:type_name -> temporal.api.enums.v1.WorkerDeploymentVersionStatus + 78, // 36: 
temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary.compute_config:type_name -> temporal.api.compute.v1.ComputeConfigSummary + 81, // 37: temporal.server.api.deployment.v1.RegisterWorkerInVersionArgs.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 79, // 38: temporal.server.api.deployment.v1.RegisterWorkerInVersionArgs.routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 81, // 39: temporal.server.api.deployment.v1.RegisterWorkerInWorkerDeploymentArgs.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 0, // 40: temporal.server.api.deployment.v1.RegisterWorkerInWorkerDeploymentArgs.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 82, // 41: temporal.server.api.deployment.v1.DescribeVersionFromWorkerDeploymentActivityResult.task_queue_infos:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersionInfo.VersionTaskQueueInfo + 74, // 42: temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs.routing_update_time:type_name -> google.protobuf.Timestamp + 74, // 43: temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs.current_since_time:type_name -> google.protobuf.Timestamp + 74, // 44: temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs.ramping_since_time:type_name -> google.protobuf.Timestamp + 79, // 45: temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs.routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 3, // 46: temporal.server.api.deployment.v1.SyncVersionStateResponse.version_state:type_name -> temporal.server.api.deployment.v1.VersionLocalState + 9, // 47: temporal.server.api.deployment.v1.SyncVersionStateResponse.summary:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary + 74, // 48: temporal.server.api.deployment.v1.AddVersionUpdateArgs.create_time:type_name -> google.protobuf.Timestamp + 76, // 49: temporal.server.api.deployment.v1.SyncDrainageInfoSignalArgs.drainage_info:type_name 
-> temporal.api.deployment.v1.VersionDrainageInfo + 80, // 50: temporal.server.api.deployment.v1.SyncDrainageStatusSignalArgs.drainage_status:type_name -> temporal.api.enums.v1.VersionDrainageStatus + 3, // 51: temporal.server.api.deployment.v1.QueryDescribeVersionResponse.version_state:type_name -> temporal.server.api.deployment.v1.VersionLocalState + 7, // 52: temporal.server.api.deployment.v1.QueryDescribeWorkerDeploymentResponse.state:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentLocalState + 78, // 53: temporal.server.api.deployment.v1.StartWorkerDeploymentVersionRequest.compute_config:type_name -> temporal.api.compute.v1.ComputeConfigSummary + 0, // 54: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 64, // 55: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.sync:type_name -> temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.SyncUserData + 79, // 56: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.update_routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 2, // 57: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.upsert_version_data:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersionData + 65, // 58: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataResponse.task_queue_max_versions:type_name -> temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataResponse.TaskQueueMaxVersionsEntry + 66, // 59: temporal.server.api.deployment.v1.CheckWorkerDeploymentUserDataPropagationRequest.task_queue_max_versions:type_name -> temporal.server.api.deployment.v1.CheckWorkerDeploymentUserDataPropagationRequest.TaskQueueMaxVersionsEntry + 14, // 60: temporal.server.api.deployment.v1.SyncUnversionedRampActivityArgs.update_args:type_name -> temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs + 67, // 61: 
temporal.server.api.deployment.v1.SyncUnversionedRampActivityResponse.task_queue_max_versions:type_name -> temporal.server.api.deployment.v1.SyncUnversionedRampActivityResponse.TaskQueueMaxVersionsEntry + 68, // 62: temporal.server.api.deployment.v1.UpdateVersionMetadataArgs.upsert_entries:type_name -> temporal.server.api.deployment.v1.UpdateVersionMetadataArgs.UpsertEntriesEntry + 77, // 63: temporal.server.api.deployment.v1.UpdateVersionMetadataResponse.metadata:type_name -> temporal.api.deployment.v1.VersionMetadata + 83, // 64: temporal.server.api.deployment.v1.CreateWorkerDeploymentVersionArgs.compute_config:type_name -> temporal.api.compute.v1.ComputeConfig + 69, // 65: temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.task_queues_and_types:type_name -> temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueuesAndTypesEntry + 0, // 66: temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.worker_deployment_version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 14, // 67: temporal.server.api.deployment.v1.SyncVersionStateActivityArgs.update_args:type_name -> temporal.server.api.deployment.v1.SyncVersionStateUpdateArgs + 3, // 68: temporal.server.api.deployment.v1.SyncVersionStateActivityResult.version_state:type_name -> temporal.server.api.deployment.v1.VersionLocalState + 9, // 69: temporal.server.api.deployment.v1.SyncVersionStateActivityResult.summary:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary + 74, // 70: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowMemo.create_time:type_name -> google.protobuf.Timestamp + 79, // 71: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowMemo.routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 84, // 72: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowMemo.latest_version_summary:type_name -> 
temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + 84, // 73: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowMemo.current_version_summary:type_name -> temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + 84, // 74: temporal.server.api.deployment.v1.WorkerDeploymentWorkflowMemo.ramping_version_summary:type_name -> temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + 74, // 75: temporal.server.api.deployment.v1.WorkerDeploymentSummary.create_time:type_name -> google.protobuf.Timestamp + 79, // 76: temporal.server.api.deployment.v1.WorkerDeploymentSummary.routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 84, // 77: temporal.server.api.deployment.v1.WorkerDeploymentSummary.latest_version_summary:type_name -> temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + 84, // 78: temporal.server.api.deployment.v1.WorkerDeploymentSummary.current_version_summary:type_name -> temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + 84, // 79: temporal.server.api.deployment.v1.WorkerDeploymentSummary.ramping_version_summary:type_name -> temporal.api.deployment.v1.WorkerDeploymentInfo.WorkerDeploymentVersionSummary + 71, // 80: temporal.server.api.deployment.v1.ValidateWorkerControllerInstanceSpecInput.scaling_groups:type_name -> temporal.server.api.deployment.v1.ValidateWorkerControllerInstanceSpecInput.ScalingGroupsEntry + 85, // 81: temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput.version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 72, // 82: temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput.upsert_scaling_groups:type_name -> temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput.UpsertScalingGroupsEntry + 85, // 83: temporal.server.api.deployment.v1.DeleteWorkerControllerInstanceInput.version:type_name -> 
temporal.api.deployment.v1.WorkerDeploymentVersion + 73, // 84: temporal.server.api.deployment.v1.UpdateComputeConfigArgs.upsert_scaling_groups:type_name -> temporal.server.api.deployment.v1.UpdateComputeConfigArgs.UpsertScalingGroupsEntry + 7, // 85: temporal.server.api.deployment.v1.ForceCANDeploymentSignalArgs.override_state:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentLocalState + 3, // 86: temporal.server.api.deployment.v1.ForceCANVersionSignalArgs.override_state:type_name -> temporal.server.api.deployment.v1.VersionLocalState + 60, // 87: temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamiliesEntry.value:type_name -> temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData + 61, // 88: temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData.task_queues:type_name -> temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData.TaskQueuesEntry + 4, // 89: temporal.server.api.deployment.v1.VersionLocalState.TaskQueueFamilyData.TaskQueuesEntry.value:type_name -> temporal.server.api.deployment.v1.TaskQueueVersionData + 9, // 90: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.VersionsEntry.value:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersionSummary + 8, // 91: temporal.server.api.deployment.v1.WorkerDeploymentLocalState.PropagatingRevisionsEntry.value:type_name -> temporal.server.api.deployment.v1.PropagatingRevisions + 81, // 92: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.SyncUserData.types:type_name -> temporal.api.enums.v1.TaskQueueType + 1, // 93: temporal.server.api.deployment.v1.SyncDeploymentVersionUserDataRequest.SyncUserData.data:type_name -> temporal.server.api.deployment.v1.DeploymentVersionData + 86, // 94: temporal.server.api.deployment.v1.UpdateVersionMetadataArgs.UpsertEntriesEntry.value:type_name -> temporal.api.common.v1.Payload + 70, // 95: 
temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueuesAndTypesEntry.value:type_name -> temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueueTypes + 81, // 96: temporal.server.api.deployment.v1.CheckTaskQueuesHavePollersActivityArgs.TaskQueueTypes.types:type_name -> temporal.api.enums.v1.TaskQueueType + 87, // 97: temporal.server.api.deployment.v1.ValidateWorkerControllerInstanceSpecInput.ScalingGroupsEntry.value:type_name -> temporal.api.compute.v1.ComputeConfigScalingGroup + 88, // 98: temporal.server.api.deployment.v1.UpdateWorkerControllerInstanceInput.UpsertScalingGroupsEntry.value:type_name -> temporal.api.compute.v1.ComputeConfigScalingGroupUpdate + 88, // 99: temporal.server.api.deployment.v1.UpdateComputeConfigArgs.UpsertScalingGroupsEntry.value:type_name -> temporal.api.compute.v1.ComputeConfigScalingGroupUpdate + 100, // [100:100] is the sub-list for method output_type + 100, // [100:100] is the sub-list for method input_type + 100, // [100:100] is the sub-list for extension type_name + 100, // [100:100] is the sub-list for extension extendee + 0, // [0:100] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_deployment_v1_message_proto_init() } +func file_temporal_server_api_deployment_v1_message_proto_init() { + if File_temporal_server_api_deployment_v1_message_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_deployment_v1_message_proto_rawDesc), len(file_temporal_server_api_deployment_v1_message_proto_rawDesc)), + NumEnums: 0, + NumMessages: 74, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_deployment_v1_message_proto_goTypes, + DependencyIndexes: file_temporal_server_api_deployment_v1_message_proto_depIdxs, + MessageInfos: 
file_temporal_server_api_deployment_v1_message_proto_msgTypes, + }.Build() + File_temporal_server_api_deployment_v1_message_proto = out.File + file_temporal_server_api_deployment_v1_message_proto_goTypes = nil + file_temporal_server_api_deployment_v1_message_proto_depIdxs = nil +} diff --git a/api/enums/v1/cluster.go-helpers.pb.go b/api/enums/v1/cluster.go-helpers.pb.go index f1c3773120d..fcb46ddaa75 100644 --- a/api/enums/v1/cluster.go-helpers.pb.go +++ b/api/enums/v1/cluster.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package enums @@ -49,3 +25,24 @@ func ClusterMemberRoleFromString(s string) (ClusterMemberRole, error) { } return ClusterMemberRole(0), fmt.Errorf("%s is not a valid ClusterMemberRole", s) } + +var ( + HealthState_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Serving": 1, + "NotServing": 2, + "DeclinedServing": 3, + "InternalError": 4, + } +) + +// HealthStateFromString parses a HealthState value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to HealthState +func HealthStateFromString(s string) (HealthState, error) { + if v, ok := HealthState_value[s]; ok { + return HealthState(v), nil + } else if v, ok := HealthState_shorthandValue[s]; ok { + return HealthState(v), nil + } + return HealthState(0), fmt.Errorf("%s is not a valid HealthState", s) +} diff --git a/api/enums/v1/cluster.pb.go b/api/enums/v1/cluster.pb.go index 226154bb1d5..ff2b2ff84b9 100644 --- a/api/enums/v1/cluster.pb.go +++ b/api/enums/v1/cluster.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -113,46 +92,113 @@ func (ClusterMemberRole) EnumDescriptor() ([]byte, []int) { return file_temporal_server_api_enums_v1_cluster_proto_rawDescGZIP(), []int{0} } -var File_temporal_server_api_enums_v1_cluster_proto protoreflect.FileDescriptor +type HealthState int32 + +const ( + HEALTH_STATE_UNSPECIFIED HealthState = 0 + // The host is in a healthy state. + HEALTH_STATE_SERVING HealthState = 1 + // The host is unhealthy through external observation. + HEALTH_STATE_NOT_SERVING HealthState = 2 + // The host has marked itself as not ready to serve traffic. + HEALTH_STATE_DECLINED_SERVING HealthState = 3 + // An internal error occurred while checking health (e.g. resolver failure). + HEALTH_STATE_INTERNAL_ERROR HealthState = 4 +) + +// Enum value maps for HealthState. 
+var ( + HealthState_name = map[int32]string{ + 0: "HEALTH_STATE_UNSPECIFIED", + 1: "HEALTH_STATE_SERVING", + 2: "HEALTH_STATE_NOT_SERVING", + 3: "HEALTH_STATE_DECLINED_SERVING", + 4: "HEALTH_STATE_INTERNAL_ERROR", + } + HealthState_value = map[string]int32{ + "HEALTH_STATE_UNSPECIFIED": 0, + "HEALTH_STATE_SERVING": 1, + "HEALTH_STATE_NOT_SERVING": 2, + "HEALTH_STATE_DECLINED_SERVING": 3, + "HEALTH_STATE_INTERNAL_ERROR": 4, + } +) + +func (x HealthState) Enum() *HealthState { + p := new(HealthState) + *p = x + return p +} + +func (x HealthState) String() string { + switch x { + case HEALTH_STATE_UNSPECIFIED: + return "Unspecified" + case HEALTH_STATE_SERVING: + return "Serving" + case HEALTH_STATE_NOT_SERVING: + return "NotServing" + case HEALTH_STATE_DECLINED_SERVING: + return "DeclinedServing" + case HEALTH_STATE_INTERNAL_ERROR: + return "InternalError" + default: + return strconv.Itoa(int(x)) + } + +} + +func (HealthState) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_cluster_proto_enumTypes[1].Descriptor() +} -var file_temporal_server_api_enums_v1_cluster_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0xbd, 0x01, 0x0a, 0x11, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x52, 0x6f, 0x6c, 0x65, - 0x12, 0x23, 0x0a, 0x1f, 0x43, 0x4c, 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x4d, 0x42, - 0x45, 0x52, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x4c, 0x55, 
0x53, 0x54, 0x45, 0x52, - 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x46, 0x52, 0x4f, - 0x4e, 0x54, 0x45, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4c, 0x55, 0x53, 0x54, - 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x43, 0x4c, 0x55, 0x53, - 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x52, 0x4f, 0x4c, 0x45, 0x5f, - 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x1e, 0x0a, 0x1a, 0x43, 0x4c, - 0x55, 0x53, 0x54, 0x45, 0x52, 0x5f, 0x4d, 0x45, 0x4d, 0x42, 0x45, 0x52, 0x5f, 0x52, 0x4f, 0x4c, - 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x45, 0x52, 0x10, 0x04, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x6f, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (HealthState) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_cluster_proto_enumTypes[1] } +func (x HealthState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use HealthState.Descriptor instead. 
+func (HealthState) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_cluster_proto_rawDescGZIP(), []int{1} +} + +var File_temporal_server_api_enums_v1_cluster_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_cluster_proto_rawDesc = "" + + "\n" + + "*temporal/server/api/enums/v1/cluster.proto\x12\x1ctemporal.server.api.enums.v1*\xbd\x01\n" + + "\x11ClusterMemberRole\x12#\n" + + "\x1fCLUSTER_MEMBER_ROLE_UNSPECIFIED\x10\x00\x12 \n" + + "\x1cCLUSTER_MEMBER_ROLE_FRONTEND\x10\x01\x12\x1f\n" + + "\x1bCLUSTER_MEMBER_ROLE_HISTORY\x10\x02\x12 \n" + + "\x1cCLUSTER_MEMBER_ROLE_MATCHING\x10\x03\x12\x1e\n" + + "\x1aCLUSTER_MEMBER_ROLE_WORKER\x10\x04*\xa7\x01\n" + + "\vHealthState\x12\x1c\n" + + "\x18HEALTH_STATE_UNSPECIFIED\x10\x00\x12\x18\n" + + "\x14HEALTH_STATE_SERVING\x10\x01\x12\x1c\n" + + "\x18HEALTH_STATE_NOT_SERVING\x10\x02\x12!\n" + + "\x1dHEALTH_STATE_DECLINED_SERVING\x10\x03\x12\x1f\n" + + "\x1bHEALTH_STATE_INTERNAL_ERROR\x10\x04B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + var ( file_temporal_server_api_enums_v1_cluster_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_cluster_proto_rawDescData = file_temporal_server_api_enums_v1_cluster_proto_rawDesc + file_temporal_server_api_enums_v1_cluster_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_cluster_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_cluster_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_cluster_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_cluster_proto_rawDescData) + file_temporal_server_api_enums_v1_cluster_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_cluster_proto_rawDesc), len(file_temporal_server_api_enums_v1_cluster_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_cluster_proto_rawDescData } -var file_temporal_server_api_enums_v1_cluster_proto_enumTypes = 
make([]protoimpl.EnumInfo, 1) -var file_temporal_server_api_enums_v1_cluster_proto_goTypes = []interface{}{ +var file_temporal_server_api_enums_v1_cluster_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_temporal_server_api_enums_v1_cluster_proto_goTypes = []any{ (ClusterMemberRole)(0), // 0: temporal.server.api.enums.v1.ClusterMemberRole + (HealthState)(0), // 1: temporal.server.api.enums.v1.HealthState } var file_temporal_server_api_enums_v1_cluster_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -171,8 +217,8 @@ func file_temporal_server_api_enums_v1_cluster_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_cluster_proto_rawDesc, - NumEnums: 1, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_cluster_proto_rawDesc), len(file_temporal_server_api_enums_v1_cluster_proto_rawDesc)), + NumEnums: 2, NumMessages: 0, NumExtensions: 0, NumServices: 0, @@ -182,7 +228,6 @@ func file_temporal_server_api_enums_v1_cluster_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_cluster_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_cluster_proto = out.File - file_temporal_server_api_enums_v1_cluster_proto_rawDesc = nil file_temporal_server_api_enums_v1_cluster_proto_goTypes = nil file_temporal_server_api_enums_v1_cluster_proto_depIdxs = nil } diff --git a/api/enums/v1/common.go-helpers.pb.go b/api/enums/v1/common.go-helpers.pb.go index 34bc18d820a..1b4a91f46b8 100644 --- a/api/enums/v1/common.go-helpers.pb.go +++ b/api/enums/v1/common.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package enums @@ -65,3 +41,25 @@ func ChecksumFlavorFromString(s string) (ChecksumFlavor, error) { } return ChecksumFlavor(0), fmt.Errorf("%s is not a valid ChecksumFlavor", s) } + +var ( + CallbackState_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Standby": 1, + "Scheduled": 2, + "BackingOff": 3, + "Failed": 4, + "Succeeded": 5, + } +) + +// CallbackStateFromString parses a CallbackState value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to CallbackState +func CallbackStateFromString(s string) (CallbackState, error) { + if v, ok := CallbackState_value[s]; ok { + return CallbackState(v), nil + } else if v, ok := CallbackState_shorthandValue[s]; ok { + return CallbackState(v), nil + } + return CallbackState(0), fmt.Errorf("%s is not a valid CallbackState", s) +} diff --git a/api/enums/v1/common.pb.go b/api/enums/v1/common.pb.go index 0b97f595761..52fe73a1f0a 100644 --- a/api/enums/v1/common.pb.go +++ b/api/enums/v1/common.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -157,51 +136,124 @@ func (ChecksumFlavor) EnumDescriptor() ([]byte, []int) { return file_temporal_server_api_enums_v1_common_proto_rawDescGZIP(), []int{1} } -var File_temporal_server_api_enums_v1_common_proto protoreflect.FileDescriptor +// State of a callback. +type CallbackState int32 + +const ( + // Default value, unspecified state. + CALLBACK_STATE_UNSPECIFIED CallbackState = 0 + // Callback is standing by, waiting to be triggered. + CALLBACK_STATE_STANDBY CallbackState = 1 + // Callback is in the queue waiting to be executed or is currently executing. + CALLBACK_STATE_SCHEDULED CallbackState = 2 + // Callback has failed with a retryable error and is backing off before the next attempt. + CALLBACK_STATE_BACKING_OFF CallbackState = 3 + // Callback has failed. + CALLBACK_STATE_FAILED CallbackState = 4 + // Callback has succeeded. + CALLBACK_STATE_SUCCEEDED CallbackState = 5 +) + +// Enum value maps for CallbackState. 
+var ( + CallbackState_name = map[int32]string{ + 0: "CALLBACK_STATE_UNSPECIFIED", + 1: "CALLBACK_STATE_STANDBY", + 2: "CALLBACK_STATE_SCHEDULED", + 3: "CALLBACK_STATE_BACKING_OFF", + 4: "CALLBACK_STATE_FAILED", + 5: "CALLBACK_STATE_SUCCEEDED", + } + CallbackState_value = map[string]int32{ + "CALLBACK_STATE_UNSPECIFIED": 0, + "CALLBACK_STATE_STANDBY": 1, + "CALLBACK_STATE_SCHEDULED": 2, + "CALLBACK_STATE_BACKING_OFF": 3, + "CALLBACK_STATE_FAILED": 4, + "CALLBACK_STATE_SUCCEEDED": 5, + } +) + +func (x CallbackState) Enum() *CallbackState { + p := new(CallbackState) + *p = x + return p +} + +func (x CallbackState) String() string { + switch x { + case CALLBACK_STATE_UNSPECIFIED: + return "Unspecified" + case CALLBACK_STATE_STANDBY: + return "Standby" + case CALLBACK_STATE_SCHEDULED: + return "Scheduled" + case CALLBACK_STATE_BACKING_OFF: + return "BackingOff" + case CALLBACK_STATE_FAILED: + return "Failed" + case CALLBACK_STATE_SUCCEEDED: + return "Succeeded" + default: + return strconv.Itoa(int(x)) + } + +} + +func (CallbackState) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_common_proto_enumTypes[2].Descriptor() +} -var file_temporal_server_api_enums_v1_common_proto_rawDesc = []byte{ - 0x0a, 0x29, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0x8b, 0x01, 0x0a, 0x13, 0x44, 0x65, - 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x26, 0x0a, 0x22, 0x44, 0x45, 0x41, 0x44, 0x5f, 0x4c, 0x45, 0x54, 0x54, 0x45, 0x52, - 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 
0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x26, 0x0a, 0x22, 0x44, 0x45, 0x41, - 0x44, 0x5f, 0x4c, 0x45, 0x54, 0x54, 0x45, 0x52, 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, - 0x01, 0x12, 0x24, 0x0a, 0x20, 0x44, 0x45, 0x41, 0x44, 0x5f, 0x4c, 0x45, 0x54, 0x54, 0x45, 0x52, - 0x5f, 0x51, 0x55, 0x45, 0x55, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, - 0x53, 0x50, 0x41, 0x43, 0x45, 0x10, 0x02, 0x2a, 0x64, 0x0a, 0x0e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x46, 0x6c, 0x61, 0x76, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x48, 0x45, - 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x56, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x48, - 0x45, 0x43, 0x4b, 0x53, 0x55, 0x4d, 0x5f, 0x46, 0x4c, 0x41, 0x56, 0x4f, 0x52, 0x5f, 0x49, 0x45, - 0x45, 0x45, 0x5f, 0x43, 0x52, 0x43, 0x33, 0x32, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x5f, 0x50, 0x52, - 0x4f, 0x54, 0x4f, 0x33, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x01, 0x42, 0x2a, 0x5a, - 0x28, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, - 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, +func (CallbackState) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_common_proto_enumTypes[2] } +func (x CallbackState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CallbackState.Descriptor instead. 
+func (CallbackState) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_common_proto_rawDescGZIP(), []int{2} +} + +var File_temporal_server_api_enums_v1_common_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_common_proto_rawDesc = "" + + "\n" + + ")temporal/server/api/enums/v1/common.proto\x12\x1ctemporal.server.api.enums.v1*\x8b\x01\n" + + "\x13DeadLetterQueueType\x12&\n" + + "\"DEAD_LETTER_QUEUE_TYPE_UNSPECIFIED\x10\x00\x12&\n" + + "\"DEAD_LETTER_QUEUE_TYPE_REPLICATION\x10\x01\x12$\n" + + " DEAD_LETTER_QUEUE_TYPE_NAMESPACE\x10\x02*d\n" + + "\x0eChecksumFlavor\x12\x1f\n" + + "\x1bCHECKSUM_FLAVOR_UNSPECIFIED\x10\x00\x121\n" + + "-CHECKSUM_FLAVOR_IEEE_CRC32_OVER_PROTO3_BINARY\x10\x01*\xc2\x01\n" + + "\rCallbackState\x12\x1e\n" + + "\x1aCALLBACK_STATE_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16CALLBACK_STATE_STANDBY\x10\x01\x12\x1c\n" + + "\x18CALLBACK_STATE_SCHEDULED\x10\x02\x12\x1e\n" + + "\x1aCALLBACK_STATE_BACKING_OFF\x10\x03\x12\x19\n" + + "\x15CALLBACK_STATE_FAILED\x10\x04\x12\x1c\n" + + "\x18CALLBACK_STATE_SUCCEEDED\x10\x05B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + var ( file_temporal_server_api_enums_v1_common_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_common_proto_rawDescData = file_temporal_server_api_enums_v1_common_proto_rawDesc + file_temporal_server_api_enums_v1_common_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_common_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_common_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_common_proto_rawDescData) + file_temporal_server_api_enums_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_common_proto_rawDesc), len(file_temporal_server_api_enums_v1_common_proto_rawDesc))) }) return 
file_temporal_server_api_enums_v1_common_proto_rawDescData } -var file_temporal_server_api_enums_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_temporal_server_api_enums_v1_common_proto_goTypes = []interface{}{ +var file_temporal_server_api_enums_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_temporal_server_api_enums_v1_common_proto_goTypes = []any{ (DeadLetterQueueType)(0), // 0: temporal.server.api.enums.v1.DeadLetterQueueType (ChecksumFlavor)(0), // 1: temporal.server.api.enums.v1.ChecksumFlavor + (CallbackState)(0), // 2: temporal.server.api.enums.v1.CallbackState } var file_temporal_server_api_enums_v1_common_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -220,8 +272,8 @@ func file_temporal_server_api_enums_v1_common_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_common_proto_rawDesc, - NumEnums: 2, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_common_proto_rawDesc), len(file_temporal_server_api_enums_v1_common_proto_rawDesc)), + NumEnums: 3, NumMessages: 0, NumExtensions: 0, NumServices: 0, @@ -231,7 +283,6 @@ func file_temporal_server_api_enums_v1_common_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_common_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_common_proto = out.File - file_temporal_server_api_enums_v1_common_proto_rawDesc = nil file_temporal_server_api_enums_v1_common_proto_goTypes = nil file_temporal_server_api_enums_v1_common_proto_depIdxs = nil } diff --git a/api/enums/v1/dlq.go-helpers.pb.go b/api/enums/v1/dlq.go-helpers.pb.go index e980cdb3b25..2fc3ac6434b 100644 --- a/api/enums/v1/dlq.go-helpers.pb.go +++ b/api/enums/v1/dlq.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package enums diff --git a/api/enums/v1/dlq.pb.go b/api/enums/v1/dlq.pb.go index 76c3bf8b393..250d7073edf 100644 --- a/api/enums/v1/dlq.pb.go +++ b/api/enums/v1/dlq.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -169,48 +148,33 @@ func (DLQOperationState) EnumDescriptor() ([]byte, []int) { var File_temporal_server_api_enums_v1_dlq_proto protoreflect.FileDescriptor -var file_temporal_server_api_enums_v1_dlq_proto_rawDesc = []byte{ - 0x0a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x64, - 0x6c, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0x72, 0x0a, 0x10, 0x44, 0x4c, 0x51, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x1e, 0x44, 0x4c, - 0x51, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, - 0x0a, 0x18, 0x44, 0x4c, 0x51, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x52, 0x47, 0x45, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, - 0x44, 0x4c, 0x51, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x50, 0x55, 0x52, 0x47, 0x45, 0x10, 0x02, 0x2a, 0x9c, 0x01, 0x0a, 0x11, 0x44, - 0x4c, 0x51, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4c, 0x51, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x44, 0x4c, 
0x51, 0x5f, 0x4f, 0x50, 0x45, - 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, - 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x44, 0x4c, 0x51, 0x5f, 0x4f, 0x50, - 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, - 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x44, 0x4c, 0x51, - 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x03, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x6f, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x3b, - 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_enums_v1_dlq_proto_rawDesc = "" + + "\n" + + "&temporal/server/api/enums/v1/dlq.proto\x12\x1ctemporal.server.api.enums.v1*r\n" + + "\x10DLQOperationType\x12\"\n" + + "\x1eDLQ_OPERATION_TYPE_UNSPECIFIED\x10\x00\x12\x1c\n" + + "\x18DLQ_OPERATION_TYPE_MERGE\x10\x01\x12\x1c\n" + + "\x18DLQ_OPERATION_TYPE_PURGE\x10\x02*\x9c\x01\n" + + "\x11DLQOperationState\x12#\n" + + "\x1fDLQ_OPERATION_STATE_UNSPECIFIED\x10\x00\x12\x1f\n" + + "\x1bDLQ_OPERATION_STATE_RUNNING\x10\x01\x12!\n" + + "\x1dDLQ_OPERATION_STATE_COMPLETED\x10\x02\x12\x1e\n" + + "\x1aDLQ_OPERATION_STATE_FAILED\x10\x03B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" var ( file_temporal_server_api_enums_v1_dlq_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_dlq_proto_rawDescData = file_temporal_server_api_enums_v1_dlq_proto_rawDesc + file_temporal_server_api_enums_v1_dlq_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_dlq_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_dlq_proto_rawDescOnce.Do(func() { - 
file_temporal_server_api_enums_v1_dlq_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_dlq_proto_rawDescData) + file_temporal_server_api_enums_v1_dlq_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_dlq_proto_rawDesc), len(file_temporal_server_api_enums_v1_dlq_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_dlq_proto_rawDescData } var file_temporal_server_api_enums_v1_dlq_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_temporal_server_api_enums_v1_dlq_proto_goTypes = []interface{}{ +var file_temporal_server_api_enums_v1_dlq_proto_goTypes = []any{ (DLQOperationType)(0), // 0: temporal.server.api.enums.v1.DLQOperationType (DLQOperationState)(0), // 1: temporal.server.api.enums.v1.DLQOperationState } @@ -231,7 +195,7 @@ func file_temporal_server_api_enums_v1_dlq_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_dlq_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_dlq_proto_rawDesc), len(file_temporal_server_api_enums_v1_dlq_proto_rawDesc)), NumEnums: 2, NumMessages: 0, NumExtensions: 0, @@ -242,7 +206,6 @@ func file_temporal_server_api_enums_v1_dlq_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_dlq_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_dlq_proto = out.File - file_temporal_server_api_enums_v1_dlq_proto_rawDesc = nil file_temporal_server_api_enums_v1_dlq_proto_goTypes = nil file_temporal_server_api_enums_v1_dlq_proto_depIdxs = nil } diff --git a/api/enums/v1/fairness_state.go-helpers.pb.go b/api/enums/v1/fairness_state.go-helpers.pb.go new file mode 100644 index 00000000000..61e0b929129 --- /dev/null +++ b/api/enums/v1/fairness_state.go-helpers.pb.go @@ -0,0 +1,26 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package enums + +import ( + "fmt" +) + +var ( + FairnessState_shorthandValue = map[string]int32{ + "Unspecified": 0, + "V0": 1, + "V1": 2, + "V2": 3, + } +) + +// FairnessStateFromString parses a FairnessState value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to FairnessState +func FairnessStateFromString(s string) (FairnessState, error) { + if v, ok := FairnessState_value[s]; ok { + return FairnessState(v), nil + } else if v, ok := FairnessState_shorthandValue[s]; ok { + return FairnessState(v), nil + } + return FairnessState(0), fmt.Errorf("%s is not a valid FairnessState", s) +} diff --git a/api/enums/v1/fairness_state.pb.go b/api/enums/v1/fairness_state.pb.go new file mode 100644 index 00000000000..bc700ad67a4 --- /dev/null +++ b/api/enums/v1/fairness_state.pb.go @@ -0,0 +1,147 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/enums/v1/fairness_state.proto + +package enums + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FairnessState int32 + +const ( + FAIRNESS_STATE_UNSPECIFIED FairnessState = 0 + FAIRNESS_STATE_V0 FairnessState = 1 + FAIRNESS_STATE_V1 FairnessState = 2 + FAIRNESS_STATE_V2 FairnessState = 3 +) + +// Enum value maps for FairnessState. 
+var ( + FairnessState_name = map[int32]string{ + 0: "FAIRNESS_STATE_UNSPECIFIED", + 1: "FAIRNESS_STATE_V0", + 2: "FAIRNESS_STATE_V1", + 3: "FAIRNESS_STATE_V2", + } + FairnessState_value = map[string]int32{ + "FAIRNESS_STATE_UNSPECIFIED": 0, + "FAIRNESS_STATE_V0": 1, + "FAIRNESS_STATE_V1": 2, + "FAIRNESS_STATE_V2": 3, + } +) + +func (x FairnessState) Enum() *FairnessState { + p := new(FairnessState) + *p = x + return p +} + +func (x FairnessState) String() string { + switch x { + case FAIRNESS_STATE_UNSPECIFIED: + return "Unspecified" + case FAIRNESS_STATE_V0: + return "V0" + case FAIRNESS_STATE_V1: + return "V1" + case FAIRNESS_STATE_V2: + return "V2" + default: + return strconv.Itoa(int(x)) + } + +} + +func (FairnessState) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_fairness_state_proto_enumTypes[0].Descriptor() +} + +func (FairnessState) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_fairness_state_proto_enumTypes[0] +} + +func (x FairnessState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FairnessState.Descriptor instead. 
+func (FairnessState) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_fairness_state_proto_rawDescGZIP(), []int{0} +} + +var File_temporal_server_api_enums_v1_fairness_state_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_fairness_state_proto_rawDesc = "" + + "\n" + + "1temporal/server/api/enums/v1/fairness_state.proto\x12\x1ctemporal.server.api.enums.v1*t\n" + + "\rFairnessState\x12\x1e\n" + + "\x1aFAIRNESS_STATE_UNSPECIFIED\x10\x00\x12\x15\n" + + "\x11FAIRNESS_STATE_V0\x10\x01\x12\x15\n" + + "\x11FAIRNESS_STATE_V1\x10\x02\x12\x15\n" + + "\x11FAIRNESS_STATE_V2\x10\x03B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + +var ( + file_temporal_server_api_enums_v1_fairness_state_proto_rawDescOnce sync.Once + file_temporal_server_api_enums_v1_fairness_state_proto_rawDescData []byte +) + +func file_temporal_server_api_enums_v1_fairness_state_proto_rawDescGZIP() []byte { + file_temporal_server_api_enums_v1_fairness_state_proto_rawDescOnce.Do(func() { + file_temporal_server_api_enums_v1_fairness_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_fairness_state_proto_rawDesc), len(file_temporal_server_api_enums_v1_fairness_state_proto_rawDesc))) + }) + return file_temporal_server_api_enums_v1_fairness_state_proto_rawDescData +} + +var file_temporal_server_api_enums_v1_fairness_state_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_api_enums_v1_fairness_state_proto_goTypes = []any{ + (FairnessState)(0), // 0: temporal.server.api.enums.v1.FairnessState +} +var file_temporal_server_api_enums_v1_fairness_state_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { 
file_temporal_server_api_enums_v1_fairness_state_proto_init() } +func file_temporal_server_api_enums_v1_fairness_state_proto_init() { + if File_temporal_server_api_enums_v1_fairness_state_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_fairness_state_proto_rawDesc), len(file_temporal_server_api_enums_v1_fairness_state_proto_rawDesc)), + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_enums_v1_fairness_state_proto_goTypes, + DependencyIndexes: file_temporal_server_api_enums_v1_fairness_state_proto_depIdxs, + EnumInfos: file_temporal_server_api_enums_v1_fairness_state_proto_enumTypes, + }.Build() + File_temporal_server_api_enums_v1_fairness_state_proto = out.File + file_temporal_server_api_enums_v1_fairness_state_proto_goTypes = nil + file_temporal_server_api_enums_v1_fairness_state_proto_depIdxs = nil +} diff --git a/api/enums/v1/nexus.go-helpers.pb.go b/api/enums/v1/nexus.go-helpers.pb.go new file mode 100644 index 00000000000..5501dc93e31 --- /dev/null +++ b/api/enums/v1/nexus.go-helpers.pb.go @@ -0,0 +1,30 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package enums + +import ( + "fmt" +) + +var ( + NexusOperationState_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Scheduled": 1, + "BackingOff": 2, + "Started": 3, + "Succeeded": 4, + "Failed": 5, + "Canceled": 6, + "TimedOut": 7, + } +) + +// NexusOperationStateFromString parses a NexusOperationState value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to NexusOperationState +func NexusOperationStateFromString(s string) (NexusOperationState, error) { + if v, ok := NexusOperationState_value[s]; ok { + return NexusOperationState(v), nil + } else if v, ok := NexusOperationState_shorthandValue[s]; ok { + return NexusOperationState(v), nil + } + return NexusOperationState(0), fmt.Errorf("%s is not a valid NexusOperationState", s) +} diff --git a/api/enums/v1/nexus.pb.go b/api/enums/v1/nexus.pb.go new file mode 100644 index 00000000000..64d8cbe6e0c --- /dev/null +++ b/api/enums/v1/nexus.pb.go @@ -0,0 +1,182 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/enums/v1/nexus.proto + +package enums + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type NexusOperationState int32 + +const ( + // Default value, unspecified state. + NEXUS_OPERATION_STATE_UNSPECIFIED NexusOperationState = 0 + // Operation is in the queue waiting to be executed or is currently executing. + NEXUS_OPERATION_STATE_SCHEDULED NexusOperationState = 1 + // Operation has failed with a retryable error and is backing off before the next attempt. 
+ NEXUS_OPERATION_STATE_BACKING_OFF NexusOperationState = 2 + // Operation was started and will complete asynchronously. + NEXUS_OPERATION_STATE_STARTED NexusOperationState = 3 + // Operation succeeded. + // This may happen either as a response to a start request or as reported via callback. + NEXUS_OPERATION_STATE_SUCCEEDED NexusOperationState = 4 + // Operation failed either when a start request encounters a non-retryable error or as reported via callback. + NEXUS_OPERATION_STATE_FAILED NexusOperationState = 5 + // Operation completed as canceled (may have not ever been delivered). + // This may happen either as a response to a start request or as reported via callback. + NEXUS_OPERATION_STATE_CANCELED NexusOperationState = 6 + // Operation timed out - exceeded the user supplied schedule-to-close timeout. + // Any attempts to complete the operation in this state will be ignored. + NEXUS_OPERATION_STATE_TIMED_OUT NexusOperationState = 7 +) + +// Enum value maps for NexusOperationState. 
+var ( + NexusOperationState_name = map[int32]string{ + 0: "NEXUS_OPERATION_STATE_UNSPECIFIED", + 1: "NEXUS_OPERATION_STATE_SCHEDULED", + 2: "NEXUS_OPERATION_STATE_BACKING_OFF", + 3: "NEXUS_OPERATION_STATE_STARTED", + 4: "NEXUS_OPERATION_STATE_SUCCEEDED", + 5: "NEXUS_OPERATION_STATE_FAILED", + 6: "NEXUS_OPERATION_STATE_CANCELED", + 7: "NEXUS_OPERATION_STATE_TIMED_OUT", + } + NexusOperationState_value = map[string]int32{ + "NEXUS_OPERATION_STATE_UNSPECIFIED": 0, + "NEXUS_OPERATION_STATE_SCHEDULED": 1, + "NEXUS_OPERATION_STATE_BACKING_OFF": 2, + "NEXUS_OPERATION_STATE_STARTED": 3, + "NEXUS_OPERATION_STATE_SUCCEEDED": 4, + "NEXUS_OPERATION_STATE_FAILED": 5, + "NEXUS_OPERATION_STATE_CANCELED": 6, + "NEXUS_OPERATION_STATE_TIMED_OUT": 7, + } +) + +func (x NexusOperationState) Enum() *NexusOperationState { + p := new(NexusOperationState) + *p = x + return p +} + +func (x NexusOperationState) String() string { + switch x { + case NEXUS_OPERATION_STATE_UNSPECIFIED: + return "Unspecified" + case NEXUS_OPERATION_STATE_SCHEDULED: + return "Scheduled" + case NEXUS_OPERATION_STATE_BACKING_OFF: + return "BackingOff" + case NEXUS_OPERATION_STATE_STARTED: + return "Started" + case NEXUS_OPERATION_STATE_SUCCEEDED: + return "Succeeded" + case NEXUS_OPERATION_STATE_FAILED: + return "Failed" + case NEXUS_OPERATION_STATE_CANCELED: + return "Canceled" + case NEXUS_OPERATION_STATE_TIMED_OUT: + return "TimedOut" + default: + return strconv.Itoa(int(x)) + } + +} + +func (NexusOperationState) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_nexus_proto_enumTypes[0].Descriptor() +} + +func (NexusOperationState) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_nexus_proto_enumTypes[0] +} + +func (x NexusOperationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NexusOperationState.Descriptor instead. 
+func (NexusOperationState) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_nexus_proto_rawDescGZIP(), []int{0} +} + +var File_temporal_server_api_enums_v1_nexus_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_nexus_proto_rawDesc = "" + + "\n" + + "(temporal/server/api/enums/v1/nexus.proto\x12\x1ctemporal.server.api.enums.v1*\xbb\x02\n" + + "\x13NexusOperationState\x12%\n" + + "!NEXUS_OPERATION_STATE_UNSPECIFIED\x10\x00\x12#\n" + + "\x1fNEXUS_OPERATION_STATE_SCHEDULED\x10\x01\x12%\n" + + "!NEXUS_OPERATION_STATE_BACKING_OFF\x10\x02\x12!\n" + + "\x1dNEXUS_OPERATION_STATE_STARTED\x10\x03\x12#\n" + + "\x1fNEXUS_OPERATION_STATE_SUCCEEDED\x10\x04\x12 \n" + + "\x1cNEXUS_OPERATION_STATE_FAILED\x10\x05\x12\"\n" + + "\x1eNEXUS_OPERATION_STATE_CANCELED\x10\x06\x12#\n" + + "\x1fNEXUS_OPERATION_STATE_TIMED_OUT\x10\aB*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + +var ( + file_temporal_server_api_enums_v1_nexus_proto_rawDescOnce sync.Once + file_temporal_server_api_enums_v1_nexus_proto_rawDescData []byte +) + +func file_temporal_server_api_enums_v1_nexus_proto_rawDescGZIP() []byte { + file_temporal_server_api_enums_v1_nexus_proto_rawDescOnce.Do(func() { + file_temporal_server_api_enums_v1_nexus_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_nexus_proto_rawDesc), len(file_temporal_server_api_enums_v1_nexus_proto_rawDesc))) + }) + return file_temporal_server_api_enums_v1_nexus_proto_rawDescData +} + +var file_temporal_server_api_enums_v1_nexus_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_api_enums_v1_nexus_proto_goTypes = []any{ + (NexusOperationState)(0), // 0: temporal.server.api.enums.v1.NexusOperationState +} +var file_temporal_server_api_enums_v1_nexus_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for 
extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_enums_v1_nexus_proto_init() } +func file_temporal_server_api_enums_v1_nexus_proto_init() { + if File_temporal_server_api_enums_v1_nexus_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_nexus_proto_rawDesc), len(file_temporal_server_api_enums_v1_nexus_proto_rawDesc)), + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_enums_v1_nexus_proto_goTypes, + DependencyIndexes: file_temporal_server_api_enums_v1_nexus_proto_depIdxs, + EnumInfos: file_temporal_server_api_enums_v1_nexus_proto_enumTypes, + }.Build() + File_temporal_server_api_enums_v1_nexus_proto = out.File + file_temporal_server_api_enums_v1_nexus_proto_goTypes = nil + file_temporal_server_api_enums_v1_nexus_proto_depIdxs = nil +} diff --git a/api/enums/v1/predicate.go-helpers.pb.go b/api/enums/v1/predicate.go-helpers.pb.go index a79699e5367..b75581ea722 100644 --- a/api/enums/v1/predicate.go-helpers.pb.go +++ b/api/enums/v1/predicate.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package enums @@ -31,15 +7,17 @@ import ( var ( PredicateType_shorthandValue = map[string]int32{ - "Unspecified": 0, - "Universal": 1, - "Empty": 2, - "And": 3, - "Or": 4, - "Not": 5, - "NamespaceId": 6, - "TaskType": 7, - "Destination": 8, + "Unspecified": 0, + "Universal": 1, + "Empty": 2, + "And": 3, + "Or": 4, + "Not": 5, + "NamespaceId": 6, + "TaskType": 7, + "Destination": 8, + "OutboundTaskGroup": 9, + "OutboundTask": 10, } ) diff --git a/api/enums/v1/predicate.pb.go b/api/enums/v1/predicate.pb.go index b3f9880e19e..1b910680ff9 100644 --- a/api/enums/v1/predicate.pb.go +++ b/api/enums/v1/predicate.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -47,40 +26,48 @@ const ( type PredicateType int32 const ( - PREDICATE_TYPE_UNSPECIFIED PredicateType = 0 - PREDICATE_TYPE_UNIVERSAL PredicateType = 1 - PREDICATE_TYPE_EMPTY PredicateType = 2 - PREDICATE_TYPE_AND PredicateType = 3 - PREDICATE_TYPE_OR PredicateType = 4 - PREDICATE_TYPE_NOT PredicateType = 5 - PREDICATE_TYPE_NAMESPACE_ID PredicateType = 6 - PREDICATE_TYPE_TASK_TYPE PredicateType = 7 - PREDICATE_TYPE_DESTINATION PredicateType = 8 + PREDICATE_TYPE_UNSPECIFIED PredicateType = 0 + PREDICATE_TYPE_UNIVERSAL PredicateType = 1 + PREDICATE_TYPE_EMPTY PredicateType = 2 + PREDICATE_TYPE_AND PredicateType = 3 + PREDICATE_TYPE_OR PredicateType = 4 + PREDICATE_TYPE_NOT PredicateType = 5 + PREDICATE_TYPE_NAMESPACE_ID PredicateType = 6 + PREDICATE_TYPE_TASK_TYPE PredicateType = 7 + PREDICATE_TYPE_DESTINATION PredicateType = 8 + PREDICATE_TYPE_OUTBOUND_TASK_GROUP PredicateType = 9 + // Predicate used for grouping outbound tasks. Consists of task_group, namespace_id, and destination. + // This replaces a previous implementation which used an AND predicate over 3 separate predicate types. + PREDICATE_TYPE_OUTBOUND_TASK PredicateType = 10 ) // Enum value maps for PredicateType. 
var ( PredicateType_name = map[int32]string{ - 0: "PREDICATE_TYPE_UNSPECIFIED", - 1: "PREDICATE_TYPE_UNIVERSAL", - 2: "PREDICATE_TYPE_EMPTY", - 3: "PREDICATE_TYPE_AND", - 4: "PREDICATE_TYPE_OR", - 5: "PREDICATE_TYPE_NOT", - 6: "PREDICATE_TYPE_NAMESPACE_ID", - 7: "PREDICATE_TYPE_TASK_TYPE", - 8: "PREDICATE_TYPE_DESTINATION", + 0: "PREDICATE_TYPE_UNSPECIFIED", + 1: "PREDICATE_TYPE_UNIVERSAL", + 2: "PREDICATE_TYPE_EMPTY", + 3: "PREDICATE_TYPE_AND", + 4: "PREDICATE_TYPE_OR", + 5: "PREDICATE_TYPE_NOT", + 6: "PREDICATE_TYPE_NAMESPACE_ID", + 7: "PREDICATE_TYPE_TASK_TYPE", + 8: "PREDICATE_TYPE_DESTINATION", + 9: "PREDICATE_TYPE_OUTBOUND_TASK_GROUP", + 10: "PREDICATE_TYPE_OUTBOUND_TASK", } PredicateType_value = map[string]int32{ - "PREDICATE_TYPE_UNSPECIFIED": 0, - "PREDICATE_TYPE_UNIVERSAL": 1, - "PREDICATE_TYPE_EMPTY": 2, - "PREDICATE_TYPE_AND": 3, - "PREDICATE_TYPE_OR": 4, - "PREDICATE_TYPE_NOT": 5, - "PREDICATE_TYPE_NAMESPACE_ID": 6, - "PREDICATE_TYPE_TASK_TYPE": 7, - "PREDICATE_TYPE_DESTINATION": 8, + "PREDICATE_TYPE_UNSPECIFIED": 0, + "PREDICATE_TYPE_UNIVERSAL": 1, + "PREDICATE_TYPE_EMPTY": 2, + "PREDICATE_TYPE_AND": 3, + "PREDICATE_TYPE_OR": 4, + "PREDICATE_TYPE_NOT": 5, + "PREDICATE_TYPE_NAMESPACE_ID": 6, + "PREDICATE_TYPE_TASK_TYPE": 7, + "PREDICATE_TYPE_DESTINATION": 8, + "PREDICATE_TYPE_OUTBOUND_TASK_GROUP": 9, + "PREDICATE_TYPE_OUTBOUND_TASK": 10, } ) @@ -110,6 +97,10 @@ func (x PredicateType) String() string { return "TaskType" case PREDICATE_TYPE_DESTINATION: return "Destination" + case PREDICATE_TYPE_OUTBOUND_TASK_GROUP: + return "OutboundTaskGroup" + case PREDICATE_TYPE_OUTBOUND_TASK: + return "OutboundTask" default: return strconv.Itoa(int(x)) } @@ -128,55 +122,44 @@ func (x PredicateType) Number() protoreflect.EnumNumber { return protoreflect.EnumNumber(x) } // Deprecated: Use PredicateType.Descriptor instead. 
func (PredicateType) EnumDescriptor() ([]byte, []int) { return file_temporal_server_api_enums_v1_predicate_proto_rawDescGZIP(), []int{0} } var File_temporal_server_api_enums_v1_predicate_proto protoreflect.FileDescriptor -var file_temporal_server_api_enums_v1_predicate_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x70, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0x8d, 0x02, 0x0a, - 0x0d, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1e, - 0x0a, 0x1a, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1c, - 0x0a, 0x18, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, - 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, - 0x4d, 0x50, 0x54, 0x59, 0x10, 0x02, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, - 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x10, 0x03, 0x12, 0x15, - 0x0a, 0x11, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x4f, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, - 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x10, 0x05, 0x12, 0x1f, 0x0a, - 0x1b, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, 
0x41, 0x43, 0x45, 0x5f, 0x49, 0x44, 0x10, 0x06, 0x12, 0x1c, - 0x0a, 0x18, 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x10, 0x07, 0x12, 0x1e, 0x0a, 0x1a, - 0x50, 0x52, 0x45, 0x44, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, - 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x08, 0x42, 0x2a, 0x5a, 0x28, - 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, - 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_enums_v1_predicate_proto_rawDesc = "" + + "\n" + + ",temporal/server/api/enums/v1/predicate.proto\x12\x1ctemporal.server.api.enums.v1*\xd7\x02\n" + + "\rPredicateType\x12\x1e\n" + + "\x1aPREDICATE_TYPE_UNSPECIFIED\x10\x00\x12\x1c\n" + + "\x18PREDICATE_TYPE_UNIVERSAL\x10\x01\x12\x18\n" + + "\x14PREDICATE_TYPE_EMPTY\x10\x02\x12\x16\n" + + "\x12PREDICATE_TYPE_AND\x10\x03\x12\x15\n" + + "\x11PREDICATE_TYPE_OR\x10\x04\x12\x16\n" + + "\x12PREDICATE_TYPE_NOT\x10\x05\x12\x1f\n" + + "\x1bPREDICATE_TYPE_NAMESPACE_ID\x10\x06\x12\x1c\n" + + "\x18PREDICATE_TYPE_TASK_TYPE\x10\a\x12\x1e\n" + + "\x1aPREDICATE_TYPE_DESTINATION\x10\b\x12&\n" + + "\"PREDICATE_TYPE_OUTBOUND_TASK_GROUP\x10\t\x12 \n" + + "\x1cPREDICATE_TYPE_OUTBOUND_TASK\x10\n" + + "B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" var ( file_temporal_server_api_enums_v1_predicate_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_predicate_proto_rawDescData = file_temporal_server_api_enums_v1_predicate_proto_rawDesc + file_temporal_server_api_enums_v1_predicate_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_predicate_proto_rawDescGZIP() []byte { 
file_temporal_server_api_enums_v1_predicate_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_predicate_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_predicate_proto_rawDescData) + file_temporal_server_api_enums_v1_predicate_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_predicate_proto_rawDesc), len(file_temporal_server_api_enums_v1_predicate_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_predicate_proto_rawDescData } var file_temporal_server_api_enums_v1_predicate_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_temporal_server_api_enums_v1_predicate_proto_goTypes = []interface{}{ +var file_temporal_server_api_enums_v1_predicate_proto_goTypes = []any{ (PredicateType)(0), // 0: temporal.server.api.enums.v1.PredicateType } var file_temporal_server_api_enums_v1_predicate_proto_depIdxs = []int32{ @@ -196,7 +178,7 @@ func file_temporal_server_api_enums_v1_predicate_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_predicate_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_predicate_proto_rawDesc), len(file_temporal_server_api_enums_v1_predicate_proto_rawDesc)), NumEnums: 1, NumMessages: 0, NumExtensions: 0, @@ -207,7 +189,6 @@ func file_temporal_server_api_enums_v1_predicate_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_predicate_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_predicate_proto = out.File - file_temporal_server_api_enums_v1_predicate_proto_rawDesc = nil file_temporal_server_api_enums_v1_predicate_proto_goTypes = nil file_temporal_server_api_enums_v1_predicate_proto_depIdxs = nil } diff --git a/api/enums/v1/replication.go-helpers.pb.go b/api/enums/v1/replication.go-helpers.pb.go index b2eb2a2b18d..33656860401 100644 --- 
a/api/enums/v1/replication.go-helpers.pb.go +++ b/api/enums/v1/replication.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package enums @@ -31,15 +7,20 @@ import ( var ( ReplicationTaskType_shorthandValue = map[string]int32{ - "Unspecified": 0, - "NamespaceTask": 1, - "HistoryTask": 2, - "SyncShardStatusTask": 3, - "SyncActivityTask": 4, - "HistoryMetadataTask": 5, - "HistoryV2Task": 6, - "SyncWorkflowStateTask": 7, - "TaskQueueUserData": 8, + "Unspecified": 0, + "NamespaceTask": 1, + "HistoryTask": 2, + "SyncShardStatusTask": 3, + "SyncActivityTask": 4, + "HistoryMetadataTask": 5, + "HistoryV2Task": 6, + "SyncWorkflowStateTask": 7, + "TaskQueueUserData": 8, + "SyncHsmTask": 9, + "BackfillHistoryTask": 10, + "VerifyVersionedTransitionTask": 11, + "SyncVersionedTransitionTask": 12, + "DeleteExecutionTask": 13, } ) @@ -72,3 +53,22 @@ func NamespaceOperationFromString(s string) (NamespaceOperation, error) { } return NamespaceOperation(0), fmt.Errorf("%s is not a valid NamespaceOperation", s) } + +var ( + ReplicationFlowControlCommand_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Resume": 1, + "Pause": 2, + } +) + +// ReplicationFlowControlCommandFromString parses a ReplicationFlowControlCommand value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to ReplicationFlowControlCommand +func ReplicationFlowControlCommandFromString(s string) (ReplicationFlowControlCommand, error) { + if v, ok := ReplicationFlowControlCommand_value[s]; ok { + return ReplicationFlowControlCommand(v), nil + } else if v, ok := ReplicationFlowControlCommand_shorthandValue[s]; ok { + return ReplicationFlowControlCommand(v), nil + } + return ReplicationFlowControlCommand(0), fmt.Errorf("%s is not a valid ReplicationFlowControlCommand", s) +} diff --git a/api/enums/v1/replication.pb.go b/api/enums/v1/replication.pb.go index 3b6ecfad4f3..49cf16cc13d 100644 --- a/api/enums/v1/replication.pb.go +++ b/api/enums/v1/replication.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -47,40 +26,55 @@ const ( type ReplicationTaskType int32 const ( - REPLICATION_TASK_TYPE_UNSPECIFIED ReplicationTaskType = 0 - REPLICATION_TASK_TYPE_NAMESPACE_TASK ReplicationTaskType = 1 - REPLICATION_TASK_TYPE_HISTORY_TASK ReplicationTaskType = 2 - REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK ReplicationTaskType = 3 - REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK ReplicationTaskType = 4 - REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK ReplicationTaskType = 5 - REPLICATION_TASK_TYPE_HISTORY_V2_TASK ReplicationTaskType = 6 - REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK ReplicationTaskType = 7 - REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA ReplicationTaskType = 8 + REPLICATION_TASK_TYPE_UNSPECIFIED ReplicationTaskType = 0 + REPLICATION_TASK_TYPE_NAMESPACE_TASK ReplicationTaskType = 1 + REPLICATION_TASK_TYPE_HISTORY_TASK ReplicationTaskType = 2 + REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK ReplicationTaskType = 3 + REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK ReplicationTaskType = 4 + REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK ReplicationTaskType = 5 + REPLICATION_TASK_TYPE_HISTORY_V2_TASK ReplicationTaskType = 6 + REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK ReplicationTaskType = 7 + REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA ReplicationTaskType = 8 + REPLICATION_TASK_TYPE_SYNC_HSM_TASK ReplicationTaskType = 9 + REPLICATION_TASK_TYPE_BACKFILL_HISTORY_TASK ReplicationTaskType = 10 + REPLICATION_TASK_TYPE_VERIFY_VERSIONED_TRANSITION_TASK ReplicationTaskType = 11 + REPLICATION_TASK_TYPE_SYNC_VERSIONED_TRANSITION_TASK ReplicationTaskType = 12 + REPLICATION_TASK_TYPE_DELETE_EXECUTION_TASK ReplicationTaskType = 13 ) // Enum value maps for ReplicationTaskType. 
var ( ReplicationTaskType_name = map[int32]string{ - 0: "REPLICATION_TASK_TYPE_UNSPECIFIED", - 1: "REPLICATION_TASK_TYPE_NAMESPACE_TASK", - 2: "REPLICATION_TASK_TYPE_HISTORY_TASK", - 3: "REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK", - 4: "REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK", - 5: "REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK", - 6: "REPLICATION_TASK_TYPE_HISTORY_V2_TASK", - 7: "REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK", - 8: "REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA", + 0: "REPLICATION_TASK_TYPE_UNSPECIFIED", + 1: "REPLICATION_TASK_TYPE_NAMESPACE_TASK", + 2: "REPLICATION_TASK_TYPE_HISTORY_TASK", + 3: "REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK", + 4: "REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK", + 5: "REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK", + 6: "REPLICATION_TASK_TYPE_HISTORY_V2_TASK", + 7: "REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK", + 8: "REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA", + 9: "REPLICATION_TASK_TYPE_SYNC_HSM_TASK", + 10: "REPLICATION_TASK_TYPE_BACKFILL_HISTORY_TASK", + 11: "REPLICATION_TASK_TYPE_VERIFY_VERSIONED_TRANSITION_TASK", + 12: "REPLICATION_TASK_TYPE_SYNC_VERSIONED_TRANSITION_TASK", + 13: "REPLICATION_TASK_TYPE_DELETE_EXECUTION_TASK", } ReplicationTaskType_value = map[string]int32{ - "REPLICATION_TASK_TYPE_UNSPECIFIED": 0, - "REPLICATION_TASK_TYPE_NAMESPACE_TASK": 1, - "REPLICATION_TASK_TYPE_HISTORY_TASK": 2, - "REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK": 3, - "REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK": 4, - "REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK": 5, - "REPLICATION_TASK_TYPE_HISTORY_V2_TASK": 6, - "REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK": 7, - "REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA": 8, + "REPLICATION_TASK_TYPE_UNSPECIFIED": 0, + "REPLICATION_TASK_TYPE_NAMESPACE_TASK": 1, + "REPLICATION_TASK_TYPE_HISTORY_TASK": 2, + "REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK": 3, + "REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK": 4, + "REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK": 5, + 
"REPLICATION_TASK_TYPE_HISTORY_V2_TASK": 6, + "REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK": 7, + "REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA": 8, + "REPLICATION_TASK_TYPE_SYNC_HSM_TASK": 9, + "REPLICATION_TASK_TYPE_BACKFILL_HISTORY_TASK": 10, + "REPLICATION_TASK_TYPE_VERIFY_VERSIONED_TRANSITION_TASK": 11, + "REPLICATION_TASK_TYPE_SYNC_VERSIONED_TRANSITION_TASK": 12, + "REPLICATION_TASK_TYPE_DELETE_EXECUTION_TASK": 13, } ) @@ -112,6 +106,16 @@ func (x ReplicationTaskType) String() string { return "SyncWorkflowStateTask" case REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA: return "TaskQueueUserData" + case REPLICATION_TASK_TYPE_SYNC_HSM_TASK: + return "SyncHsmTask" + case REPLICATION_TASK_TYPE_BACKFILL_HISTORY_TASK: + return "BackfillHistoryTask" + case REPLICATION_TASK_TYPE_VERIFY_VERSIONED_TRANSITION_TASK: + return "VerifyVersionedTransitionTask" + case REPLICATION_TASK_TYPE_SYNC_VERSIONED_TRANSITION_TASK: + return "SyncVersionedTransitionTask" + case REPLICATION_TASK_TYPE_DELETE_EXECUTION_TASK: + return "DeleteExecutionTask" default: return strconv.Itoa(int(x)) } @@ -142,7 +148,7 @@ const ( NAMESPACE_OPERATION_UPDATE NamespaceOperation = 2 ) // Enum value maps for NamespaceOperation. var ( NamespaceOperation_name = map[int32]string{ 0: "NAMESPACE_OPERATION_UNSPECIFIED", @@ -193,70 +198,112 @@ func (NamespaceOperation) EnumDescriptor() ([]byte, []int) { return file_temporal_server_api_enums_v1_replication_proto_rawDescGZIP(), []int{1} } -var File_temporal_server_api_enums_v1_replication_proto protoreflect.FileDescriptor +type ReplicationFlowControlCommand int32 + +const ( + REPLICATION_FLOW_CONTROL_COMMAND_UNSPECIFIED ReplicationFlowControlCommand = 0 + REPLICATION_FLOW_CONTROL_COMMAND_RESUME ReplicationFlowControlCommand = 1 + REPLICATION_FLOW_CONTROL_COMMAND_PAUSE ReplicationFlowControlCommand = 2 +) + +// Enum value maps for ReplicationFlowControlCommand. 
+var ( + ReplicationFlowControlCommand_name = map[int32]string{ + 0: "REPLICATION_FLOW_CONTROL_COMMAND_UNSPECIFIED", + 1: "REPLICATION_FLOW_CONTROL_COMMAND_RESUME", + 2: "REPLICATION_FLOW_CONTROL_COMMAND_PAUSE", + } + ReplicationFlowControlCommand_value = map[string]int32{ + "REPLICATION_FLOW_CONTROL_COMMAND_UNSPECIFIED": 0, + "REPLICATION_FLOW_CONTROL_COMMAND_RESUME": 1, + "REPLICATION_FLOW_CONTROL_COMMAND_PAUSE": 2, + } +) + +func (x ReplicationFlowControlCommand) Enum() *ReplicationFlowControlCommand { + p := new(ReplicationFlowControlCommand) + *p = x + return p +} + +func (x ReplicationFlowControlCommand) String() string { + switch x { + case REPLICATION_FLOW_CONTROL_COMMAND_UNSPECIFIED: + return "Unspecified" + case REPLICATION_FLOW_CONTROL_COMMAND_RESUME: + return "Resume" + case REPLICATION_FLOW_CONTROL_COMMAND_PAUSE: + return "Pause" + default: + return strconv.Itoa(int(x)) + } + +} + +func (ReplicationFlowControlCommand) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_replication_proto_enumTypes[2].Descriptor() +} -var file_temporal_server_api_enums_v1_replication_proto_rawDesc = []byte{ - 0x0a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x1c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0xae, - 0x03, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, - 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x21, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, - 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 
0x44, 0x10, 0x00, 0x12, 0x28, 0x0a, - 0x24, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x41, 0x53, - 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, 0x41, 0x43, 0x45, - 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x01, 0x12, 0x26, 0x0a, 0x22, 0x52, 0x45, 0x50, 0x4c, 0x49, - 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x02, 0x12, - 0x30, 0x0a, 0x2c, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x53, 0x48, - 0x41, 0x52, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, - 0x03, 0x12, 0x2c, 0x0a, 0x28, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, - 0x41, 0x43, 0x54, 0x49, 0x56, 0x49, 0x54, 0x59, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x04, 0x12, - 0x2f, 0x0a, 0x2b, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x05, - 0x12, 0x29, 0x0a, 0x25, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, - 0x59, 0x5f, 0x56, 0x32, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x06, 0x12, 0x32, 0x0a, 0x2e, 0x52, - 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, - 0x4f, 0x57, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x07, 0x12, - 
0x2e, 0x0a, 0x2a, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x51, 0x55, - 0x45, 0x55, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x10, 0x08, 0x2a, - 0x79, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x1f, 0x4e, 0x41, 0x4d, 0x45, 0x53, 0x50, 0x41, - 0x43, 0x45, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1e, 0x0a, 0x1a, 0x4e, 0x41, - 0x4d, 0x45, 0x53, 0x50, 0x41, 0x43, 0x45, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x4e, 0x41, - 0x4d, 0x45, 0x53, 0x50, 0x41, 0x43, 0x45, 0x5f, 0x4f, 0x50, 0x45, 0x52, 0x41, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x10, 0x02, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x6f, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (ReplicationFlowControlCommand) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_replication_proto_enumTypes[2] } +func (x ReplicationFlowControlCommand) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ReplicationFlowControlCommand.Descriptor instead. 
+func (ReplicationFlowControlCommand) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_replication_proto_rawDescGZIP(), []int{2} +} + +var File_temporal_server_api_enums_v1_replication_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_replication_proto_rawDesc = "" + + "\n" + + ".temporal/server/api/enums/v1/replication.proto\x12\x1ctemporal.server.api.enums.v1*\xaf\x05\n" + + "\x13ReplicationTaskType\x12%\n" + + "!REPLICATION_TASK_TYPE_UNSPECIFIED\x10\x00\x12(\n" + + "$REPLICATION_TASK_TYPE_NAMESPACE_TASK\x10\x01\x12&\n" + + "\"REPLICATION_TASK_TYPE_HISTORY_TASK\x10\x02\x120\n" + + ",REPLICATION_TASK_TYPE_SYNC_SHARD_STATUS_TASK\x10\x03\x12,\n" + + "(REPLICATION_TASK_TYPE_SYNC_ACTIVITY_TASK\x10\x04\x12/\n" + + "+REPLICATION_TASK_TYPE_HISTORY_METADATA_TASK\x10\x05\x12)\n" + + "%REPLICATION_TASK_TYPE_HISTORY_V2_TASK\x10\x06\x122\n" + + ".REPLICATION_TASK_TYPE_SYNC_WORKFLOW_STATE_TASK\x10\a\x12.\n" + + "*REPLICATION_TASK_TYPE_TASK_QUEUE_USER_DATA\x10\b\x12'\n" + + "#REPLICATION_TASK_TYPE_SYNC_HSM_TASK\x10\t\x12/\n" + + "+REPLICATION_TASK_TYPE_BACKFILL_HISTORY_TASK\x10\n" + + "\x12:\n" + + "6REPLICATION_TASK_TYPE_VERIFY_VERSIONED_TRANSITION_TASK\x10\v\x128\n" + + "4REPLICATION_TASK_TYPE_SYNC_VERSIONED_TRANSITION_TASK\x10\f\x12/\n" + + "+REPLICATION_TASK_TYPE_DELETE_EXECUTION_TASK\x10\r*y\n" + + "\x12NamespaceOperation\x12#\n" + + "\x1fNAMESPACE_OPERATION_UNSPECIFIED\x10\x00\x12\x1e\n" + + "\x1aNAMESPACE_OPERATION_CREATE\x10\x01\x12\x1e\n" + + "\x1aNAMESPACE_OPERATION_UPDATE\x10\x02*\xaa\x01\n" + + "\x1dReplicationFlowControlCommand\x120\n" + + ",REPLICATION_FLOW_CONTROL_COMMAND_UNSPECIFIED\x10\x00\x12+\n" + + "'REPLICATION_FLOW_CONTROL_COMMAND_RESUME\x10\x01\x12*\n" + + "&REPLICATION_FLOW_CONTROL_COMMAND_PAUSE\x10\x02B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + var ( file_temporal_server_api_enums_v1_replication_proto_rawDescOnce sync.Once - 
file_temporal_server_api_enums_v1_replication_proto_rawDescData = file_temporal_server_api_enums_v1_replication_proto_rawDesc + file_temporal_server_api_enums_v1_replication_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_replication_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_replication_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_replication_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_replication_proto_rawDescData) + file_temporal_server_api_enums_v1_replication_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_replication_proto_rawDesc), len(file_temporal_server_api_enums_v1_replication_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_replication_proto_rawDescData } -var file_temporal_server_api_enums_v1_replication_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_temporal_server_api_enums_v1_replication_proto_goTypes = []interface{}{ - (ReplicationTaskType)(0), // 0: temporal.server.api.enums.v1.ReplicationTaskType - (NamespaceOperation)(0), // 1: temporal.server.api.enums.v1.NamespaceOperation +var file_temporal_server_api_enums_v1_replication_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_temporal_server_api_enums_v1_replication_proto_goTypes = []any{ + (ReplicationTaskType)(0), // 0: temporal.server.api.enums.v1.ReplicationTaskType + (NamespaceOperation)(0), // 1: temporal.server.api.enums.v1.NamespaceOperation + (ReplicationFlowControlCommand)(0), // 2: temporal.server.api.enums.v1.ReplicationFlowControlCommand } var file_temporal_server_api_enums_v1_replication_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -275,8 +322,8 @@ func file_temporal_server_api_enums_v1_replication_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: 
file_temporal_server_api_enums_v1_replication_proto_rawDesc, - NumEnums: 2, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_replication_proto_rawDesc), len(file_temporal_server_api_enums_v1_replication_proto_rawDesc)), + NumEnums: 3, NumMessages: 0, NumExtensions: 0, NumServices: 0, @@ -286,7 +333,6 @@ func file_temporal_server_api_enums_v1_replication_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_replication_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_replication_proto = out.File - file_temporal_server_api_enums_v1_replication_proto_rawDesc = nil file_temporal_server_api_enums_v1_replication_proto_goTypes = nil file_temporal_server_api_enums_v1_replication_proto_depIdxs = nil } diff --git a/api/enums/v1/task.go-helpers.pb.go b/api/enums/v1/task.go-helpers.pb.go index 560fed65d10..a90759fa5f0 100644 --- a/api/enums/v1/task.go-helpers.pb.go +++ b/api/enums/v1/task.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package enums @@ -50,32 +26,40 @@ func TaskSourceFromString(s string) (TaskSource, error) { var ( TaskType_shorthandValue = map[string]int32{ - "Unspecified": 0, - "ReplicationHistory": 1, - "ReplicationSyncActivity": 2, - "TransferWorkflowTask": 3, - "TransferActivityTask": 4, - "TransferCloseExecution": 5, - "TransferCancelExecution": 6, - "TransferStartChildExecution": 7, - "TransferSignalExecution": 8, - "TransferResetWorkflow": 10, - "WorkflowTaskTimeout": 12, - "ActivityTimeout": 13, - "UserTimer": 14, - "WorkflowRunTimeout": 15, - "DeleteHistoryEvent": 16, - "ActivityRetryTimer": 17, - "WorkflowBackoffTimer": 18, - "VisibilityStartExecution": 19, - "VisibilityUpsertExecution": 20, - "VisibilityCloseExecution": 21, - "VisibilityDeleteExecution": 22, - "TransferDeleteExecution": 24, - "ReplicationSyncWorkflowState": 25, - "ArchivalArchiveExecution": 26, - "StateMachineOutbound": 27, - "StateMachineTimer": 28, + "Unspecified": 0, + "ReplicationHistory": 1, + "ReplicationSyncActivity": 2, + "TransferWorkflowTask": 3, + "TransferActivityTask": 4, + "TransferCloseExecution": 5, + "TransferCancelExecution": 6, + "TransferStartChildExecution": 7, + "TransferSignalExecution": 8, + "TransferResetWorkflow": 10, + "WorkflowTaskTimeout": 12, + "ActivityTimeout": 13, + "UserTimer": 14, + "WorkflowRunTimeout": 15, + "DeleteHistoryEvent": 16, + "ActivityRetryTimer": 17, + "WorkflowBackoffTimer": 18, + "VisibilityStartExecution": 19, + "VisibilityUpsertExecution": 20, + "VisibilityCloseExecution": 21, + "VisibilityDeleteExecution": 22, + "TransferDeleteExecution": 24, + "ReplicationSyncWorkflowState": 25, + "ArchivalArchiveExecution": 26, + 
"StateMachineOutbound": 27, + "StateMachineTimer": 28, + "WorkflowExecutionTimeout": 29, + "ReplicationSyncHsm": 30, + "ReplicationSyncVersionedTransition": 31, + "ChasmPure": 32, + "Chasm": 33, + "ReplicationDeleteExecution": 34, + "WorkerCommands": 35, + "TimeskippingTimer": 36, } ) @@ -89,3 +73,22 @@ func TaskTypeFromString(s string) (TaskType, error) { } return TaskType(0), fmt.Errorf("%s is not a valid TaskType", s) } + +var ( + TaskPriority_shorthandValue = map[string]int32{ + "Unspecified": 0, + "High": 1, + "Low": 10, + } +) + +// TaskPriorityFromString parses a TaskPriority value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to TaskPriority +func TaskPriorityFromString(s string) (TaskPriority, error) { + if v, ok := TaskPriority_value[s]; ok { + return TaskPriority(v), nil + } else if v, ok := TaskPriority_shorthandValue[s]; ok { + return TaskPriority(v), nil + } + return TaskPriority(0), fmt.Errorf("%s is not a valid TaskPriority", s) +} diff --git a/api/enums/v1/task.pb.go b/api/enums/v1/task.pb.go index 2505439c9e6..c8c9b100462 100644 --- a/api/enums/v1/task.pb.go +++ b/api/enums/v1/task.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -139,6 +118,20 @@ const ( TASK_TYPE_STATE_MACHINE_OUTBOUND TaskType = 27 // A timer task generated by a state machine. TASK_TYPE_STATE_MACHINE_TIMER TaskType = 28 + // Timeout task for the entire workflow execution chain. + TASK_TYPE_WORKFLOW_EXECUTION_TIMEOUT TaskType = 29 + TASK_TYPE_REPLICATION_SYNC_HSM TaskType = 30 + TASK_TYPE_REPLICATION_SYNC_VERSIONED_TRANSITION TaskType = 31 + // A task that applies a batch of state changes to a CHASM entity. + TASK_TYPE_CHASM_PURE TaskType = 32 + // A task with side effects generated by a CHASM component. + TASK_TYPE_CHASM TaskType = 33 + // A replication task that deletes workflow on passive cluster(s). + TASK_TYPE_REPLICATION_DELETE_EXECUTION TaskType = 34 + // A task to send worker commands via Nexus. + TASK_TYPE_WORKER_COMMANDS TaskType = 35 + // A timer task that fires when an elapsed-duration time-skipping bound is reached. + TASK_TYPE_TIMESKIPPING_TIMER TaskType = 36 ) // Enum value maps for TaskType. 
@@ -170,34 +163,50 @@ var ( 26: "TASK_TYPE_ARCHIVAL_ARCHIVE_EXECUTION", 27: "TASK_TYPE_STATE_MACHINE_OUTBOUND", 28: "TASK_TYPE_STATE_MACHINE_TIMER", + 29: "TASK_TYPE_WORKFLOW_EXECUTION_TIMEOUT", + 30: "TASK_TYPE_REPLICATION_SYNC_HSM", + 31: "TASK_TYPE_REPLICATION_SYNC_VERSIONED_TRANSITION", + 32: "TASK_TYPE_CHASM_PURE", + 33: "TASK_TYPE_CHASM", + 34: "TASK_TYPE_REPLICATION_DELETE_EXECUTION", + 35: "TASK_TYPE_WORKER_COMMANDS", + 36: "TASK_TYPE_TIMESKIPPING_TIMER", } TaskType_value = map[string]int32{ - "TASK_TYPE_UNSPECIFIED": 0, - "TASK_TYPE_REPLICATION_HISTORY": 1, - "TASK_TYPE_REPLICATION_SYNC_ACTIVITY": 2, - "TASK_TYPE_TRANSFER_WORKFLOW_TASK": 3, - "TASK_TYPE_TRANSFER_ACTIVITY_TASK": 4, - "TASK_TYPE_TRANSFER_CLOSE_EXECUTION": 5, - "TASK_TYPE_TRANSFER_CANCEL_EXECUTION": 6, - "TASK_TYPE_TRANSFER_START_CHILD_EXECUTION": 7, - "TASK_TYPE_TRANSFER_SIGNAL_EXECUTION": 8, - "TASK_TYPE_TRANSFER_RESET_WORKFLOW": 10, - "TASK_TYPE_WORKFLOW_TASK_TIMEOUT": 12, - "TASK_TYPE_ACTIVITY_TIMEOUT": 13, - "TASK_TYPE_USER_TIMER": 14, - "TASK_TYPE_WORKFLOW_RUN_TIMEOUT": 15, - "TASK_TYPE_DELETE_HISTORY_EVENT": 16, - "TASK_TYPE_ACTIVITY_RETRY_TIMER": 17, - "TASK_TYPE_WORKFLOW_BACKOFF_TIMER": 18, - "TASK_TYPE_VISIBILITY_START_EXECUTION": 19, - "TASK_TYPE_VISIBILITY_UPSERT_EXECUTION": 20, - "TASK_TYPE_VISIBILITY_CLOSE_EXECUTION": 21, - "TASK_TYPE_VISIBILITY_DELETE_EXECUTION": 22, - "TASK_TYPE_TRANSFER_DELETE_EXECUTION": 24, - "TASK_TYPE_REPLICATION_SYNC_WORKFLOW_STATE": 25, - "TASK_TYPE_ARCHIVAL_ARCHIVE_EXECUTION": 26, - "TASK_TYPE_STATE_MACHINE_OUTBOUND": 27, - "TASK_TYPE_STATE_MACHINE_TIMER": 28, + "TASK_TYPE_UNSPECIFIED": 0, + "TASK_TYPE_REPLICATION_HISTORY": 1, + "TASK_TYPE_REPLICATION_SYNC_ACTIVITY": 2, + "TASK_TYPE_TRANSFER_WORKFLOW_TASK": 3, + "TASK_TYPE_TRANSFER_ACTIVITY_TASK": 4, + "TASK_TYPE_TRANSFER_CLOSE_EXECUTION": 5, + "TASK_TYPE_TRANSFER_CANCEL_EXECUTION": 6, + "TASK_TYPE_TRANSFER_START_CHILD_EXECUTION": 7, + "TASK_TYPE_TRANSFER_SIGNAL_EXECUTION": 8, + 
"TASK_TYPE_TRANSFER_RESET_WORKFLOW": 10, + "TASK_TYPE_WORKFLOW_TASK_TIMEOUT": 12, + "TASK_TYPE_ACTIVITY_TIMEOUT": 13, + "TASK_TYPE_USER_TIMER": 14, + "TASK_TYPE_WORKFLOW_RUN_TIMEOUT": 15, + "TASK_TYPE_DELETE_HISTORY_EVENT": 16, + "TASK_TYPE_ACTIVITY_RETRY_TIMER": 17, + "TASK_TYPE_WORKFLOW_BACKOFF_TIMER": 18, + "TASK_TYPE_VISIBILITY_START_EXECUTION": 19, + "TASK_TYPE_VISIBILITY_UPSERT_EXECUTION": 20, + "TASK_TYPE_VISIBILITY_CLOSE_EXECUTION": 21, + "TASK_TYPE_VISIBILITY_DELETE_EXECUTION": 22, + "TASK_TYPE_TRANSFER_DELETE_EXECUTION": 24, + "TASK_TYPE_REPLICATION_SYNC_WORKFLOW_STATE": 25, + "TASK_TYPE_ARCHIVAL_ARCHIVE_EXECUTION": 26, + "TASK_TYPE_STATE_MACHINE_OUTBOUND": 27, + "TASK_TYPE_STATE_MACHINE_TIMER": 28, + "TASK_TYPE_WORKFLOW_EXECUTION_TIMEOUT": 29, + "TASK_TYPE_REPLICATION_SYNC_HSM": 30, + "TASK_TYPE_REPLICATION_SYNC_VERSIONED_TRANSITION": 31, + "TASK_TYPE_CHASM_PURE": 32, + "TASK_TYPE_CHASM": 33, + "TASK_TYPE_REPLICATION_DELETE_EXECUTION": 34, + "TASK_TYPE_WORKER_COMMANDS": 35, + "TASK_TYPE_TIMESKIPPING_TIMER": 36, } ) @@ -230,6 +239,8 @@ func (x TaskType) String() string { case TASK_TYPE_TRANSFER_SIGNAL_EXECUTION: return "TransferSignalExecution" case TASK_TYPE_TRANSFER_RESET_WORKFLOW: + + // TaskPriority is only used for replication task as of May 2024 return "TransferResetWorkflow" case TASK_TYPE_WORKFLOW_TASK_TIMEOUT: return "WorkflowTaskTimeout" @@ -237,10 +248,14 @@ func (x TaskType) String() string { return "ActivityTimeout" case TASK_TYPE_USER_TIMER: return "UserTimer" + + // gap between index can be used for future priority levels if needed case TASK_TYPE_WORKFLOW_RUN_TIMEOUT: return "WorkflowRunTimeout" case TASK_TYPE_DELETE_HISTORY_EVENT: return "DeleteHistoryEvent" + + // Enum value maps for TaskPriority. 
case TASK_TYPE_ACTIVITY_RETRY_TIMER: return "ActivityRetryTimer" case TASK_TYPE_WORKFLOW_BACKOFF_TIMER: @@ -263,6 +278,24 @@ func (x TaskType) String() string { return "StateMachineOutbound" case TASK_TYPE_STATE_MACHINE_TIMER: return "StateMachineTimer" + case TASK_TYPE_WORKFLOW_EXECUTION_TIMEOUT: + return "WorkflowExecutionTimeout" + case TASK_TYPE_REPLICATION_SYNC_HSM: + return "ReplicationSyncHsm" + + // Deprecated: Use TaskPriority.Descriptor instead. + case TASK_TYPE_REPLICATION_SYNC_VERSIONED_TRANSITION: + return "ReplicationSyncVersionedTransition" + case TASK_TYPE_CHASM_PURE: + return "ChasmPure" + case TASK_TYPE_CHASM: + return "Chasm" + case TASK_TYPE_REPLICATION_DELETE_EXECUTION: + return "ReplicationDeleteExecution" + case TASK_TYPE_WORKER_COMMANDS: + return "WorkerCommands" + case TASK_TYPE_TIMESKIPPING_TIMER: + return "TimeskippingTimer" default: return strconv.Itoa(int(x)) } @@ -285,106 +318,134 @@ func (TaskType) EnumDescriptor() ([]byte, []int) { return file_temporal_server_api_enums_v1_task_proto_rawDescGZIP(), []int{1} } -var File_temporal_server_api_enums_v1_task_proto protoreflect.FileDescriptor +type TaskPriority int32 + +const ( + TASK_PRIORITY_UNSPECIFIED TaskPriority = 0 + TASK_PRIORITY_HIGH TaskPriority = 1 + + TASK_PRIORITY_LOW TaskPriority = 10 +) + +var ( + TaskPriority_name = map[int32]string{ + 0: "TASK_PRIORITY_UNSPECIFIED", + 1: "TASK_PRIORITY_HIGH", + 10: "TASK_PRIORITY_LOW", + } + TaskPriority_value = map[string]int32{ + "TASK_PRIORITY_UNSPECIFIED": 0, + "TASK_PRIORITY_HIGH": 1, + "TASK_PRIORITY_LOW": 10, + } +) + +func (x TaskPriority) Enum() *TaskPriority { + p := new(TaskPriority) + *p = x + return p +} -var file_temporal_server_api_enums_v1_task_proto_rawDesc = []byte{ - 0x0a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, - 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 
0x1c, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0x5e, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x53, 0x4f, - 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, - 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, - 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x44, 0x42, 0x5f, 0x42, 0x41, - 0x43, 0x4b, 0x4c, 0x4f, 0x47, 0x10, 0x02, 0x2a, 0x84, 0x08, 0x0a, 0x08, 0x54, 0x61, 0x73, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x15, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, - 0x21, 0x0a, 0x1d, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, - 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x52, 0x59, - 0x10, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x59, 0x4e, 0x43, - 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x49, 0x54, 0x59, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, - 0x52, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, - 0x03, 0x12, 0x24, 0x0a, 0x20, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, - 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x49, 0x54, 0x59, - 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x10, 0x04, 0x12, 0x26, 0x0a, 0x22, 0x54, 0x41, 0x53, 0x4b, 0x5f, - 
0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x43, 0x4c, - 0x4f, 0x53, 0x45, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x05, 0x12, - 0x27, 0x0a, 0x23, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, - 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x5f, 0x45, 0x58, 0x45, - 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x06, 0x12, 0x2c, 0x0a, 0x28, 0x54, 0x41, 0x53, 0x4b, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x53, - 0x54, 0x41, 0x52, 0x54, 0x5f, 0x43, 0x48, 0x49, 0x4c, 0x44, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x07, 0x12, 0x27, 0x0a, 0x23, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x53, 0x49, 0x47, - 0x4e, 0x41, 0x4c, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x08, 0x12, - 0x25, 0x0a, 0x21, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, - 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x52, 0x45, 0x53, 0x45, 0x54, 0x5f, 0x57, 0x4f, 0x52, 0x4b, - 0x46, 0x4c, 0x4f, 0x57, 0x10, 0x0a, 0x12, 0x23, 0x0a, 0x1f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x54, 0x41, 0x53, - 0x4b, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x0c, 0x12, 0x1e, 0x0a, 0x1a, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x49, 0x54, - 0x59, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x0d, 0x12, 0x18, 0x0a, 0x14, 0x54, - 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x54, 0x49, - 0x4d, 0x45, 0x52, 0x10, 0x0e, 0x12, 0x22, 0x0a, 0x1e, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x55, 0x4e, 0x5f, - 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 
0x54, 0x10, 0x0f, 0x12, 0x22, 0x0a, 0x1e, 0x54, 0x41, 0x53, - 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x48, 0x49, - 0x53, 0x54, 0x4f, 0x52, 0x59, 0x5f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x10, 0x10, 0x12, 0x22, 0x0a, - 0x1e, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, - 0x49, 0x54, 0x59, 0x5f, 0x52, 0x45, 0x54, 0x52, 0x59, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x52, 0x10, - 0x11, 0x12, 0x24, 0x0a, 0x20, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x57, - 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x4f, 0x46, 0x46, 0x5f, - 0x54, 0x49, 0x4d, 0x45, 0x52, 0x10, 0x12, 0x12, 0x28, 0x0a, 0x24, 0x54, 0x41, 0x53, 0x4b, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x56, 0x49, 0x53, 0x49, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, - 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, - 0x13, 0x12, 0x29, 0x0a, 0x25, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x56, - 0x49, 0x53, 0x49, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x55, 0x50, 0x53, 0x45, 0x52, 0x54, - 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x14, 0x12, 0x28, 0x0a, 0x24, - 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x56, 0x49, 0x53, 0x49, 0x42, 0x49, - 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x15, 0x12, 0x29, 0x0a, 0x25, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x56, 0x49, 0x53, 0x49, 0x42, 0x49, 0x4c, 0x49, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, - 0x16, 0x12, 0x27, 0x0a, 0x23, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x54, - 0x52, 0x41, 0x4e, 0x53, 0x46, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x45, - 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x18, 0x12, 0x2d, 
0x0a, 0x29, 0x54, 0x41, - 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, - 0x57, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x19, 0x12, 0x28, 0x0a, 0x24, 0x54, 0x41, 0x53, - 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x52, 0x43, 0x48, 0x49, 0x56, 0x41, 0x4c, 0x5f, - 0x41, 0x52, 0x43, 0x48, 0x49, 0x56, 0x45, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, - 0x4e, 0x10, 0x1a, 0x12, 0x24, 0x0a, 0x20, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x43, 0x48, 0x49, 0x4e, 0x45, 0x5f, 0x4f, - 0x55, 0x54, 0x42, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x1b, 0x12, 0x21, 0x0a, 0x1d, 0x54, 0x41, 0x53, - 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x43, - 0x48, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x52, 0x10, 0x1c, 0x22, 0x04, 0x08, 0x09, - 0x10, 0x09, 0x22, 0x04, 0x08, 0x0b, 0x10, 0x0b, 0x22, 0x04, 0x08, 0x17, 0x10, 0x17, 0x42, 0x2a, - 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, - 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, +func (x TaskPriority) String() string { + switch x { + case TASK_PRIORITY_UNSPECIFIED: + return "Unspecified" + case TASK_PRIORITY_HIGH: + return "High" + case TASK_PRIORITY_LOW: + return "Low" + default: + return strconv.Itoa(int(x)) + } + +} + +func (TaskPriority) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_task_proto_enumTypes[2].Descriptor() +} + +func (TaskPriority) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_task_proto_enumTypes[2] } +func (x TaskPriority) Number() protoreflect.EnumNumber { + return 
protoreflect.EnumNumber(x) +} + +func (TaskPriority) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_task_proto_rawDescGZIP(), []int{2} +} + +var File_temporal_server_api_enums_v1_task_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_task_proto_rawDesc = "" + + "\n" + + "'temporal/server/api/enums/v1/task.proto\x12\x1ctemporal.server.api.enums.v1*^\n" + + "\n" + + "TaskSource\x12\x1b\n" + + "\x17TASK_SOURCE_UNSPECIFIED\x10\x00\x12\x17\n" + + "\x13TASK_SOURCE_HISTORY\x10\x01\x12\x1a\n" + + "\x16TASK_SOURCE_DB_BACKLOG\x10\x02*\xa3\n" + + "\n" + + "\bTaskType\x12\x19\n" + + "\x15TASK_TYPE_UNSPECIFIED\x10\x00\x12!\n" + + "\x1dTASK_TYPE_REPLICATION_HISTORY\x10\x01\x12'\n" + + "#TASK_TYPE_REPLICATION_SYNC_ACTIVITY\x10\x02\x12$\n" + + " TASK_TYPE_TRANSFER_WORKFLOW_TASK\x10\x03\x12$\n" + + " TASK_TYPE_TRANSFER_ACTIVITY_TASK\x10\x04\x12&\n" + + "\"TASK_TYPE_TRANSFER_CLOSE_EXECUTION\x10\x05\x12'\n" + + "#TASK_TYPE_TRANSFER_CANCEL_EXECUTION\x10\x06\x12,\n" + + "(TASK_TYPE_TRANSFER_START_CHILD_EXECUTION\x10\a\x12'\n" + + "#TASK_TYPE_TRANSFER_SIGNAL_EXECUTION\x10\b\x12%\n" + + "!TASK_TYPE_TRANSFER_RESET_WORKFLOW\x10\n" + + "\x12#\n" + + "\x1fTASK_TYPE_WORKFLOW_TASK_TIMEOUT\x10\f\x12\x1e\n" + + "\x1aTASK_TYPE_ACTIVITY_TIMEOUT\x10\r\x12\x18\n" + + "\x14TASK_TYPE_USER_TIMER\x10\x0e\x12\"\n" + + "\x1eTASK_TYPE_WORKFLOW_RUN_TIMEOUT\x10\x0f\x12\"\n" + + "\x1eTASK_TYPE_DELETE_HISTORY_EVENT\x10\x10\x12\"\n" + + "\x1eTASK_TYPE_ACTIVITY_RETRY_TIMER\x10\x11\x12$\n" + + " TASK_TYPE_WORKFLOW_BACKOFF_TIMER\x10\x12\x12(\n" + + "$TASK_TYPE_VISIBILITY_START_EXECUTION\x10\x13\x12)\n" + + "%TASK_TYPE_VISIBILITY_UPSERT_EXECUTION\x10\x14\x12(\n" + + "$TASK_TYPE_VISIBILITY_CLOSE_EXECUTION\x10\x15\x12)\n" + + "%TASK_TYPE_VISIBILITY_DELETE_EXECUTION\x10\x16\x12'\n" + + "#TASK_TYPE_TRANSFER_DELETE_EXECUTION\x10\x18\x12-\n" + + ")TASK_TYPE_REPLICATION_SYNC_WORKFLOW_STATE\x10\x19\x12(\n" + + "$TASK_TYPE_ARCHIVAL_ARCHIVE_EXECUTION\x10\x1a\x12$\n" + 
+ " TASK_TYPE_STATE_MACHINE_OUTBOUND\x10\x1b\x12!\n" + + "\x1dTASK_TYPE_STATE_MACHINE_TIMER\x10\x1c\x12(\n" + + "$TASK_TYPE_WORKFLOW_EXECUTION_TIMEOUT\x10\x1d\x12\"\n" + + "\x1eTASK_TYPE_REPLICATION_SYNC_HSM\x10\x1e\x123\n" + + "/TASK_TYPE_REPLICATION_SYNC_VERSIONED_TRANSITION\x10\x1f\x12\x18\n" + + "\x14TASK_TYPE_CHASM_PURE\x10 \x12\x13\n" + + "\x0fTASK_TYPE_CHASM\x10!\x12*\n" + + "&TASK_TYPE_REPLICATION_DELETE_EXECUTION\x10\"\x12\x1d\n" + + "\x19TASK_TYPE_WORKER_COMMANDS\x10#\x12 \n" + + "\x1cTASK_TYPE_TIMESKIPPING_TIMER\x10$\"\x04\b\t\x10\t\"\x04\b\v\x10\v\"\x04\b\x17\x10\x17*\\\n" + + "\fTaskPriority\x12\x1d\n" + + "\x19TASK_PRIORITY_UNSPECIFIED\x10\x00\x12\x16\n" + + "\x12TASK_PRIORITY_HIGH\x10\x01\x12\x15\n" + + "\x11TASK_PRIORITY_LOW\x10\n" + + "B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + var ( file_temporal_server_api_enums_v1_task_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_task_proto_rawDescData = file_temporal_server_api_enums_v1_task_proto_rawDesc + file_temporal_server_api_enums_v1_task_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_task_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_task_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_task_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_task_proto_rawDescData) + file_temporal_server_api_enums_v1_task_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_task_proto_rawDesc), len(file_temporal_server_api_enums_v1_task_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_task_proto_rawDescData } -var file_temporal_server_api_enums_v1_task_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_temporal_server_api_enums_v1_task_proto_goTypes = []interface{}{ - (TaskSource)(0), // 0: temporal.server.api.enums.v1.TaskSource - (TaskType)(0), // 1: temporal.server.api.enums.v1.TaskType +var 
file_temporal_server_api_enums_v1_task_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_temporal_server_api_enums_v1_task_proto_goTypes = []any{ + (TaskSource)(0), // 0: temporal.server.api.enums.v1.TaskSource + (TaskType)(0), // 1: temporal.server.api.enums.v1.TaskType + (TaskPriority)(0), // 2: temporal.server.api.enums.v1.TaskPriority } var file_temporal_server_api_enums_v1_task_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -403,8 +464,8 @@ func file_temporal_server_api_enums_v1_task_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_task_proto_rawDesc, - NumEnums: 2, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_task_proto_rawDesc), len(file_temporal_server_api_enums_v1_task_proto_rawDesc)), + NumEnums: 3, NumMessages: 0, NumExtensions: 0, NumServices: 0, @@ -414,7 +475,6 @@ func file_temporal_server_api_enums_v1_task_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_task_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_task_proto = out.File - file_temporal_server_api_enums_v1_task_proto_rawDesc = nil file_temporal_server_api_enums_v1_task_proto_goTypes = nil file_temporal_server_api_enums_v1_task_proto_depIdxs = nil } diff --git a/api/enums/v1/workflow.go-helpers.pb.go b/api/enums/v1/workflow.go-helpers.pb.go index 58329dbf225..ddbfb80a386 100644 --- a/api/enums/v1/workflow.go-helpers.pb.go +++ b/api/enums/v1/workflow.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package enums @@ -71,3 +47,22 @@ func WorkflowBackoffTypeFromString(s string) (WorkflowBackoffType, error) { } return WorkflowBackoffType(0), fmt.Errorf("%s is not a valid WorkflowBackoffType", s) } + +var ( + PausedWorkflowEntityType_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Activity": 1, + "Workflow": 2, + } +) + +// PausedWorkflowEntityTypeFromString parses a PausedWorkflowEntityType value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to PausedWorkflowEntityType +func PausedWorkflowEntityTypeFromString(s string) (PausedWorkflowEntityType, error) { + if v, ok := PausedWorkflowEntityType_value[s]; ok { + return PausedWorkflowEntityType(v), nil + } else if v, ok := PausedWorkflowEntityType_shorthandValue[s]; ok { + return PausedWorkflowEntityType(v), nil + } + return PausedWorkflowEntityType(0), fmt.Errorf("%s is not a valid PausedWorkflowEntityType", s) +} diff --git a/api/enums/v1/workflow.pb.go b/api/enums/v1/workflow.pb.go index 6ce81582f8f..22594b5dbc6 100644 --- a/api/enums/v1/workflow.pb.go +++ b/api/enums/v1/workflow.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -187,64 +166,105 @@ func (WorkflowBackoffType) EnumDescriptor() ([]byte, []int) { return file_temporal_server_api_enums_v1_workflow_proto_rawDescGZIP(), []int{1} } -var File_temporal_server_api_enums_v1_workflow_proto protoreflect.FileDescriptor +type PausedWorkflowEntityType int32 + +const ( + PAUSED_WORKFLOW_ENTITY_TYPE_UNSPECIFIED PausedWorkflowEntityType = 0 + PAUSED_WORKFLOW_ENTITY_TYPE_ACTIVITY PausedWorkflowEntityType = 1 + PAUSED_WORKFLOW_ENTITY_TYPE_WORKFLOW PausedWorkflowEntityType = 2 +) + +// Enum value maps for PausedWorkflowEntityType. 
+var ( + PausedWorkflowEntityType_name = map[int32]string{ + 0: "PAUSED_WORKFLOW_ENTITY_TYPE_UNSPECIFIED", + 1: "PAUSED_WORKFLOW_ENTITY_TYPE_ACTIVITY", + 2: "PAUSED_WORKFLOW_ENTITY_TYPE_WORKFLOW", + } + PausedWorkflowEntityType_value = map[string]int32{ + "PAUSED_WORKFLOW_ENTITY_TYPE_UNSPECIFIED": 0, + "PAUSED_WORKFLOW_ENTITY_TYPE_ACTIVITY": 1, + "PAUSED_WORKFLOW_ENTITY_TYPE_WORKFLOW": 2, + } +) + +func (x PausedWorkflowEntityType) Enum() *PausedWorkflowEntityType { + p := new(PausedWorkflowEntityType) + *p = x + return p +} + +func (x PausedWorkflowEntityType) String() string { + switch x { + case PAUSED_WORKFLOW_ENTITY_TYPE_UNSPECIFIED: + return "Unspecified" + case PAUSED_WORKFLOW_ENTITY_TYPE_ACTIVITY: + return "Activity" + case PAUSED_WORKFLOW_ENTITY_TYPE_WORKFLOW: + return "Workflow" + default: + return strconv.Itoa(int(x)) + } + +} + +func (PausedWorkflowEntityType) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_api_enums_v1_workflow_proto_enumTypes[2].Descriptor() +} -var file_temporal_server_api_enums_v1_workflow_proto_rawDesc = []byte{ - 0x0a, 0x2b, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0xa6, 0x02, 0x0a, 0x16, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x24, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, - 0x4f, 0x57, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 0x12, 0x24, 0x0a, 0x20, 
0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x45, 0x58, 0x45, - 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x24, 0x0a, 0x20, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, - 0x4f, 0x57, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, - 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, - 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, - 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, - 0x5f, 0x5a, 0x4f, 0x4d, 0x42, 0x49, 0x45, 0x10, 0x04, 0x12, 0x21, 0x0a, 0x1d, 0x57, 0x4f, 0x52, - 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x56, 0x4f, 0x49, 0x44, 0x10, 0x05, 0x12, 0x26, 0x0a, 0x22, - 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x43, 0x4f, 0x52, 0x52, 0x55, 0x50, 0x54, - 0x45, 0x44, 0x10, 0x06, 0x2a, 0xa4, 0x01, 0x0a, 0x13, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x21, - 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x4f, 0x46, 0x46, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, - 0x42, 0x41, 0x43, 0x4b, 0x4f, 0x46, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x54, - 0x52, 0x59, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, 0x57, 0x4f, 
0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, - 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x4f, 0x46, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, - 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x25, 0x0a, 0x21, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, - 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x4f, 0x46, 0x46, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x45, - 0x4c, 0x41, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x10, 0x03, 0x42, 0x2a, 0x5a, 0x28, 0x67, - 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, - 0x31, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (PausedWorkflowEntityType) Type() protoreflect.EnumType { + return &file_temporal_server_api_enums_v1_workflow_proto_enumTypes[2] } +func (x PausedWorkflowEntityType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PausedWorkflowEntityType.Descriptor instead. 
+func (PausedWorkflowEntityType) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_api_enums_v1_workflow_proto_rawDescGZIP(), []int{2} +} + +var File_temporal_server_api_enums_v1_workflow_proto protoreflect.FileDescriptor + +const file_temporal_server_api_enums_v1_workflow_proto_rawDesc = "" + + "\n" + + "+temporal/server/api/enums/v1/workflow.proto\x12\x1ctemporal.server.api.enums.v1*\xa6\x02\n" + + "\x16WorkflowExecutionState\x12(\n" + + "$WORKFLOW_EXECUTION_STATE_UNSPECIFIED\x10\x00\x12$\n" + + " WORKFLOW_EXECUTION_STATE_CREATED\x10\x01\x12$\n" + + " WORKFLOW_EXECUTION_STATE_RUNNING\x10\x02\x12&\n" + + "\"WORKFLOW_EXECUTION_STATE_COMPLETED\x10\x03\x12#\n" + + "\x1fWORKFLOW_EXECUTION_STATE_ZOMBIE\x10\x04\x12!\n" + + "\x1dWORKFLOW_EXECUTION_STATE_VOID\x10\x05\x12&\n" + + "\"WORKFLOW_EXECUTION_STATE_CORRUPTED\x10\x06*\xa4\x01\n" + + "\x13WorkflowBackoffType\x12%\n" + + "!WORKFLOW_BACKOFF_TYPE_UNSPECIFIED\x10\x00\x12\x1f\n" + + "\x1bWORKFLOW_BACKOFF_TYPE_RETRY\x10\x01\x12\x1e\n" + + "\x1aWORKFLOW_BACKOFF_TYPE_CRON\x10\x02\x12%\n" + + "!WORKFLOW_BACKOFF_TYPE_DELAY_START\x10\x03*\x9b\x01\n" + + "\x18PausedWorkflowEntityType\x12+\n" + + "'PAUSED_WORKFLOW_ENTITY_TYPE_UNSPECIFIED\x10\x00\x12(\n" + + "$PAUSED_WORKFLOW_ENTITY_TYPE_ACTIVITY\x10\x01\x12(\n" + + "$PAUSED_WORKFLOW_ENTITY_TYPE_WORKFLOW\x10\x02B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" + var ( file_temporal_server_api_enums_v1_workflow_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_workflow_proto_rawDescData = file_temporal_server_api_enums_v1_workflow_proto_rawDesc + file_temporal_server_api_enums_v1_workflow_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_workflow_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_workflow_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_workflow_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_workflow_proto_rawDescData) + 
file_temporal_server_api_enums_v1_workflow_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_workflow_proto_rawDesc), len(file_temporal_server_api_enums_v1_workflow_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_workflow_proto_rawDescData } -var file_temporal_server_api_enums_v1_workflow_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_temporal_server_api_enums_v1_workflow_proto_goTypes = []interface{}{ - (WorkflowExecutionState)(0), // 0: temporal.server.api.enums.v1.WorkflowExecutionState - (WorkflowBackoffType)(0), // 1: temporal.server.api.enums.v1.WorkflowBackoffType +var file_temporal_server_api_enums_v1_workflow_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_temporal_server_api_enums_v1_workflow_proto_goTypes = []any{ + (WorkflowExecutionState)(0), // 0: temporal.server.api.enums.v1.WorkflowExecutionState + (WorkflowBackoffType)(0), // 1: temporal.server.api.enums.v1.WorkflowBackoffType + (PausedWorkflowEntityType)(0), // 2: temporal.server.api.enums.v1.PausedWorkflowEntityType } var file_temporal_server_api_enums_v1_workflow_proto_depIdxs = []int32{ 0, // [0:0] is the sub-list for method output_type @@ -263,8 +283,8 @@ func file_temporal_server_api_enums_v1_workflow_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_workflow_proto_rawDesc, - NumEnums: 2, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_workflow_proto_rawDesc), len(file_temporal_server_api_enums_v1_workflow_proto_rawDesc)), + NumEnums: 3, NumMessages: 0, NumExtensions: 0, NumServices: 0, @@ -274,7 +294,6 @@ func file_temporal_server_api_enums_v1_workflow_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_workflow_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_workflow_proto = out.File - 
file_temporal_server_api_enums_v1_workflow_proto_rawDesc = nil file_temporal_server_api_enums_v1_workflow_proto_goTypes = nil file_temporal_server_api_enums_v1_workflow_proto_depIdxs = nil } diff --git a/api/enums/v1/workflow_task_type.go-helpers.pb.go b/api/enums/v1/workflow_task_type.go-helpers.pb.go index 1ecb5182e93..3117890fdf1 100644 --- a/api/enums/v1/workflow_task_type.go-helpers.pb.go +++ b/api/enums/v1/workflow_task_type.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package enums diff --git a/api/enums/v1/workflow_task_type.pb.go b/api/enums/v1/workflow_task_type.pb.go index b178668a741..e65d6945585 100644 --- a/api/enums/v1/workflow_task_type.pb.go +++ b/api/enums/v1/workflow_task_type.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -32,6 +10,7 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -111,42 +90,29 @@ func (WorkflowTaskType) EnumDescriptor() ([]byte, []int) { var File_temporal_server_api_enums_v1_workflow_task_type_proto protoreflect.FileDescriptor -var file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2a, 0x9b, 0x01, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x22, 0x0a, 0x1e, 0x57, 0x4f, - 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, 0x59, 0x50, 0x45, - 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1d, - 0x0a, 0x19, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x20, 0x0a, - 0x1c, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x54, 0x41, 0x53, 0x4b, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x54, 0x52, 0x41, 0x4e, 0x53, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x12, - 0x22, 0x0a, 0x1e, 0x57, 0x4f, 0x52, 0x4b, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x54, 0x41, 0x53, 0x4b, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x50, 0x45, 0x43, 0x55, 0x4c, 0x41, 0x54, 0x49, 0x56, - 0x45, 0x10, 0x03, 0x42, 0x2a, 0x5a, 
0x28, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc = "" + + "\n" + + "5temporal/server/api/enums/v1/workflow_task_type.proto\x12\x1ctemporal.server.api.enums.v1*\x9b\x01\n" + + "\x10WorkflowTaskType\x12\"\n" + + "\x1eWORKFLOW_TASK_TYPE_UNSPECIFIED\x10\x00\x12\x1d\n" + + "\x19WORKFLOW_TASK_TYPE_NORMAL\x10\x01\x12 \n" + + "\x1cWORKFLOW_TASK_TYPE_TRANSIENT\x10\x02\x12\"\n" + + "\x1eWORKFLOW_TASK_TYPE_SPECULATIVE\x10\x03B*Z(go.temporal.io/server/api/enums/v1;enumsb\x06proto3" var ( file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescOnce sync.Once - file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescData = file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc + file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescData []byte ) func file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescGZIP() []byte { file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescOnce.Do(func() { - file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescData) + file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc), len(file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc))) }) return file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDescData } var file_temporal_server_api_enums_v1_workflow_task_type_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_temporal_server_api_enums_v1_workflow_task_type_proto_goTypes = 
[]interface{}{ +var file_temporal_server_api_enums_v1_workflow_task_type_proto_goTypes = []any{ (WorkflowTaskType)(0), // 0: temporal.server.api.enums.v1.WorkflowTaskType } var file_temporal_server_api_enums_v1_workflow_task_type_proto_depIdxs = []int32{ @@ -166,7 +132,7 @@ func file_temporal_server_api_enums_v1_workflow_task_type_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc), len(file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc)), NumEnums: 1, NumMessages: 0, NumExtensions: 0, @@ -177,7 +143,6 @@ func file_temporal_server_api_enums_v1_workflow_task_type_proto_init() { EnumInfos: file_temporal_server_api_enums_v1_workflow_task_type_proto_enumTypes, }.Build() File_temporal_server_api_enums_v1_workflow_task_type_proto = out.File - file_temporal_server_api_enums_v1_workflow_task_type_proto_rawDesc = nil file_temporal_server_api_enums_v1_workflow_task_type_proto_goTypes = nil file_temporal_server_api_enums_v1_workflow_task_type_proto_depIdxs = nil } diff --git a/api/errordetails/v1/message.go-helpers.pb.go b/api/errordetails/v1/message.go-helpers.pb.go index 37f99b9bb96..525bbe41f41 100644 --- a/api/errordetails/v1/message.go-helpers.pb.go +++ b/api/errordetails/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package errordetails @@ -177,6 +153,43 @@ func (this *RetryReplicationFailure) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type SyncStateFailure to the protobuf v3 wire format +func (val *SyncStateFailure) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncStateFailure from the protobuf v3 wire format +func (val *SyncStateFailure) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncStateFailure) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncStateFailure values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncStateFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncStateFailure + switch t := that.(type) { + case *SyncStateFailure: + that1 = t + case SyncStateFailure: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type StickyWorkerUnavailableFailure to the protobuf v3 wire format func (val *StickyWorkerUnavailableFailure) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -213,3 +226,151 @@ func (this *StickyWorkerUnavailableFailure) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type ObsoleteDispatchBuildIdFailure to the protobuf v3 wire format +func (val *ObsoleteDispatchBuildIdFailure) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ObsoleteDispatchBuildIdFailure from the protobuf v3 wire format +func (val *ObsoleteDispatchBuildIdFailure) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ObsoleteDispatchBuildIdFailure) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ObsoleteDispatchBuildIdFailure values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ObsoleteDispatchBuildIdFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ObsoleteDispatchBuildIdFailure + switch t := that.(type) { + case *ObsoleteDispatchBuildIdFailure: + that1 = t + case ObsoleteDispatchBuildIdFailure: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ObsoleteMatchingTaskFailure to the protobuf v3 wire format +func (val *ObsoleteMatchingTaskFailure) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ObsoleteMatchingTaskFailure from the protobuf v3 wire format +func (val *ObsoleteMatchingTaskFailure) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ObsoleteMatchingTaskFailure) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ObsoleteMatchingTaskFailure values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ObsoleteMatchingTaskFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ObsoleteMatchingTaskFailure + switch t := that.(type) { + case *ObsoleteMatchingTaskFailure: + that1 = t + case ObsoleteMatchingTaskFailure: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityStartDuringTransitionFailure to the protobuf v3 wire format +func (val *ActivityStartDuringTransitionFailure) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityStartDuringTransitionFailure from the protobuf v3 wire format +func (val *ActivityStartDuringTransitionFailure) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityStartDuringTransitionFailure) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityStartDuringTransitionFailure values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityStartDuringTransitionFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityStartDuringTransitionFailure + switch t := that.(type) { + case *ActivityStartDuringTransitionFailure: + that1 = t + case ActivityStartDuringTransitionFailure: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StalePartitionCountsFailure to the protobuf v3 wire format +func (val *StalePartitionCountsFailure) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StalePartitionCountsFailure from the protobuf v3 wire format +func (val *StalePartitionCountsFailure) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StalePartitionCountsFailure) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StalePartitionCountsFailure values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StalePartitionCountsFailure) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StalePartitionCountsFailure + switch t := that.(type) { + case *StalePartitionCountsFailure: + that1 = t + case StalePartitionCountsFailure: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/errordetails/v1/message.pb.go b/api/errordetails/v1/message.pb.go index 752eaa3ccd8..2a70d12de25 100644 --- a/api/errordetails/v1/message.pb.go +++ b/api/errordetails/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -33,7 +11,10 @@ package errordetails import ( reflect "reflect" sync "sync" + unsafe "unsafe" + v11 "go.temporal.io/server/api/history/v1" + v1 "go.temporal.io/server/api/persistence/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -46,18 +27,16 @@ const ( ) type TaskAlreadyStartedFailure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskAlreadyStartedFailure) Reset() { *x = TaskAlreadyStartedFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskAlreadyStartedFailure) String() string { @@ -68,7 +47,7 @@ func (*TaskAlreadyStartedFailure) ProtoMessage() {} func (x *TaskAlreadyStartedFailure) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -84,21 +63,20 @@ func (*TaskAlreadyStartedFailure) Descriptor() ([]byte, []int) { } type CurrentBranchChangedFailure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CurrentBranchToken []byte `protobuf:"bytes,1,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - RequestBranchToken []byte `protobuf:"bytes,2,opt,name=request_branch_token,json=requestBranchToken,proto3" 
json:"request_branch_token,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + CurrentBranchToken []byte `protobuf:"bytes,1,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` + RequestBranchToken []byte `protobuf:"bytes,2,opt,name=request_branch_token,json=requestBranchToken,proto3" json:"request_branch_token,omitempty"` + CurrentVersionedTransition *v1.VersionedTransition `protobuf:"bytes,3,opt,name=current_versioned_transition,json=currentVersionedTransition,proto3" json:"current_versioned_transition,omitempty"` + RequestVersionedTransition *v1.VersionedTransition `protobuf:"bytes,4,opt,name=request_versioned_transition,json=requestVersionedTransition,proto3" json:"request_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CurrentBranchChangedFailure) Reset() { *x = CurrentBranchChangedFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CurrentBranchChangedFailure) String() string { @@ -109,7 +87,7 @@ func (*CurrentBranchChangedFailure) ProtoMessage() {} func (x *CurrentBranchChangedFailure) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -138,22 +116,33 @@ func (x *CurrentBranchChangedFailure) GetRequestBranchToken() []byte { return nil } +func (x *CurrentBranchChangedFailure) GetCurrentVersionedTransition() *v1.VersionedTransition { + if x != nil { + return x.CurrentVersionedTransition 
+ } + return nil +} + +func (x *CurrentBranchChangedFailure) GetRequestVersionedTransition() *v1.VersionedTransition { + if x != nil { + return x.RequestVersionedTransition + } + return nil +} + type ShardOwnershipLostFailure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + OwnerHost string `protobuf:"bytes,1,opt,name=owner_host,json=ownerHost,proto3" json:"owner_host,omitempty"` + CurrentHost string `protobuf:"bytes,2,opt,name=current_host,json=currentHost,proto3" json:"current_host,omitempty"` unknownFields protoimpl.UnknownFields - - OwnerHost string `protobuf:"bytes,1,opt,name=owner_host,json=ownerHost,proto3" json:"owner_host,omitempty"` - CurrentHost string `protobuf:"bytes,2,opt,name=current_host,json=currentHost,proto3" json:"current_host,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ShardOwnershipLostFailure) Reset() { *x = ShardOwnershipLostFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ShardOwnershipLostFailure) String() string { @@ -164,7 +153,7 @@ func (*ShardOwnershipLostFailure) ProtoMessage() {} func (x *ShardOwnershipLostFailure) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -194,26 +183,23 @@ func (x *ShardOwnershipLostFailure) GetCurrentHost() string { } type RetryReplicationFailure struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - 
NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - StartEventId int64 `protobuf:"varint,4,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` - StartEventVersion int64 `protobuf:"varint,5,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` - EndEventId int64 `protobuf:"varint,6,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` - EndEventVersion int64 `protobuf:"varint,7,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + StartEventId int64 `protobuf:"varint,4,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` + StartEventVersion int64 `protobuf:"varint,5,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` + EndEventId int64 `protobuf:"varint,6,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` + EndEventVersion int64 `protobuf:"varint,7,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RetryReplicationFailure) Reset() { *x = RetryReplicationFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[3] - 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RetryReplicationFailure) String() string { @@ -224,7 +210,7 @@ func (*RetryReplicationFailure) ProtoMessage() {} func (x *RetryReplicationFailure) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -288,19 +274,102 @@ func (x *RetryReplicationFailure) GetEndEventVersion() int64 { return 0 } -type StickyWorkerUnavailableFailure struct { - state protoimpl.MessageState +type SyncStateFailure struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + VersionedTransition *v1.VersionedTransition `protobuf:"bytes,4,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + VersionHistories *v11.VersionHistories `protobuf:"bytes,5,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,6,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +} + +func (x *SyncStateFailure) Reset() { + *x = SyncStateFailure{} + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[4] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncStateFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncStateFailure) ProtoMessage() {} + +func (x *SyncStateFailure) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncStateFailure.ProtoReflect.Descriptor instead. +func (*SyncStateFailure) Descriptor() ([]byte, []int) { + return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{4} +} + +func (x *SyncStateFailure) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SyncStateFailure) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *SyncStateFailure) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *SyncStateFailure) GetVersionedTransition() *v1.VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} + +func (x *SyncStateFailure) GetVersionHistories() *v11.VersionHistories { + if x != nil { + return x.VersionHistories + } + return nil +} + +func (x *SyncStateFailure) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +type StickyWorkerUnavailableFailure struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StickyWorkerUnavailableFailure) Reset() { *x = StickyWorkerUnavailableFailure{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_temporal_server_api_errordetails_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StickyWorkerUnavailableFailure) String() string { @@ -310,8 +379,8 @@ func (x *StickyWorkerUnavailableFailure) String() string { func (*StickyWorkerUnavailableFailure) ProtoMessage() {} func (x *StickyWorkerUnavailableFailure) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -323,86 +392,236 @@ func (x *StickyWorkerUnavailableFailure) ProtoReflect() protoreflect.Message { // Deprecated: Use StickyWorkerUnavailableFailure.ProtoReflect.Descriptor instead. func (*StickyWorkerUnavailableFailure) Descriptor() ([]byte, []int) { - return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{4} + return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{5} } -var File_temporal_server_api_errordetails_v1_message_proto protoreflect.FileDescriptor +// Deprecated. Only used in WV2. 
[cleanup-old-wv] +type ObsoleteDispatchBuildIdFailure struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObsoleteDispatchBuildIdFailure) Reset() { + *x = ObsoleteDispatchBuildIdFailure{} + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ObsoleteDispatchBuildIdFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObsoleteDispatchBuildIdFailure) ProtoMessage() {} + +func (x *ObsoleteDispatchBuildIdFailure) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObsoleteDispatchBuildIdFailure.ProtoReflect.Descriptor instead. +func (*ObsoleteDispatchBuildIdFailure) Descriptor() ([]byte, []int) { + return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{6} +} + +// Returned when History determines a task that Matching wants to dispatch is no longer valid. 
+type ObsoleteMatchingTaskFailure struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ObsoleteMatchingTaskFailure) Reset() { + *x = ObsoleteMatchingTaskFailure{} + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} -var file_temporal_server_api_errordetails_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x31, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x2e, 0x76, 0x31, 0x22, 0x1b, 0x0a, 0x19, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x6c, 0x72, 0x65, 0x61, 0x64, - 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x22, 0x89, - 0x01, 0x0a, 0x1b, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x34, 0x0a, 0x14, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x34, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x42, 0x72, 0x61, 
0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x65, 0x0a, 0x19, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x73, - 0x68, 0x69, 0x70, 0x4c, 0x6f, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x21, 0x0a, - 0x0a, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x09, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, - 0x0a, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xb4, 0x02, 0x0a, 0x17, 0x52, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x25, 0x0a, - 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, - 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 
0x6e, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x24, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x65, 0x6e, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x65, - 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x20, 0x0a, 0x1e, 0x53, 0x74, 0x69, 0x63, - 0x6b, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, - 0x6c, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x38, 0x5a, 0x36, 0x67, 0x6f, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *ObsoleteMatchingTaskFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObsoleteMatchingTaskFailure) ProtoMessage() {} + +func (x *ObsoleteMatchingTaskFailure) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObsoleteMatchingTaskFailure.ProtoReflect.Descriptor instead. 
+func (*ObsoleteMatchingTaskFailure) Descriptor() ([]byte, []int) { + return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{7} +} + +// Returned when an activity start is rejected by History because the workflow is in a transitioning +// between worker deployments. +type ActivityStartDuringTransitionFailure struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } +func (x *ActivityStartDuringTransitionFailure) Reset() { + *x = ActivityStartDuringTransitionFailure{} + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityStartDuringTransitionFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityStartDuringTransitionFailure) ProtoMessage() {} + +func (x *ActivityStartDuringTransitionFailure) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityStartDuringTransitionFailure.ProtoReflect.Descriptor instead. +func (*ActivityStartDuringTransitionFailure) Descriptor() ([]byte, []int) { + return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{8} +} + +// StalePartitionCountsFailure is returned when the clients view of valid partition counts is +// too far from the current value and the server rejects the call. It is an "Aborted" error. +// The current values will be returned in a grpc trailer. 
+type StalePartitionCountsFailure struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StalePartitionCountsFailure) Reset() { + *x = StalePartitionCountsFailure{} + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StalePartitionCountsFailure) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StalePartitionCountsFailure) ProtoMessage() {} + +func (x *StalePartitionCountsFailure) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_errordetails_v1_message_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StalePartitionCountsFailure.ProtoReflect.Descriptor instead. +func (*StalePartitionCountsFailure) Descriptor() ([]byte, []int) { + return file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP(), []int{9} +} + +var File_temporal_server_api_errordetails_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_errordetails_v1_message_proto_rawDesc = "" + + "\n" + + "1temporal/server/api/errordetails/v1/message.proto\x12#temporal.server.api.errordetails.v1\x1a,temporal/server/api/history/v1/message.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\"\x1b\n" + + "\x19TaskAlreadyStartedFailure\"\xf7\x02\n" + + "\x1bCurrentBranchChangedFailure\x120\n" + + "\x14current_branch_token\x18\x01 \x01(\fR\x12currentBranchToken\x120\n" + + "\x14request_branch_token\x18\x02 \x01(\fR\x12requestBranchToken\x12y\n" + + "\x1ccurrent_versioned_transition\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1acurrentVersionedTransition\x12y\n" + + "\x1crequest_versioned_transition\x18\x04 
\x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1arequestVersionedTransition\"]\n" + + "\x19ShardOwnershipLostFailure\x12\x1d\n" + + "\n" + + "owner_host\x18\x01 \x01(\tR\townerHost\x12!\n" + + "\fcurrent_host\x18\x02 \x01(\tR\vcurrentHost\"\x98\x02\n" + + "\x17RetryReplicationFailure\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12$\n" + + "\x0estart_event_id\x18\x04 \x01(\x03R\fstartEventId\x12.\n" + + "\x13start_event_version\x18\x05 \x01(\x03R\x11startEventVersion\x12 \n" + + "\fend_event_id\x18\x06 \x01(\x03R\n" + + "endEventId\x12*\n" + + "\x11end_event_version\x18\a \x01(\x03R\x0fendEventVersion\"\xdb\x02\n" + + "\x10SyncStateFailure\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12j\n" + + "\x14versioned_transition\x18\x04 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12]\n" + + "\x11version_histories\x18\x05 \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistories\x12!\n" + + "\farchetype_id\x18\x06 \x01(\rR\varchetypeId\" \n" + + "\x1eStickyWorkerUnavailableFailure\" \n" + + "\x1eObsoleteDispatchBuildIdFailure\"\x1d\n" + + "\x1bObsoleteMatchingTaskFailure\"&\n" + + "$ActivityStartDuringTransitionFailure\"\x1d\n" + + "\x1bStalePartitionCountsFailureB8Z6go.temporal.io/server/api/errordetails/v1;errordetailsb\x06proto3" + var ( file_temporal_server_api_errordetails_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_errordetails_v1_message_proto_rawDescData = file_temporal_server_api_errordetails_v1_message_proto_rawDesc + file_temporal_server_api_errordetails_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_errordetails_v1_message_proto_rawDescGZIP() []byte { 
file_temporal_server_api_errordetails_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_errordetails_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_errordetails_v1_message_proto_rawDescData) + file_temporal_server_api_errordetails_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_errordetails_v1_message_proto_rawDesc), len(file_temporal_server_api_errordetails_v1_message_proto_rawDesc))) }) return file_temporal_server_api_errordetails_v1_message_proto_rawDescData } -var file_temporal_server_api_errordetails_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_temporal_server_api_errordetails_v1_message_proto_goTypes = []interface{}{ - (*TaskAlreadyStartedFailure)(nil), // 0: temporal.server.api.errordetails.v1.TaskAlreadyStartedFailure - (*CurrentBranchChangedFailure)(nil), // 1: temporal.server.api.errordetails.v1.CurrentBranchChangedFailure - (*ShardOwnershipLostFailure)(nil), // 2: temporal.server.api.errordetails.v1.ShardOwnershipLostFailure - (*RetryReplicationFailure)(nil), // 3: temporal.server.api.errordetails.v1.RetryReplicationFailure - (*StickyWorkerUnavailableFailure)(nil), // 4: temporal.server.api.errordetails.v1.StickyWorkerUnavailableFailure +var file_temporal_server_api_errordetails_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_temporal_server_api_errordetails_v1_message_proto_goTypes = []any{ + (*TaskAlreadyStartedFailure)(nil), // 0: temporal.server.api.errordetails.v1.TaskAlreadyStartedFailure + (*CurrentBranchChangedFailure)(nil), // 1: temporal.server.api.errordetails.v1.CurrentBranchChangedFailure + (*ShardOwnershipLostFailure)(nil), // 2: temporal.server.api.errordetails.v1.ShardOwnershipLostFailure + (*RetryReplicationFailure)(nil), // 3: temporal.server.api.errordetails.v1.RetryReplicationFailure + (*SyncStateFailure)(nil), // 4: temporal.server.api.errordetails.v1.SyncStateFailure + 
(*StickyWorkerUnavailableFailure)(nil), // 5: temporal.server.api.errordetails.v1.StickyWorkerUnavailableFailure + (*ObsoleteDispatchBuildIdFailure)(nil), // 6: temporal.server.api.errordetails.v1.ObsoleteDispatchBuildIdFailure + (*ObsoleteMatchingTaskFailure)(nil), // 7: temporal.server.api.errordetails.v1.ObsoleteMatchingTaskFailure + (*ActivityStartDuringTransitionFailure)(nil), // 8: temporal.server.api.errordetails.v1.ActivityStartDuringTransitionFailure + (*StalePartitionCountsFailure)(nil), // 9: temporal.server.api.errordetails.v1.StalePartitionCountsFailure + (*v1.VersionedTransition)(nil), // 10: temporal.server.api.persistence.v1.VersionedTransition + (*v11.VersionHistories)(nil), // 11: temporal.server.api.history.v1.VersionHistories } var file_temporal_server_api_errordetails_v1_message_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 10, // 0: temporal.server.api.errordetails.v1.CurrentBranchChangedFailure.current_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 10, // 1: temporal.server.api.errordetails.v1.CurrentBranchChangedFailure.request_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 10, // 2: temporal.server.api.errordetails.v1.SyncStateFailure.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 11, // 3: temporal.server.api.errordetails.v1.SyncStateFailure.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field 
type_name } func init() { file_temporal_server_api_errordetails_v1_message_proto_init() } @@ -410,75 +629,13 @@ func file_temporal_server_api_errordetails_v1_message_proto_init() { if File_temporal_server_api_errordetails_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_errordetails_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskAlreadyStartedFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_errordetails_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CurrentBranchChangedFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_errordetails_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardOwnershipLostFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_errordetails_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryReplicationFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_errordetails_v1_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StickyWorkerUnavailableFailure); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_errordetails_v1_message_proto_rawDesc, + 
RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_errordetails_v1_message_proto_rawDesc), len(file_temporal_server_api_errordetails_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 5, + NumMessages: 10, NumExtensions: 0, NumServices: 0, }, @@ -487,7 +644,6 @@ func file_temporal_server_api_errordetails_v1_message_proto_init() { MessageInfos: file_temporal_server_api_errordetails_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_errordetails_v1_message_proto = out.File - file_temporal_server_api_errordetails_v1_message_proto_rawDesc = nil file_temporal_server_api_errordetails_v1_message_proto_goTypes = nil file_temporal_server_api_errordetails_v1_message_proto_depIdxs = nil } diff --git a/api/health/v1/message.go-helpers.pb.go b/api/health/v1/message.go-helpers.pb.go new file mode 100644 index 00000000000..f35c7bec0b5 --- /dev/null +++ b/api/health/v1/message.go-helpers.pb.go @@ -0,0 +1,117 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package health + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type HealthCheck to the protobuf v3 wire format +func (val *HealthCheck) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type HealthCheck from the protobuf v3 wire format +func (val *HealthCheck) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *HealthCheck) Size() int { + return proto.Size(val) +} + +// Equal returns whether two HealthCheck values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *HealthCheck) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *HealthCheck + switch t := that.(type) { + case *HealthCheck: + that1 = t + case HealthCheck: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type HostHealthDetail to the protobuf v3 wire format +func (val *HostHealthDetail) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type HostHealthDetail from the protobuf v3 wire format +func (val *HostHealthDetail) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *HostHealthDetail) Size() int { + return proto.Size(val) +} + +// Equal returns whether two HostHealthDetail values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *HostHealthDetail) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *HostHealthDetail + switch t := that.(type) { + case *HostHealthDetail: + that1 = t + case HostHealthDetail: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ServiceHealthDetail to the protobuf v3 wire format +func (val *ServiceHealthDetail) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ServiceHealthDetail from the protobuf v3 wire format +func (val *ServiceHealthDetail) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ServiceHealthDetail) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ServiceHealthDetail values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ServiceHealthDetail) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ServiceHealthDetail + switch t := that.(type) { + case *ServiceHealthDetail: + that1 = t + case ServiceHealthDetail: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/health/v1/message.pb.go b/api/health/v1/message.pb.go new file mode 100644 index 00000000000..72e1ae78ef4 --- /dev/null +++ b/api/health/v1/message.pb.go @@ -0,0 +1,324 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/health/v1/message.proto + +package health + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/server/api/enums/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Individual health check result. +// The check_type field uses human-readable strings rather than an enum for extensibility. +type HealthCheck struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Machine-readable check type identifier for programmatic matching. + // Known values defined as Go constants in api/health/v1/types.go: + // + // "grpc_health", "rpc_latency", "rpc_error_ratio", + // "persistence_latency", "persistence_error_ratio", + // "host_availability", "task_queue_backlog" + // + // We use strings instead of an enum for flexibility: new check types can be + // added without proto changes. See HealthCheck.message for human-readable details. + CheckType string `protobuf:"bytes,1,opt,name=check_type,json=checkType,proto3" json:"check_type,omitempty"` + State v1.HealthState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.server.api.enums.v1.HealthState" json:"state,omitempty"` + // Actual observed value (0 if N/A). + Value float64 `protobuf:"fixed64,3,opt,name=value,proto3" json:"value,omitempty"` + // Threshold that was exceeded (0 if N/A). + Threshold float64 `protobuf:"fixed64,4,opt,name=threshold,proto3" json:"threshold,omitempty"` + // Human-readable detail describing what happened, e.g. + // "RPC latency 850.00ms exceeded 500.00ms threshold". 
+ Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthCheck) Reset() { + *x = HealthCheck{} + mi := &file_temporal_server_api_health_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthCheck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthCheck) ProtoMessage() {} + +func (x *HealthCheck) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_health_v1_message_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthCheck.ProtoReflect.Descriptor instead. +func (*HealthCheck) Descriptor() ([]byte, []int) { + return file_temporal_server_api_health_v1_message_proto_rawDescGZIP(), []int{0} +} + +func (x *HealthCheck) GetCheckType() string { + if x != nil { + return x.CheckType + } + return "" +} + +func (x *HealthCheck) GetState() v1.HealthState { + if x != nil { + return x.State + } + return v1.HealthState(0) +} + +func (x *HealthCheck) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *HealthCheck) GetThreshold() float64 { + if x != nil { + return x.Threshold + } + return 0 +} + +func (x *HealthCheck) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +// Health details for a single host. 
+type HostHealthDetail struct { + state protoimpl.MessageState `protogen:"open.v1"` + Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + State v1.HealthState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.server.api.enums.v1.HealthState" json:"state,omitempty"` + Checks []*HealthCheck `protobuf:"bytes,3,rep,name=checks,proto3" json:"checks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HostHealthDetail) Reset() { + *x = HostHealthDetail{} + mi := &file_temporal_server_api_health_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HostHealthDetail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostHealthDetail) ProtoMessage() {} + +func (x *HostHealthDetail) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_health_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostHealthDetail.ProtoReflect.Descriptor instead. +func (*HostHealthDetail) Descriptor() ([]byte, []int) { + return file_temporal_server_api_health_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *HostHealthDetail) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *HostHealthDetail) GetState() v1.HealthState { + if x != nil { + return x.State + } + return v1.HealthState(0) +} + +func (x *HostHealthDetail) GetChecks() []*HealthCheck { + if x != nil { + return x.Checks + } + return nil +} + +// Health details for a service (history, frontend, matching). 
+type ServiceHealthDetail struct { + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + State v1.HealthState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.server.api.enums.v1.HealthState" json:"state,omitempty"` + Hosts []*HostHealthDetail `protobuf:"bytes,3,rep,name=hosts,proto3" json:"hosts,omitempty"` + // Service-level diagnostic message (e.g. "no available hosts", "resolver error"). + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ServiceHealthDetail) Reset() { + *x = ServiceHealthDetail{} + mi := &file_temporal_server_api_health_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ServiceHealthDetail) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ServiceHealthDetail) ProtoMessage() {} + +func (x *ServiceHealthDetail) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_health_v1_message_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ServiceHealthDetail.ProtoReflect.Descriptor instead. 
+func (*ServiceHealthDetail) Descriptor() ([]byte, []int) { + return file_temporal_server_api_health_v1_message_proto_rawDescGZIP(), []int{2} +} + +func (x *ServiceHealthDetail) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *ServiceHealthDetail) GetState() v1.HealthState { + if x != nil { + return x.State + } + return v1.HealthState(0) +} + +func (x *ServiceHealthDetail) GetHosts() []*HostHealthDetail { + if x != nil { + return x.Hosts + } + return nil +} + +func (x *ServiceHealthDetail) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_temporal_server_api_health_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_health_v1_message_proto_rawDesc = "" + + "\n" + + "+temporal/server/api/health/v1/message.proto\x12\x1dtemporal.server.api.health.v1\x1a*temporal/server/api/enums/v1/cluster.proto\"\xbb\x01\n" + + "\vHealthCheck\x12\x1d\n" + + "\n" + + "check_type\x18\x01 \x01(\tR\tcheckType\x12?\n" + + "\x05state\x18\x02 \x01(\x0e2).temporal.server.api.enums.v1.HealthStateR\x05state\x12\x14\n" + + "\x05value\x18\x03 \x01(\x01R\x05value\x12\x1c\n" + + "\tthreshold\x18\x04 \x01(\x01R\tthreshold\x12\x18\n" + + "\amessage\x18\x05 \x01(\tR\amessage\"\xb1\x01\n" + + "\x10HostHealthDetail\x12\x18\n" + + "\aaddress\x18\x01 \x01(\tR\aaddress\x12?\n" + + "\x05state\x18\x02 \x01(\x0e2).temporal.server.api.enums.v1.HealthStateR\x05state\x12B\n" + + "\x06checks\x18\x03 \x03(\v2*.temporal.server.api.health.v1.HealthCheckR\x06checks\"\xd1\x01\n" + + "\x13ServiceHealthDetail\x12\x18\n" + + "\aservice\x18\x01 \x01(\tR\aservice\x12?\n" + + "\x05state\x18\x02 \x01(\x0e2).temporal.server.api.enums.v1.HealthStateR\x05state\x12E\n" + + "\x05hosts\x18\x03 \x03(\v2/.temporal.server.api.health.v1.HostHealthDetailR\x05hosts\x12\x18\n" + + "\amessage\x18\x04 \x01(\tR\amessageB,Z*go.temporal.io/server/api/health/v1;healthb\x06proto3" + +var ( + 
file_temporal_server_api_health_v1_message_proto_rawDescOnce sync.Once + file_temporal_server_api_health_v1_message_proto_rawDescData []byte +) + +func file_temporal_server_api_health_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_api_health_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_api_health_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_health_v1_message_proto_rawDesc), len(file_temporal_server_api_health_v1_message_proto_rawDesc))) + }) + return file_temporal_server_api_health_v1_message_proto_rawDescData +} + +var file_temporal_server_api_health_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_temporal_server_api_health_v1_message_proto_goTypes = []any{ + (*HealthCheck)(nil), // 0: temporal.server.api.health.v1.HealthCheck + (*HostHealthDetail)(nil), // 1: temporal.server.api.health.v1.HostHealthDetail + (*ServiceHealthDetail)(nil), // 2: temporal.server.api.health.v1.ServiceHealthDetail + (v1.HealthState)(0), // 3: temporal.server.api.enums.v1.HealthState +} +var file_temporal_server_api_health_v1_message_proto_depIdxs = []int32{ + 3, // 0: temporal.server.api.health.v1.HealthCheck.state:type_name -> temporal.server.api.enums.v1.HealthState + 3, // 1: temporal.server.api.health.v1.HostHealthDetail.state:type_name -> temporal.server.api.enums.v1.HealthState + 0, // 2: temporal.server.api.health.v1.HostHealthDetail.checks:type_name -> temporal.server.api.health.v1.HealthCheck + 3, // 3: temporal.server.api.health.v1.ServiceHealthDetail.state:type_name -> temporal.server.api.enums.v1.HealthState + 1, // 4: temporal.server.api.health.v1.ServiceHealthDetail.hosts:type_name -> temporal.server.api.health.v1.HostHealthDetail + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // 
[0:5] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_health_v1_message_proto_init() } +func file_temporal_server_api_health_v1_message_proto_init() { + if File_temporal_server_api_health_v1_message_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_health_v1_message_proto_rawDesc), len(file_temporal_server_api_health_v1_message_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_health_v1_message_proto_goTypes, + DependencyIndexes: file_temporal_server_api_health_v1_message_proto_depIdxs, + MessageInfos: file_temporal_server_api_health_v1_message_proto_msgTypes, + }.Build() + File_temporal_server_api_health_v1_message_proto = out.File + file_temporal_server_api_health_v1_message_proto_goTypes = nil + file_temporal_server_api_health_v1_message_proto_depIdxs = nil +} diff --git a/api/history/v1/message.go-helpers.pb.go b/api/history/v1/message.go-helpers.pb.go index c35891302e2..6486d86a52d 100644 --- a/api/history/v1/message.go-helpers.pb.go +++ b/api/history/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package history @@ -250,3 +226,77 @@ func (this *TaskRange) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type StrippedHistoryEvent to the protobuf v3 wire format +func (val *StrippedHistoryEvent) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StrippedHistoryEvent from the protobuf v3 wire format +func (val *StrippedHistoryEvent) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StrippedHistoryEvent) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StrippedHistoryEvent values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StrippedHistoryEvent) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StrippedHistoryEvent + switch t := that.(type) { + case *StrippedHistoryEvent: + that1 = t + case StrippedHistoryEvent: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StrippedHistoryEvents to the protobuf v3 wire format +func (val *StrippedHistoryEvents) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StrippedHistoryEvents from the protobuf v3 wire format +func (val *StrippedHistoryEvents) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StrippedHistoryEvents) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StrippedHistoryEvents values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StrippedHistoryEvents) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StrippedHistoryEvents + switch t := that.(type) { + case *StrippedHistoryEvents: + that1 = t + case StrippedHistoryEvents: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/history/v1/message.pb.go b/api/history/v1/message.pb.go index d1faec9cf54..29665b34dab 100644 --- a/api/history/v1/message.pb.go +++ b/api/history/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package history import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/history/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -46,21 +25,18 @@ const ( ) type TransientWorkflowTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A list of history events that are to be appended to the "real" workflow history. 
HistorySuffix []*v1.HistoryEvent `protobuf:"bytes,3,rep,name=history_suffix,json=historySuffix,proto3" json:"history_suffix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TransientWorkflowTaskInfo) Reset() { *x = TransientWorkflowTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TransientWorkflowTaskInfo) String() string { @@ -71,7 +47,7 @@ func (*TransientWorkflowTaskInfo) ProtoMessage() {} func (x *TransientWorkflowTaskInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -95,21 +71,18 @@ func (x *TransientWorkflowTaskInfo) GetHistorySuffix() []*v1.HistoryEvent { // VersionHistoryItem contains signal eventId and the corresponding version. 
type VersionHistoryItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields - - EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` - Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VersionHistoryItem) Reset() { *x = VersionHistoryItem{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VersionHistoryItem) String() string { @@ -120,7 +93,7 @@ func (*VersionHistoryItem) ProtoMessage() {} func (x *VersionHistoryItem) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -151,21 +124,18 @@ func (x *VersionHistoryItem) GetVersion() int64 { // VersionHistory contains the version history of a branch. 
type VersionHistory struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + BranchToken []byte `protobuf:"bytes,1,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + Items []*VersionHistoryItem `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` unknownFields protoimpl.UnknownFields - - BranchToken []byte `protobuf:"bytes,1,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` - Items []*VersionHistoryItem `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VersionHistory) Reset() { *x = VersionHistory{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VersionHistory) String() string { @@ -176,7 +146,7 @@ func (*VersionHistory) ProtoMessage() {} func (x *VersionHistory) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -207,21 +177,18 @@ func (x *VersionHistory) GetItems() []*VersionHistoryItem { // VersionHistories contains all version histories from all branches. 
type VersionHistories struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CurrentVersionHistoryIndex int32 `protobuf:"varint,1,opt,name=current_version_history_index,json=currentVersionHistoryIndex,proto3" json:"current_version_history_index,omitempty"` - Histories []*VersionHistory `protobuf:"bytes,2,rep,name=histories,proto3" json:"histories,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + CurrentVersionHistoryIndex int32 `protobuf:"varint,1,opt,name=current_version_history_index,json=currentVersionHistoryIndex,proto3" json:"current_version_history_index,omitempty"` + Histories []*VersionHistory `protobuf:"bytes,2,rep,name=histories,proto3" json:"histories,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *VersionHistories) Reset() { *x = VersionHistories{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VersionHistories) String() string { @@ -232,7 +199,7 @@ func (*VersionHistories) ProtoMessage() {} func (x *VersionHistories) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -262,21 +229,18 @@ func (x *VersionHistories) GetHistories() []*VersionHistory { } type TaskKey struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TaskId int64 `protobuf:"varint,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + FireTime 
*timestamppb.Timestamp `protobuf:"bytes,2,opt,name=fire_time,json=fireTime,proto3" json:"fire_time,omitempty"` unknownFields protoimpl.UnknownFields - - TaskId int64 `protobuf:"varint,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - FireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=fire_time,json=fireTime,proto3" json:"fire_time,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TaskKey) Reset() { *x = TaskKey{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskKey) String() string { @@ -287,7 +251,7 @@ func (*TaskKey) ProtoMessage() {} func (x *TaskKey) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -317,21 +281,18 @@ func (x *TaskKey) GetFireTime() *timestamppb.Timestamp { } type TaskRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - InclusiveMinTaskKey *TaskKey `protobuf:"bytes,1,opt,name=inclusive_min_task_key,json=inclusiveMinTaskKey,proto3" json:"inclusive_min_task_key,omitempty"` - ExclusiveMaxTaskKey *TaskKey `protobuf:"bytes,2,opt,name=exclusive_max_task_key,json=exclusiveMaxTaskKey,proto3" json:"exclusive_max_task_key,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + InclusiveMinTaskKey *TaskKey `protobuf:"bytes,1,opt,name=inclusive_min_task_key,json=inclusiveMinTaskKey,proto3" json:"inclusive_min_task_key,omitempty"` + ExclusiveMaxTaskKey *TaskKey 
`protobuf:"bytes,2,opt,name=exclusive_max_task_key,json=exclusiveMaxTaskKey,proto3" json:"exclusive_max_task_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskRange) Reset() { *x = TaskRange{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskRange) String() string { @@ -342,7 +303,7 @@ func (*TaskRange) ProtoMessage() {} func (x *TaskRange) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -371,108 +332,169 @@ func (x *TaskRange) GetExclusiveMaxTaskKey() *TaskKey { return nil } -var File_temporal_server_api_history_v1_message_proto protoreflect.FileDescriptor +// StrippedHistoryEvent is a stripped down version of HistoryEvent that only contains the event_id and version. 
+type StrippedHistoryEvent struct { + state protoimpl.MessageState `protogen:"open.v1"` + EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StrippedHistoryEvent) Reset() { + *x = StrippedHistoryEvent{} + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StrippedHistoryEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StrippedHistoryEvent) ProtoMessage() {} + +func (x *StrippedHistoryEvent) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -var file_temporal_server_api_history_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, - 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 
0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0x79, 0x0a, 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x50, 0x0a, 0x0e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, - 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x51, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x1d, 0x0a, 0x08, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x22, 0x85, 0x01, 0x0a, 0x0e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x0a, 0x0c, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x72, 0x61, - 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x05, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 
- 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, - 0x6d, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xab, 0x01, 0x0a, 0x10, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x1d, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x1a, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x42, 0x02, 0x68, 0x00, 0x12, 0x50, - 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, - 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x09, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x63, 0x0a, 0x07, - 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x66, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x08, 0x66, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xcf, 0x01, - 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 
0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, - 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x54, 0x61, - 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x60, 0x0a, 0x16, 0x65, 0x78, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6b, 0x65, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x52, 0x13, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x54, 0x61, 0x73, 0x6b, 0x4b, - 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x3b, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +// Deprecated: Use StrippedHistoryEvent.ProtoReflect.Descriptor instead. 
+func (*StrippedHistoryEvent) Descriptor() ([]byte, []int) { + return file_temporal_server_api_history_v1_message_proto_rawDescGZIP(), []int{6} +} + +func (x *StrippedHistoryEvent) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *StrippedHistoryEvent) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type StrippedHistoryEvents struct { + state protoimpl.MessageState `protogen:"open.v1"` + Events []*StrippedHistoryEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StrippedHistoryEvents) Reset() { + *x = StrippedHistoryEvents{} + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StrippedHistoryEvents) String() string { + return protoimpl.X.MessageStringOf(x) } +func (*StrippedHistoryEvents) ProtoMessage() {} + +func (x *StrippedHistoryEvents) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_history_v1_message_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StrippedHistoryEvents.ProtoReflect.Descriptor instead. 
+func (*StrippedHistoryEvents) Descriptor() ([]byte, []int) { + return file_temporal_server_api_history_v1_message_proto_rawDescGZIP(), []int{7} +} + +func (x *StrippedHistoryEvents) GetEvents() []*StrippedHistoryEvent { + if x != nil { + return x.Events + } + return nil +} + +var File_temporal_server_api_history_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_history_v1_message_proto_rawDesc = "" + + "\n" + + ",temporal/server/api/history/v1/message.proto\x12\x1etemporal.server.api.history.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a%temporal/api/history/v1/message.proto\"u\n" + + "\x19TransientWorkflowTaskInfo\x12L\n" + + "\x0ehistory_suffix\x18\x03 \x03(\v2%.temporal.api.history.v1.HistoryEventR\rhistorySuffixJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03\"I\n" + + "\x12VersionHistoryItem\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventId\x12\x18\n" + + "\aversion\x18\x02 \x01(\x03R\aversion\"}\n" + + "\x0eVersionHistory\x12!\n" + + "\fbranch_token\x18\x01 \x01(\fR\vbranchToken\x12H\n" + + "\x05items\x18\x02 \x03(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x05items\"\xa3\x01\n" + + "\x10VersionHistories\x12A\n" + + "\x1dcurrent_version_history_index\x18\x01 \x01(\x05R\x1acurrentVersionHistoryIndex\x12L\n" + + "\thistories\x18\x02 \x03(\v2..temporal.server.api.history.v1.VersionHistoryR\thistories\"[\n" + + "\aTaskKey\x12\x17\n" + + "\atask_id\x18\x01 \x01(\x03R\x06taskId\x127\n" + + "\tfire_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\bfireTime\"\xc7\x01\n" + + "\tTaskRange\x12\\\n" + + "\x16inclusive_min_task_key\x18\x01 \x01(\v2'.temporal.server.api.history.v1.TaskKeyR\x13inclusiveMinTaskKey\x12\\\n" + + "\x16exclusive_max_task_key\x18\x02 \x01(\v2'.temporal.server.api.history.v1.TaskKeyR\x13exclusiveMaxTaskKey\"K\n" + + "\x14StrippedHistoryEvent\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventId\x12\x18\n" + + "\aversion\x18\x04 \x01(\x03R\aversion\"e\n" + + "\x15StrippedHistoryEvents\x12L\n" + + 
"\x06events\x18\x01 \x03(\v24.temporal.server.api.history.v1.StrippedHistoryEventR\x06eventsB.Z,go.temporal.io/server/api/history/v1;historyb\x06proto3" + var ( file_temporal_server_api_history_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_history_v1_message_proto_rawDescData = file_temporal_server_api_history_v1_message_proto_rawDesc + file_temporal_server_api_history_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_history_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_history_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_history_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_history_v1_message_proto_rawDescData) + file_temporal_server_api_history_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_history_v1_message_proto_rawDesc), len(file_temporal_server_api_history_v1_message_proto_rawDesc))) }) return file_temporal_server_api_history_v1_message_proto_rawDescData } -var file_temporal_server_api_history_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 6) -var file_temporal_server_api_history_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_history_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_temporal_server_api_history_v1_message_proto_goTypes = []any{ (*TransientWorkflowTaskInfo)(nil), // 0: temporal.server.api.history.v1.TransientWorkflowTaskInfo (*VersionHistoryItem)(nil), // 1: temporal.server.api.history.v1.VersionHistoryItem (*VersionHistory)(nil), // 2: temporal.server.api.history.v1.VersionHistory (*VersionHistories)(nil), // 3: temporal.server.api.history.v1.VersionHistories (*TaskKey)(nil), // 4: temporal.server.api.history.v1.TaskKey (*TaskRange)(nil), // 5: temporal.server.api.history.v1.TaskRange - (*v1.HistoryEvent)(nil), // 6: temporal.api.history.v1.HistoryEvent - (*timestamppb.Timestamp)(nil), // 7: 
google.protobuf.Timestamp + (*StrippedHistoryEvent)(nil), // 6: temporal.server.api.history.v1.StrippedHistoryEvent + (*StrippedHistoryEvents)(nil), // 7: temporal.server.api.history.v1.StrippedHistoryEvents + (*v1.HistoryEvent)(nil), // 8: temporal.api.history.v1.HistoryEvent + (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp } var file_temporal_server_api_history_v1_message_proto_depIdxs = []int32{ - 6, // 0: temporal.server.api.history.v1.TransientWorkflowTaskInfo.history_suffix:type_name -> temporal.api.history.v1.HistoryEvent + 8, // 0: temporal.server.api.history.v1.TransientWorkflowTaskInfo.history_suffix:type_name -> temporal.api.history.v1.HistoryEvent 1, // 1: temporal.server.api.history.v1.VersionHistory.items:type_name -> temporal.server.api.history.v1.VersionHistoryItem 2, // 2: temporal.server.api.history.v1.VersionHistories.histories:type_name -> temporal.server.api.history.v1.VersionHistory - 7, // 3: temporal.server.api.history.v1.TaskKey.fire_time:type_name -> google.protobuf.Timestamp + 9, // 3: temporal.server.api.history.v1.TaskKey.fire_time:type_name -> google.protobuf.Timestamp 4, // 4: temporal.server.api.history.v1.TaskRange.inclusive_min_task_key:type_name -> temporal.server.api.history.v1.TaskKey 4, // 5: temporal.server.api.history.v1.TaskRange.exclusive_max_task_key:type_name -> temporal.server.api.history.v1.TaskKey - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 6, // 6: temporal.server.api.history.v1.StrippedHistoryEvents.events:type_name -> temporal.server.api.history.v1.StrippedHistoryEvent + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // 
[0:7] is the sub-list for field type_name } func init() { file_temporal_server_api_history_v1_message_proto_init() } @@ -480,87 +502,13 @@ func file_temporal_server_api_history_v1_message_proto_init() { if File_temporal_server_api_history_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_history_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransientWorkflowTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_history_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionHistoryItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_history_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionHistory); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_history_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionHistories); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_history_v1_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_history_v1_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return 
nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_history_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_history_v1_message_proto_rawDesc), len(file_temporal_server_api_history_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 6, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, @@ -569,7 +517,6 @@ func file_temporal_server_api_history_v1_message_proto_init() { MessageInfos: file_temporal_server_api_history_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_history_v1_message_proto = out.File - file_temporal_server_api_history_v1_message_proto_rawDesc = nil file_temporal_server_api_history_v1_message_proto_goTypes = nil file_temporal_server_api_history_v1_message_proto_depIdxs = nil } diff --git a/api/historyservice/v1/request_response.go-helpers.pb.go b/api/historyservice/v1/request_response.go-helpers.pb.go index 8e049f7e9ef..dba8b8c3821 100644 --- a/api/historyservice/v1/request_response.go-helpers.pb.go +++ b/api/historyservice/v1/request_response.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package historyservice @@ -29,6 +5,43 @@ import ( "google.golang.org/protobuf/proto" ) +// Marshal an object of type RoutingOptions to the protobuf v3 wire format +func (val *RoutingOptions) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RoutingOptions from the protobuf v3 wire format +func (val *RoutingOptions) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RoutingOptions) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RoutingOptions values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RoutingOptions) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RoutingOptions + switch t := that.(type) { + case *RoutingOptions: + that1 = t + case RoutingOptions: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type StartWorkflowExecutionRequest to the protobuf v3 wire format func (val *StartWorkflowExecutionRequest) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -325,6 +338,80 @@ func (this *ResetStickyTaskQueueResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type ExecuteMultiOperationRequest to the protobuf v3 wire format +func (val *ExecuteMultiOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ExecuteMultiOperationRequest from the protobuf v3 wire format +func (val *ExecuteMultiOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ExecuteMultiOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ExecuteMultiOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ExecuteMultiOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ExecuteMultiOperationRequest + switch t := that.(type) { + case *ExecuteMultiOperationRequest: + that1 = t + case ExecuteMultiOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ExecuteMultiOperationResponse to the protobuf v3 wire format +func (val *ExecuteMultiOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ExecuteMultiOperationResponse from the protobuf v3 wire format +func (val *ExecuteMultiOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ExecuteMultiOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ExecuteMultiOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ExecuteMultiOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ExecuteMultiOperationResponse + switch t := that.(type) { + case *ExecuteMultiOperationResponse: + that1 = t + case ExecuteMultiOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type RecordWorkflowTaskStartedRequest to the protobuf v3 wire format func (val *RecordWorkflowTaskStartedRequest) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -399,6 +486,43 @@ func (this *RecordWorkflowTaskStartedResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type RecordWorkflowTaskStartedResponseWithRawHistory to the protobuf v3 wire format +func (val *RecordWorkflowTaskStartedResponseWithRawHistory) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RecordWorkflowTaskStartedResponseWithRawHistory from the protobuf v3 wire format +func (val *RecordWorkflowTaskStartedResponseWithRawHistory) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RecordWorkflowTaskStartedResponseWithRawHistory) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RecordWorkflowTaskStartedResponseWithRawHistory values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RecordWorkflowTaskStartedResponseWithRawHistory) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RecordWorkflowTaskStartedResponseWithRawHistory + switch t := that.(type) { + case *RecordWorkflowTaskStartedResponseWithRawHistory: + that1 = t + case RecordWorkflowTaskStartedResponseWithRawHistory: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type RecordActivityTaskStartedRequest to the protobuf v3 wire format func (val *RecordActivityTaskStartedRequest) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -4062,6 +4186,43 @@ func (this *GetWorkflowExecutionHistoryResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type GetWorkflowExecutionHistoryResponseWithRaw to the protobuf v3 wire format +func (val *GetWorkflowExecutionHistoryResponseWithRaw) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetWorkflowExecutionHistoryResponseWithRaw from the protobuf v3 wire format +func (val *GetWorkflowExecutionHistoryResponseWithRaw) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetWorkflowExecutionHistoryResponseWithRaw) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetWorkflowExecutionHistoryResponseWithRaw values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetWorkflowExecutionHistoryResponseWithRaw) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetWorkflowExecutionHistoryResponseWithRaw + switch t := that.(type) { + case *GetWorkflowExecutionHistoryResponseWithRaw: + that1 = t + case GetWorkflowExecutionHistoryResponseWithRaw: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type GetWorkflowExecutionHistoryReverseRequest to the protobuf v3 wire format func (val *GetWorkflowExecutionHistoryReverseRequest) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -4727,3 +4888,1039 @@ func (this *ListTasksResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type CompleteNexusOperationChasmRequest to the protobuf v3 wire format +func (val *CompleteNexusOperationChasmRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CompleteNexusOperationChasmRequest from the protobuf v3 wire format +func (val *CompleteNexusOperationChasmRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CompleteNexusOperationChasmRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CompleteNexusOperationChasmRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CompleteNexusOperationChasmRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CompleteNexusOperationChasmRequest + switch t := that.(type) { + case *CompleteNexusOperationChasmRequest: + that1 = t + case CompleteNexusOperationChasmRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CompleteNexusOperationChasmResponse to the protobuf v3 wire format +func (val *CompleteNexusOperationChasmResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CompleteNexusOperationChasmResponse from the protobuf v3 wire format +func (val *CompleteNexusOperationChasmResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CompleteNexusOperationChasmResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CompleteNexusOperationChasmResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CompleteNexusOperationChasmResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CompleteNexusOperationChasmResponse + switch t := that.(type) { + case *CompleteNexusOperationChasmResponse: + that1 = t + case CompleteNexusOperationChasmResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CompleteNexusOperationRequest to the protobuf v3 wire format +func (val *CompleteNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CompleteNexusOperationRequest from the protobuf v3 wire format +func (val *CompleteNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CompleteNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CompleteNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CompleteNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CompleteNexusOperationRequest + switch t := that.(type) { + case *CompleteNexusOperationRequest: + that1 = t + case CompleteNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CompleteNexusOperationResponse to the protobuf v3 wire format +func (val *CompleteNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CompleteNexusOperationResponse from the protobuf v3 wire format +func (val *CompleteNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CompleteNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CompleteNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CompleteNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CompleteNexusOperationResponse + switch t := that.(type) { + case *CompleteNexusOperationResponse: + that1 = t + case CompleteNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvokeStateMachineMethodRequest to the protobuf v3 wire format +func (val *InvokeStateMachineMethodRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvokeStateMachineMethodRequest from the protobuf v3 wire format +func (val *InvokeStateMachineMethodRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvokeStateMachineMethodRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvokeStateMachineMethodRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvokeStateMachineMethodRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvokeStateMachineMethodRequest + switch t := that.(type) { + case *InvokeStateMachineMethodRequest: + that1 = t + case InvokeStateMachineMethodRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvokeStateMachineMethodResponse to the protobuf v3 wire format +func (val *InvokeStateMachineMethodResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvokeStateMachineMethodResponse from the protobuf v3 wire format +func (val *InvokeStateMachineMethodResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvokeStateMachineMethodResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvokeStateMachineMethodResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvokeStateMachineMethodResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvokeStateMachineMethodResponse + switch t := that.(type) { + case *InvokeStateMachineMethodResponse: + that1 = t + case InvokeStateMachineMethodResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeepHealthCheckRequest to the protobuf v3 wire format +func (val *DeepHealthCheckRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeepHealthCheckRequest from the protobuf v3 wire format +func (val *DeepHealthCheckRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeepHealthCheckRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeepHealthCheckRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeepHealthCheckRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeepHealthCheckRequest + switch t := that.(type) { + case *DeepHealthCheckRequest: + that1 = t + case DeepHealthCheckRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeepHealthCheckResponse to the protobuf v3 wire format +func (val *DeepHealthCheckResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeepHealthCheckResponse from the protobuf v3 wire format +func (val *DeepHealthCheckResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeepHealthCheckResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeepHealthCheckResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeepHealthCheckResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeepHealthCheckResponse + switch t := that.(type) { + case *DeepHealthCheckResponse: + that1 = t + case DeepHealthCheckResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncWorkflowStateRequest to the protobuf v3 wire format +func (val *SyncWorkflowStateRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncWorkflowStateRequest from the protobuf v3 wire format +func (val *SyncWorkflowStateRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncWorkflowStateRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncWorkflowStateRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncWorkflowStateRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncWorkflowStateRequest + switch t := that.(type) { + case *SyncWorkflowStateRequest: + that1 = t + case SyncWorkflowStateRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncWorkflowStateResponse to the protobuf v3 wire format +func (val *SyncWorkflowStateResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncWorkflowStateResponse from the protobuf v3 wire format +func (val *SyncWorkflowStateResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncWorkflowStateResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncWorkflowStateResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncWorkflowStateResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncWorkflowStateResponse + switch t := that.(type) { + case *SyncWorkflowStateResponse: + that1 = t + case SyncWorkflowStateResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateActivityOptionsRequest to the protobuf v3 wire format +func (val *UpdateActivityOptionsRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateActivityOptionsRequest from the protobuf v3 wire format +func (val *UpdateActivityOptionsRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateActivityOptionsRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateActivityOptionsRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateActivityOptionsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateActivityOptionsRequest + switch t := that.(type) { + case *UpdateActivityOptionsRequest: + that1 = t + case UpdateActivityOptionsRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateActivityOptionsResponse to the protobuf v3 wire format +func (val *UpdateActivityOptionsResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateActivityOptionsResponse from the protobuf v3 wire format +func (val *UpdateActivityOptionsResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateActivityOptionsResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateActivityOptionsResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateActivityOptionsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateActivityOptionsResponse + switch t := that.(type) { + case *UpdateActivityOptionsResponse: + that1 = t + case UpdateActivityOptionsResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PauseActivityRequest to the protobuf v3 wire format +func (val *PauseActivityRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PauseActivityRequest from the protobuf v3 wire format +func (val *PauseActivityRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PauseActivityRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PauseActivityRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PauseActivityRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PauseActivityRequest + switch t := that.(type) { + case *PauseActivityRequest: + that1 = t + case PauseActivityRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PauseActivityResponse to the protobuf v3 wire format +func (val *PauseActivityResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PauseActivityResponse from the protobuf v3 wire format +func (val *PauseActivityResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PauseActivityResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PauseActivityResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PauseActivityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PauseActivityResponse + switch t := that.(type) { + case *PauseActivityResponse: + that1 = t + case PauseActivityResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UnpauseActivityRequest to the protobuf v3 wire format +func (val *UnpauseActivityRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UnpauseActivityRequest from the protobuf v3 wire format +func (val *UnpauseActivityRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UnpauseActivityRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UnpauseActivityRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UnpauseActivityRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UnpauseActivityRequest + switch t := that.(type) { + case *UnpauseActivityRequest: + that1 = t + case UnpauseActivityRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UnpauseActivityResponse to the protobuf v3 wire format +func (val *UnpauseActivityResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UnpauseActivityResponse from the protobuf v3 wire format +func (val *UnpauseActivityResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UnpauseActivityResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UnpauseActivityResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UnpauseActivityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UnpauseActivityResponse + switch t := that.(type) { + case *UnpauseActivityResponse: + that1 = t + case UnpauseActivityResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ResetActivityRequest to the protobuf v3 wire format +func (val *ResetActivityRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ResetActivityRequest from the protobuf v3 wire format +func (val *ResetActivityRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ResetActivityRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ResetActivityRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ResetActivityRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ResetActivityRequest + switch t := that.(type) { + case *ResetActivityRequest: + that1 = t + case ResetActivityRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ResetActivityResponse to the protobuf v3 wire format +func (val *ResetActivityResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ResetActivityResponse from the protobuf v3 wire format +func (val *ResetActivityResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ResetActivityResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ResetActivityResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ResetActivityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ResetActivityResponse + switch t := that.(type) { + case *ResetActivityResponse: + that1 = t + case ResetActivityResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateWorkflowExecutionOptionsRequest to the protobuf v3 wire format +func (val *UpdateWorkflowExecutionOptionsRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateWorkflowExecutionOptionsRequest from the protobuf v3 wire format +func (val *UpdateWorkflowExecutionOptionsRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateWorkflowExecutionOptionsRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateWorkflowExecutionOptionsRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateWorkflowExecutionOptionsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateWorkflowExecutionOptionsRequest + switch t := that.(type) { + case *UpdateWorkflowExecutionOptionsRequest: + that1 = t + case UpdateWorkflowExecutionOptionsRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateWorkflowExecutionOptionsResponse to the protobuf v3 wire format +func (val *UpdateWorkflowExecutionOptionsResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateWorkflowExecutionOptionsResponse from the protobuf v3 wire format +func (val *UpdateWorkflowExecutionOptionsResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateWorkflowExecutionOptionsResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateWorkflowExecutionOptionsResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateWorkflowExecutionOptionsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateWorkflowExecutionOptionsResponse + switch t := that.(type) { + case *UpdateWorkflowExecutionOptionsResponse: + that1 = t + case UpdateWorkflowExecutionOptionsResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PauseWorkflowExecutionRequest to the protobuf v3 wire format +func (val *PauseWorkflowExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PauseWorkflowExecutionRequest from the protobuf v3 wire format +func (val *PauseWorkflowExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PauseWorkflowExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PauseWorkflowExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PauseWorkflowExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PauseWorkflowExecutionRequest + switch t := that.(type) { + case *PauseWorkflowExecutionRequest: + that1 = t + case PauseWorkflowExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PauseWorkflowExecutionResponse to the protobuf v3 wire format +func (val *PauseWorkflowExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PauseWorkflowExecutionResponse from the protobuf v3 wire format +func (val *PauseWorkflowExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PauseWorkflowExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PauseWorkflowExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PauseWorkflowExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PauseWorkflowExecutionResponse + switch t := that.(type) { + case *PauseWorkflowExecutionResponse: + that1 = t + case PauseWorkflowExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UnpauseWorkflowExecutionRequest to the protobuf v3 wire format +func (val *UnpauseWorkflowExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UnpauseWorkflowExecutionRequest from the protobuf v3 wire format +func (val *UnpauseWorkflowExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UnpauseWorkflowExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UnpauseWorkflowExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UnpauseWorkflowExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UnpauseWorkflowExecutionRequest + switch t := that.(type) { + case *UnpauseWorkflowExecutionRequest: + that1 = t + case UnpauseWorkflowExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UnpauseWorkflowExecutionResponse to the protobuf v3 wire format +func (val *UnpauseWorkflowExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UnpauseWorkflowExecutionResponse from the protobuf v3 wire format +func (val *UnpauseWorkflowExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UnpauseWorkflowExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UnpauseWorkflowExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UnpauseWorkflowExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UnpauseWorkflowExecutionResponse + switch t := that.(type) { + case *UnpauseWorkflowExecutionResponse: + that1 = t + case UnpauseWorkflowExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartNexusOperationRequest to the protobuf v3 wire format +func (val *StartNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartNexusOperationRequest from the protobuf v3 wire format +func (val *StartNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartNexusOperationRequest + switch t := that.(type) { + case *StartNexusOperationRequest: + that1 = t + case StartNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartNexusOperationResponse to the protobuf v3 wire format +func (val *StartNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartNexusOperationResponse from the protobuf v3 wire format +func (val *StartNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartNexusOperationResponse + switch t := that.(type) { + case *StartNexusOperationResponse: + that1 = t + case StartNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CancelNexusOperationRequest to the protobuf v3 wire format +func (val *CancelNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CancelNexusOperationRequest from the protobuf v3 wire format +func (val *CancelNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CancelNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CancelNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CancelNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CancelNexusOperationRequest + switch t := that.(type) { + case *CancelNexusOperationRequest: + that1 = t + case CancelNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CancelNexusOperationResponse to the protobuf v3 wire format +func (val *CancelNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CancelNexusOperationResponse from the protobuf v3 wire format +func (val *CancelNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CancelNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CancelNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CancelNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CancelNexusOperationResponse + switch t := that.(type) { + case *CancelNexusOperationResponse: + that1 = t + case CancelNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/historyservice/v1/request_response.pb.go b/api/historyservice/v1/request_response.pb.go index f5ceb827d5f..46d476bcba2 100644 --- a/api/historyservice/v1/request_response.pb.go +++ b/api/historyservice/v1/request_response.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,27 +9,35 @@ package historyservice import ( reflect "reflect" sync "sync" + unsafe "unsafe" + v123 "go.temporal.io/api/activity/v1" v14 "go.temporal.io/api/common/v1" + v16 "go.temporal.io/api/deployment/v1" v12 "go.temporal.io/api/enums/v1" v13 "go.temporal.io/api/failure/v1" - v111 "go.temporal.io/api/history/v1" - v110 "go.temporal.io/api/protocol/v1" - v19 "go.temporal.io/api/query/v1" - v17 "go.temporal.io/api/taskqueue/v1" - v112 "go.temporal.io/api/workflow/v1" + v17 "go.temporal.io/api/history/v1" + v121 "go.temporal.io/api/nexus/v1" + v115 "go.temporal.io/api/protocol/v1" + v114 "go.temporal.io/api/query/v1" + v111 "go.temporal.io/api/taskqueue/v1" + v15 "go.temporal.io/api/workflow/v1" v1 "go.temporal.io/api/workflowservice/v1" - v116 "go.temporal.io/server/api/adminservice/v1" - v15 "go.temporal.io/server/api/clock/v1" - v117 "go.temporal.io/server/api/common/v1" - v18 "go.temporal.io/server/api/enums/v1" - v16 "go.temporal.io/server/api/history/v1" - v114 "go.temporal.io/server/api/namespace/v1" - v113 "go.temporal.io/server/api/persistence/v1" - v115 "go.temporal.io/server/api/replication/v1" + v118 "go.temporal.io/server/api/adminservice/v1" + v18 "go.temporal.io/server/api/clock/v1" + v119 "go.temporal.io/server/api/common/v1" + v112 "go.temporal.io/server/api/enums/v1" + v122 "go.temporal.io/server/api/health/v1" + v19 "go.temporal.io/server/api/history/v1" + v116 "go.temporal.io/server/api/namespace/v1" + v110 "go.temporal.io/server/api/persistence/v1" + v117 "go.temporal.io/server/api/replication/v1" + v113 "go.temporal.io/server/api/taskqueue/v1" + v120 "go.temporal.io/server/api/token/v1" v11 "go.temporal.io/server/api/workflow/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" durationpb "google.golang.org/protobuf/types/known/durationpb" timestamppb 
"google.golang.org/protobuf/types/known/timestamppb" ) @@ -63,11 +49,118 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type StartWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +// RoutingOptions define how a request is routed to the appropriate host. +type RoutingOptions struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Routing is custom and implemented in the non-generated client/history/client.go. + Custom bool `protobuf:"varint,1,opt,name=custom,proto3" json:"custom,omitempty"` + // Request will be routed to a random host. + AnyHost bool `protobuf:"varint,2,opt,name=any_host,json=anyHost,proto3" json:"any_host,omitempty"` + // Request will be routed according to the specified shard ID field. + ShardId string `protobuf:"bytes,3,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + // Requested routed by task token or workflow ID may also specify how to obtain the namespace ID. Defaults to the + // "namespace_id" field. + NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // Request will be routed by resolving the namespace ID and workflow ID to a given shard. + WorkflowId string `protobuf:"bytes,5,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // Request will be routed by resolving the namespace ID and the workflow ID from this task token to a given shard. + TaskToken string `protobuf:"bytes,6,opt,name=task_token,json=taskToken,proto3" json:"task_token,omitempty"` + // Request will be routed by resolving the namespace ID and the workflow ID from the first task info element. + TaskInfos string `protobuf:"bytes,7,opt,name=task_infos,json=taskInfos,proto3" json:"task_infos,omitempty"` + // Request will be routed by resolving the namespace ID and the workflow ID from this chasm ref to a given shard. 
+ ChasmComponentRef string `protobuf:"bytes,8,opt,name=chasm_component_ref,json=chasmComponentRef,proto3" json:"chasm_component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoutingOptions) Reset() { + *x = RoutingOptions{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoutingOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingOptions) ProtoMessage() {} + +func (x *RoutingOptions) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingOptions.ProtoReflect.Descriptor instead. +func (*RoutingOptions) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *RoutingOptions) GetCustom() bool { + if x != nil { + return x.Custom + } + return false +} + +func (x *RoutingOptions) GetAnyHost() bool { + if x != nil { + return x.AnyHost + } + return false +} + +func (x *RoutingOptions) GetShardId() string { + if x != nil { + return x.ShardId + } + return "" +} + +func (x *RoutingOptions) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RoutingOptions) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *RoutingOptions) GetTaskToken() string { + if x != nil { + return x.TaskToken + } + return "" +} + +func (x *RoutingOptions) GetTaskInfos() string { + if x != nil { + return x.TaskInfos + } + return "" +} + +func (x *RoutingOptions) GetChasmComponentRef() string { + if x != nil { + return 
x.ChasmComponentRef + } + return "" +} +type StartWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` StartRequest *v1.StartWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=start_request,json=startRequest,proto3" json:"start_request,omitempty"` ParentExecutionInfo *v11.ParentExecutionInfo `protobuf:"bytes,3,opt,name=parent_execution_info,json=parentExecutionInfo,proto3" json:"parent_execution_info,omitempty"` @@ -81,16 +174,46 @@ type StartWorkflowExecutionRequest struct { FirstWorkflowTaskBackoff *durationpb.Duration `protobuf:"bytes,9,opt,name=first_workflow_task_backoff,json=firstWorkflowTaskBackoff,proto3" json:"first_workflow_task_backoff,omitempty"` // For child or continued-as-new workflows, including a version here from the source // (parent/previous) will set the initial version stamp of this workflow. + // Deprecated. use `inherited_build_id` SourceVersionStamp *v14.WorkerVersionStamp `protobuf:"bytes,10,opt,name=source_version_stamp,json=sourceVersionStamp,proto3" json:"source_version_stamp,omitempty"` + // The root execution info of the new workflow. + // For top-level workflows (ie., without parent), this field must be nil. + RootExecutionInfo *v11.RootExecutionInfo `protobuf:"bytes,11,opt,name=root_execution_info,json=rootExecutionInfo,proto3" json:"root_execution_info,omitempty"` + // inherited build ID from parent/previous execution + // Deprecated. Use behavior, version, and task queue fields in `parent_execution_info`. + InheritedBuildId string `protobuf:"bytes,12,opt,name=inherited_build_id,json=inheritedBuildId,proto3" json:"inherited_build_id,omitempty"` + // If set, takes precedence over the Versioning Behavior sent by the SDK on Workflow Task completion. + // To unset the override after the workflow is running, use UpdateWorkflowExecutionOptions. 
+ VersioningOverride *v15.VersioningOverride `protobuf:"bytes,13,opt,name=versioning_override,json=versioningOverride,proto3" json:"versioning_override,omitempty"` + // If set, we verify the parent-child relationship before applying ID conflict policy WORKFLOW_ID_CONFLICT_POLICY_TERMINATE_EXISTING + ChildWorkflowOnly bool `protobuf:"varint,14,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` + // If present, the new workflow should start on this version with pinned base behavior. + InheritedPinnedVersion *v16.WorkerDeploymentVersion `protobuf:"bytes,15,opt,name=inherited_pinned_version,json=inheritedPinnedVersion,proto3" json:"inherited_pinned_version,omitempty"` + // Passes deployment version and revision number from a parent/previous workflow with AutoUpgrade behavior + // to its child/continued-as-new workflow. The first workflow task of the child/CAN workflow is dispatched to + // either this deployment version or the current version of the task queue, depending on which is the more recent version. + // After the first workflow task, the effective behavior of the workflow is determined by worker-sent values in + // subsequent workflow tasks. + InheritedAutoUpgradeInfo *v16.InheritedAutoUpgradeInfo `protobuf:"bytes,16,opt,name=inherited_auto_upgrade_info,json=inheritedAutoUpgradeInfo,proto3" json:"inherited_auto_upgrade_info,omitempty"` + // The target version that the previous run implicitly declined to upgrade to. + // Computed at continue-as-new time from the previous run's last_notified_target_version + // (if set) or its existing declined value (CaN chain). For retries, passed through + // directly from the started event. Written onto the new run's + // WorkflowExecutionStartedEvent. 
+ DeclinedTargetVersionUpgrade *v17.DeclinedTargetVersionUpgrade `protobuf:"bytes,17,opt,name=declined_target_version_upgrade,json=declinedTargetVersionUpgrade,proto3" json:"declined_target_version_upgrade,omitempty"` + // If a workflow execution is started by a previous execution (parent-child workflow or continue-as-new) + // that has already skipped some time, the accumulated skipped duration from that execution + // can be passed to the new workflow execution through this field. + InitialSkippedDuration *durationpb.Duration `protobuf:"bytes,18,opt,name=initial_skipped_duration,json=initialSkippedDuration,proto3" json:"initial_skipped_duration,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StartWorkflowExecutionRequest) Reset() { *x = StartWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartWorkflowExecutionRequest) String() string { @@ -100,8 +223,8 @@ func (x *StartWorkflowExecutionRequest) String() string { func (*StartWorkflowExecutionRequest) ProtoMessage() {} func (x *StartWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[1] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -113,7 +236,7 @@ func (x *StartWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
func (*StartWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{0} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{1} } func (x *StartWorkflowExecutionRequest) GetNamespaceId() string { @@ -186,25 +309,80 @@ func (x *StartWorkflowExecutionRequest) GetSourceVersionStamp() *v14.WorkerVersi return nil } -type StartWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *StartWorkflowExecutionRequest) GetRootExecutionInfo() *v11.RootExecutionInfo { + if x != nil { + return x.RootExecutionInfo + } + return nil +} + +func (x *StartWorkflowExecutionRequest) GetInheritedBuildId() string { + if x != nil { + return x.InheritedBuildId + } + return "" +} + +func (x *StartWorkflowExecutionRequest) GetVersioningOverride() *v15.VersioningOverride { + if x != nil { + return x.VersioningOverride + } + return nil +} + +func (x *StartWorkflowExecutionRequest) GetChildWorkflowOnly() bool { + if x != nil { + return x.ChildWorkflowOnly + } + return false +} + +func (x *StartWorkflowExecutionRequest) GetInheritedPinnedVersion() *v16.WorkerDeploymentVersion { + if x != nil { + return x.InheritedPinnedVersion + } + return nil +} + +func (x *StartWorkflowExecutionRequest) GetInheritedAutoUpgradeInfo() *v16.InheritedAutoUpgradeInfo { + if x != nil { + return x.InheritedAutoUpgradeInfo + } + return nil +} + +func (x *StartWorkflowExecutionRequest) GetDeclinedTargetVersionUpgrade() *v17.DeclinedTargetVersionUpgrade { + if x != nil { + return x.DeclinedTargetVersionUpgrade + } + return nil +} + +func (x *StartWorkflowExecutionRequest) GetInitialSkippedDuration() *durationpb.Duration { + if x != nil { + return x.InitialSkippedDuration + } + return nil +} - RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - Clock 
*v15.VectorClock `protobuf:"bytes,2,opt,name=clock,proto3" json:"clock,omitempty"` +type StartWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,2,opt,name=clock,proto3" json:"clock,omitempty"` // Set if request_eager_execution is set on the start request EagerWorkflowTask *v1.PollWorkflowTaskQueueResponse `protobuf:"bytes,3,opt,name=eager_workflow_task,json=eagerWorkflowTask,proto3" json:"eager_workflow_task,omitempty"` Started bool `protobuf:"varint,4,opt,name=started,proto3" json:"started,omitempty"` + Status v12.WorkflowExecutionStatus `protobuf:"varint,5,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` + Link *v14.Link `protobuf:"bytes,6,opt,name=link,proto3" json:"link,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StartWorkflowExecutionResponse) Reset() { *x = StartWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartWorkflowExecutionResponse) String() string { @@ -214,8 +392,8 @@ func (x *StartWorkflowExecutionResponse) String() string { func (*StartWorkflowExecutionResponse) ProtoMessage() {} func (x *StartWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[2] + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -227,7 +405,7 @@ func (x *StartWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartWorkflowExecutionResponse.ProtoReflect.Descriptor instead. func (*StartWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{1} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{2} } func (x *StartWorkflowExecutionResponse) GetRunId() string { @@ -237,7 +415,7 @@ func (x *StartWorkflowExecutionResponse) GetRunId() string { return "" } -func (x *StartWorkflowExecutionResponse) GetClock() *v15.VectorClock { +func (x *StartWorkflowExecutionResponse) GetClock() *v18.VectorClock { if x != nil { return x.Clock } @@ -258,25 +436,37 @@ func (x *StartWorkflowExecutionResponse) GetStarted() bool { return false } -type GetMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *StartWorkflowExecutionResponse) GetStatus() v12.WorkflowExecutionStatus { + if x != nil { + return x.Status + } + return v12.WorkflowExecutionStatus(0) +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` - CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - VersionHistoryItem *v16.VersionHistoryItem `protobuf:"bytes,5,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` +func (x 
*StartWorkflowExecutionResponse) GetLink() *v14.Link { + if x != nil { + return x.Link + } + return nil +} + +type GetMutableStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` + CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` + VersionHistoryItem *v19.VersionHistoryItem `protobuf:"bytes,5,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` + VersionedTransition *v110.VersionedTransition `protobuf:"bytes,6,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetMutableStateRequest) Reset() { *x = GetMutableStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetMutableStateRequest) String() string { @@ -286,8 +476,8 @@ func (x *GetMutableStateRequest) String() string { func (*GetMutableStateRequest) ProtoMessage() {} func (x *GetMutableStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -299,7 +489,7 @@ func (x *GetMutableStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMutableStateRequest.ProtoReflect.Descriptor instead. func (*GetMutableStateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{2} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{3} } func (x *GetMutableStateRequest) GetNamespaceId() string { @@ -330,48 +520,61 @@ func (x *GetMutableStateRequest) GetCurrentBranchToken() []byte { return nil } -func (x *GetMutableStateRequest) GetVersionHistoryItem() *v16.VersionHistoryItem { +func (x *GetMutableStateRequest) GetVersionHistoryItem() *v19.VersionHistoryItem { if x != nil { return x.VersionHistoryItem } return nil } -type GetMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *GetMutableStateRequest) GetVersionedTransition() *v110.VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} +type GetMutableStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` Execution *v14.WorkflowExecution `protobuf:"bytes,1,opt,name=execution,proto3" json:"execution,omitempty"` WorkflowType *v14.WorkflowType `protobuf:"bytes,2,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` NextEventId int64 `protobuf:"varint,3,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` PreviousStartedEventId int64 `protobuf:"varint,4,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` LastFirstEventId int64 
`protobuf:"varint,5,opt,name=last_first_event_id,json=lastFirstEventId,proto3" json:"last_first_event_id,omitempty"` - TaskQueue *v17.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - StickyTaskQueue *v17.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` + TaskQueue *v111.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + StickyTaskQueue *v111.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "to" is used to indicate interval. --) StickyTaskQueueScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=sticky_task_queue_schedule_to_start_timeout,json=stickyTaskQueueScheduleToStartTimeout,proto3" json:"sticky_task_queue_schedule_to_start_timeout,omitempty"` CurrentBranchToken []byte `protobuf:"bytes,13,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - WorkflowState v18.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` + WorkflowState v112.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` WorkflowStatus v12.WorkflowExecutionStatus `protobuf:"varint,16,opt,name=workflow_status,json=workflowStatus,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"workflow_status,omitempty"` - VersionHistories *v16.VersionHistories `protobuf:"bytes,17,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + VersionHistories *v19.VersionHistories 
`protobuf:"bytes,17,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` IsStickyTaskQueueEnabled bool `protobuf:"varint,18,opt,name=is_sticky_task_queue_enabled,json=isStickyTaskQueueEnabled,proto3" json:"is_sticky_task_queue_enabled,omitempty"` LastFirstEventTxnId int64 `protobuf:"varint,19,opt,name=last_first_event_txn_id,json=lastFirstEventTxnId,proto3" json:"last_first_event_txn_id,omitempty"` FirstExecutionRunId string `protobuf:"bytes,20,opt,name=first_execution_run_id,json=firstExecutionRunId,proto3" json:"first_execution_run_id,omitempty"` // If using build-id based versioning: version stamp of last worker to complete a workflow // task for this workflow. - WorkerVersionStamp *v14.WorkerVersionStamp `protobuf:"bytes,21,opt,name=worker_version_stamp,json=workerVersionStamp,proto3" json:"worker_version_stamp,omitempty"` + MostRecentWorkerVersionStamp *v14.WorkerVersionStamp `protobuf:"bytes,21,opt,name=most_recent_worker_version_stamp,json=mostRecentWorkerVersionStamp,proto3" json:"most_recent_worker_version_stamp,omitempty"` + // The currently assigned build ID for this execution. Presence of this value means worker versioning is used + // for this execution. + AssignedBuildId string `protobuf:"bytes,22,opt,name=assigned_build_id,json=assignedBuildId,proto3" json:"assigned_build_id,omitempty"` + InheritedBuildId string `protobuf:"bytes,23,opt,name=inherited_build_id,json=inheritedBuildId,proto3" json:"inherited_build_id,omitempty"` + TransitionHistory []*v110.VersionedTransition `protobuf:"bytes,24,rep,name=transition_history,json=transitionHistory,proto3" json:"transition_history,omitempty"` + VersioningInfo *v15.WorkflowExecutionVersioningInfo `protobuf:"bytes,25,opt,name=versioning_info,json=versioningInfo,proto3" json:"versioning_info,omitempty"` + // Transient or speculative workflow task events which are not yet persisted in the history. 
+ // These events should be appended to the history when it is returned to the worker. + TransientOrSpeculativeTasks *v19.TransientWorkflowTaskInfo `protobuf:"bytes,26,opt,name=transient_or_speculative_tasks,json=transientOrSpeculativeTasks,proto3" json:"transient_or_speculative_tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetMutableStateResponse) Reset() { *x = GetMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetMutableStateResponse) String() string { @@ -381,8 +584,8 @@ func (x *GetMutableStateResponse) String() string { func (*GetMutableStateResponse) ProtoMessage() {} func (x *GetMutableStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -394,7 +597,7 @@ func (x *GetMutableStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMutableStateResponse.ProtoReflect.Descriptor instead. 
func (*GetMutableStateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{3} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{4} } func (x *GetMutableStateResponse) GetExecution() *v14.WorkflowExecution { @@ -432,14 +635,14 @@ func (x *GetMutableStateResponse) GetLastFirstEventId() int64 { return 0 } -func (x *GetMutableStateResponse) GetTaskQueue() *v17.TaskQueue { +func (x *GetMutableStateResponse) GetTaskQueue() *v111.TaskQueue { if x != nil { return x.TaskQueue } return nil } -func (x *GetMutableStateResponse) GetStickyTaskQueue() *v17.TaskQueue { +func (x *GetMutableStateResponse) GetStickyTaskQueue() *v111.TaskQueue { if x != nil { return x.StickyTaskQueue } @@ -460,11 +663,11 @@ func (x *GetMutableStateResponse) GetCurrentBranchToken() []byte { return nil } -func (x *GetMutableStateResponse) GetWorkflowState() v18.WorkflowExecutionState { +func (x *GetMutableStateResponse) GetWorkflowState() v112.WorkflowExecutionState { if x != nil { return x.WorkflowState } - return v18.WorkflowExecutionState(0) + return v112.WorkflowExecutionState(0) } func (x *GetMutableStateResponse) GetWorkflowStatus() v12.WorkflowExecutionStatus { @@ -474,7 +677,7 @@ func (x *GetMutableStateResponse) GetWorkflowStatus() v12.WorkflowExecutionStatu return v12.WorkflowExecutionStatus(0) } -func (x *GetMutableStateResponse) GetVersionHistories() *v16.VersionHistories { +func (x *GetMutableStateResponse) GetVersionHistories() *v19.VersionHistories { if x != nil { return x.VersionHistories } @@ -502,32 +705,64 @@ func (x *GetMutableStateResponse) GetFirstExecutionRunId() string { return "" } -func (x *GetMutableStateResponse) GetWorkerVersionStamp() *v14.WorkerVersionStamp { +func (x *GetMutableStateResponse) GetMostRecentWorkerVersionStamp() *v14.WorkerVersionStamp { if x != nil { - return x.WorkerVersionStamp + return x.MostRecentWorkerVersionStamp } return 
nil } -type PollMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *GetMutableStateResponse) GetAssignedBuildId() string { + if x != nil { + return x.AssignedBuildId + } + return "" +} + +func (x *GetMutableStateResponse) GetInheritedBuildId() string { + if x != nil { + return x.InheritedBuildId + } + return "" +} + +func (x *GetMutableStateResponse) GetTransitionHistory() []*v110.VersionedTransition { + if x != nil { + return x.TransitionHistory + } + return nil +} +func (x *GetMutableStateResponse) GetVersioningInfo() *v15.WorkflowExecutionVersioningInfo { + if x != nil { + return x.VersioningInfo + } + return nil +} + +func (x *GetMutableStateResponse) GetTransientOrSpeculativeTasks() *v19.TransientWorkflowTaskInfo { + if x != nil { + return x.TransientOrSpeculativeTasks + } + return nil +} + +type PollMutableStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` ExpectedNextEventId int64 `protobuf:"varint,3,opt,name=expected_next_event_id,json=expectedNextEventId,proto3" json:"expected_next_event_id,omitempty"` CurrentBranchToken []byte `protobuf:"bytes,4,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - VersionHistoryItem *v16.VersionHistoryItem `protobuf:"bytes,5,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` + VersionHistoryItem *v19.VersionHistoryItem `protobuf:"bytes,5,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PollMutableStateRequest) Reset() { *x = PollMutableStateRequest{} - if 
protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PollMutableStateRequest) String() string { @@ -537,8 +772,8 @@ func (x *PollMutableStateRequest) String() string { func (*PollMutableStateRequest) ProtoMessage() {} func (x *PollMutableStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -550,7 +785,7 @@ func (x *PollMutableStateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PollMutableStateRequest.ProtoReflect.Descriptor instead. 
func (*PollMutableStateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{4} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{5} } func (x *PollMutableStateRequest) GetNamespaceId() string { @@ -581,7 +816,7 @@ func (x *PollMutableStateRequest) GetCurrentBranchToken() []byte { return nil } -func (x *PollMutableStateRequest) GetVersionHistoryItem() *v16.VersionHistoryItem { +func (x *PollMutableStateRequest) GetVersionHistoryItem() *v19.VersionHistoryItem { if x != nil { return x.VersionHistoryItem } @@ -589,36 +824,33 @@ func (x *PollMutableStateRequest) GetVersionHistoryItem() *v16.VersionHistoryIte } type PollMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Execution *v14.WorkflowExecution `protobuf:"bytes,1,opt,name=execution,proto3" json:"execution,omitempty"` WorkflowType *v14.WorkflowType `protobuf:"bytes,2,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` NextEventId int64 `protobuf:"varint,3,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` PreviousStartedEventId int64 `protobuf:"varint,4,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` LastFirstEventId int64 `protobuf:"varint,5,opt,name=last_first_event_id,json=lastFirstEventId,proto3" json:"last_first_event_id,omitempty"` - TaskQueue *v17.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - StickyTaskQueue *v17.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` + TaskQueue *v111.TaskQueue `protobuf:"bytes,6,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + StickyTaskQueue 
*v111.TaskQueue `protobuf:"bytes,7,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "to" is used to indicate interval. --) StickyTaskQueueScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=sticky_task_queue_schedule_to_start_timeout,json=stickyTaskQueueScheduleToStartTimeout,proto3" json:"sticky_task_queue_schedule_to_start_timeout,omitempty"` CurrentBranchToken []byte `protobuf:"bytes,12,opt,name=current_branch_token,json=currentBranchToken,proto3" json:"current_branch_token,omitempty"` - VersionHistories *v16.VersionHistories `protobuf:"bytes,14,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` - WorkflowState v18.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` + VersionHistories *v19.VersionHistories `protobuf:"bytes,14,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + WorkflowState v112.WorkflowExecutionState `protobuf:"varint,15,opt,name=workflow_state,json=workflowState,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"workflow_state,omitempty"` WorkflowStatus v12.WorkflowExecutionStatus `protobuf:"varint,16,opt,name=workflow_status,json=workflowStatus,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"workflow_status,omitempty"` LastFirstEventTxnId int64 `protobuf:"varint,17,opt,name=last_first_event_txn_id,json=lastFirstEventTxnId,proto3" json:"last_first_event_txn_id,omitempty"` FirstExecutionRunId string `protobuf:"bytes,18,opt,name=first_execution_run_id,json=firstExecutionRunId,proto3" json:"first_execution_run_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PollMutableStateResponse) Reset() { *x = 
PollMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PollMutableStateResponse) String() string { @@ -628,8 +860,8 @@ func (x *PollMutableStateResponse) String() string { func (*PollMutableStateResponse) ProtoMessage() {} func (x *PollMutableStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -641,7 +873,7 @@ func (x *PollMutableStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PollMutableStateResponse.ProtoReflect.Descriptor instead. 
func (*PollMutableStateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{5} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{6} } func (x *PollMutableStateResponse) GetExecution() *v14.WorkflowExecution { @@ -679,14 +911,14 @@ func (x *PollMutableStateResponse) GetLastFirstEventId() int64 { return 0 } -func (x *PollMutableStateResponse) GetTaskQueue() *v17.TaskQueue { +func (x *PollMutableStateResponse) GetTaskQueue() *v111.TaskQueue { if x != nil { return x.TaskQueue } return nil } -func (x *PollMutableStateResponse) GetStickyTaskQueue() *v17.TaskQueue { +func (x *PollMutableStateResponse) GetStickyTaskQueue() *v111.TaskQueue { if x != nil { return x.StickyTaskQueue } @@ -707,18 +939,18 @@ func (x *PollMutableStateResponse) GetCurrentBranchToken() []byte { return nil } -func (x *PollMutableStateResponse) GetVersionHistories() *v16.VersionHistories { +func (x *PollMutableStateResponse) GetVersionHistories() *v19.VersionHistories { if x != nil { return x.VersionHistories } return nil } -func (x *PollMutableStateResponse) GetWorkflowState() v18.WorkflowExecutionState { +func (x *PollMutableStateResponse) GetWorkflowState() v112.WorkflowExecutionState { if x != nil { return x.WorkflowState } - return v18.WorkflowExecutionState(0) + return v112.WorkflowExecutionState(0) } func (x *PollMutableStateResponse) GetWorkflowStatus() v12.WorkflowExecutionStatus { @@ -743,21 +975,18 @@ func (x *PollMutableStateResponse) GetFirstExecutionRunId() string { } type ResetStickyTaskQueueRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` unknownFields 
protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ResetStickyTaskQueueRequest) Reset() { *x = ResetStickyTaskQueueRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResetStickyTaskQueueRequest) String() string { @@ -767,8 +996,8 @@ func (x *ResetStickyTaskQueueRequest) String() string { func (*ResetStickyTaskQueueRequest) ProtoMessage() {} func (x *ResetStickyTaskQueueRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[7] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -780,7 +1009,7 @@ func (x *ResetStickyTaskQueueRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ResetStickyTaskQueueRequest.ProtoReflect.Descriptor instead. 
func (*ResetStickyTaskQueueRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{6} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{7} } func (x *ResetStickyTaskQueueRequest) GetNamespaceId() string { @@ -798,18 +1027,16 @@ func (x *ResetStickyTaskQueueRequest) GetExecution() *v14.WorkflowExecution { } type ResetStickyTaskQueueResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ResetStickyTaskQueueResponse) Reset() { *x = ResetStickyTaskQueueResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ResetStickyTaskQueueResponse) String() string { @@ -819,8 +1046,8 @@ func (x *ResetStickyTaskQueueResponse) String() string { func (*ResetStickyTaskQueueResponse) ProtoMessage() {} func (x *ResetStickyTaskQueueResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -832,30 +1059,146 @@ func (x *ResetStickyTaskQueueResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ResetStickyTaskQueueResponse.ProtoReflect.Descriptor instead. 
func (*ResetStickyTaskQueueResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{7} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{8} } -type RecordWorkflowTaskStartedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ExecuteMultiOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + Operations []*ExecuteMultiOperationRequest_Operation `protobuf:"bytes,3,rep,name=operations,proto3" json:"operations,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - ScheduledEventId int64 `protobuf:"varint,3,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - // Unique id of each poll request. Used to ensure at most once delivery of tasks. 
- RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - PollRequest *v1.PollWorkflowTaskQueueRequest `protobuf:"bytes,6,opt,name=poll_request,json=pollRequest,proto3" json:"poll_request,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,7,opt,name=clock,proto3" json:"clock,omitempty"` +func (x *ExecuteMultiOperationRequest) Reset() { + *x = ExecuteMultiOperationRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RecordWorkflowTaskStartedRequest) Reset() { - *x = RecordWorkflowTaskStartedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ExecuteMultiOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteMultiOperationRequest) ProtoMessage() {} + +func (x *ExecuteMultiOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteMultiOperationRequest.ProtoReflect.Descriptor instead. 
+func (*ExecuteMultiOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{9} +} + +func (x *ExecuteMultiOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ExecuteMultiOperationRequest) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *ExecuteMultiOperationRequest) GetOperations() []*ExecuteMultiOperationRequest_Operation { + if x != nil { + return x.Operations + } + return nil +} + +type ExecuteMultiOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Responses []*ExecuteMultiOperationResponse_Response `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExecuteMultiOperationResponse) Reset() { + *x = ExecuteMultiOperationResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExecuteMultiOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecuteMultiOperationResponse) ProtoMessage() {} + +func (x *ExecuteMultiOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecuteMultiOperationResponse.ProtoReflect.Descriptor instead. 
+func (*ExecuteMultiOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{10} +} + +func (x *ExecuteMultiOperationResponse) GetResponses() []*ExecuteMultiOperationResponse_Response { + if x != nil { + return x.Responses } + return nil +} + +type RecordWorkflowTaskStartedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + ScheduledEventId int64 `protobuf:"varint,3,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + // Unique id of each poll request. Used to ensure at most once delivery of tasks. + RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + PollRequest *v1.PollWorkflowTaskQueueRequest `protobuf:"bytes,6,opt,name=poll_request,json=pollRequest,proto3" json:"poll_request,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,7,opt,name=clock,proto3" json:"clock,omitempty"` + BuildIdRedirectInfo *v113.BuildIdRedirectInfo `protobuf:"bytes,8,opt,name=build_id_redirect_info,json=buildIdRedirectInfo,proto3" json:"build_id_redirect_info,omitempty"` + // The deployment passed by History when the task was scheduled. + // Deprecated. use `version_directive.deployment`. + ScheduledDeployment *v16.Deployment `protobuf:"bytes,9,opt,name=scheduled_deployment,json=scheduledDeployment,proto3" json:"scheduled_deployment,omitempty"` + // Versioning directive that was sent by history when scheduling the task. 
+ VersionDirective *v113.TaskVersionDirective `protobuf:"bytes,10,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + // Stamp value from when the workflow task was scheduled. Used to validate the task is still relevant. + Stamp int32 `protobuf:"varint,11,opt,name=stamp,proto3" json:"stamp,omitempty"` + // Revision number that was sent by matching when the task was dispatched. Used to resolve eventual consistency issues + // that may arise due to stale routing configs in task queue partitions. + TaskDispatchRevisionNumber int64 `protobuf:"varint,12,opt,name=task_dispatch_revision_number,json=taskDispatchRevisionNumber,proto3" json:"task_dispatch_revision_number,omitempty"` + // Target worker deployment version according to matching when starting the task. + // Computed after matching with a poller, right before calling RecordWorkflowTaskStarted. + // Sent only if the target version is different from the poller's version. + TargetDeploymentVersion *v16.WorkerDeploymentVersion `protobuf:"bytes,13,opt,name=target_deployment_version,json=targetDeploymentVersion,proto3" json:"target_deployment_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordWorkflowTaskStartedRequest) Reset() { + *x = RecordWorkflowTaskStartedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RecordWorkflowTaskStartedRequest) String() string { @@ -865,8 +1208,8 @@ func (x *RecordWorkflowTaskStartedRequest) String() string { func (*RecordWorkflowTaskStartedRequest) ProtoMessage() {} func (x *RecordWorkflowTaskStartedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -878,7 +1221,7 @@ func (x *RecordWorkflowTaskStartedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RecordWorkflowTaskStartedRequest.ProtoReflect.Descriptor instead. func (*RecordWorkflowTaskStartedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{8} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{11} } func (x *RecordWorkflowTaskStartedRequest) GetNamespaceId() string { @@ -916,18 +1259,57 @@ func (x *RecordWorkflowTaskStartedRequest) GetPollRequest() *v1.PollWorkflowTask return nil } -func (x *RecordWorkflowTaskStartedRequest) GetClock() *v15.VectorClock { +func (x *RecordWorkflowTaskStartedRequest) GetClock() *v18.VectorClock { if x != nil { return x.Clock } return nil } -type RecordWorkflowTaskStartedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *RecordWorkflowTaskStartedRequest) GetBuildIdRedirectInfo() *v113.BuildIdRedirectInfo { + if x != nil { + return x.BuildIdRedirectInfo + } + return nil +} + +func (x *RecordWorkflowTaskStartedRequest) GetScheduledDeployment() *v16.Deployment { + if x != nil { + return x.ScheduledDeployment + } + return nil +} + +func (x *RecordWorkflowTaskStartedRequest) GetVersionDirective() *v113.TaskVersionDirective { + if x != nil { + return x.VersionDirective + } + return nil +} +func (x *RecordWorkflowTaskStartedRequest) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +func (x *RecordWorkflowTaskStartedRequest) GetTaskDispatchRevisionNumber() int64 { + if x != nil { + return x.TaskDispatchRevisionNumber + } + return 0 +} + +func (x *RecordWorkflowTaskStartedRequest) 
GetTargetDeploymentVersion() *v16.WorkerDeploymentVersion { + if x != nil { + return x.TargetDeploymentVersion + } + return nil +} + +type RecordWorkflowTaskStartedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` WorkflowType *v14.WorkflowType `protobuf:"bytes,1,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` PreviousStartedEventId int64 `protobuf:"varint,2,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` ScheduledEventId int64 `protobuf:"varint,3,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` @@ -935,26 +1317,43 @@ type RecordWorkflowTaskStartedResponse struct { NextEventId int64 `protobuf:"varint,5,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` Attempt int32 `protobuf:"varint,6,opt,name=attempt,proto3" json:"attempt,omitempty"` StickyExecutionEnabled bool `protobuf:"varint,7,opt,name=sticky_execution_enabled,json=stickyExecutionEnabled,proto3" json:"sticky_execution_enabled,omitempty"` - TransientWorkflowTask *v16.TransientWorkflowTaskInfo `protobuf:"bytes,8,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` - WorkflowExecutionTaskQueue *v17.TaskQueue `protobuf:"bytes,9,opt,name=workflow_execution_task_queue,json=workflowExecutionTaskQueue,proto3" json:"workflow_execution_task_queue,omitempty"` + TransientWorkflowTask *v19.TransientWorkflowTaskInfo `protobuf:"bytes,8,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` + WorkflowExecutionTaskQueue *v111.TaskQueue `protobuf:"bytes,9,opt,name=workflow_execution_task_queue,json=workflowExecutionTaskQueue,proto3" json:"workflow_execution_task_queue,omitempty"` BranchToken []byte `protobuf:"bytes,11,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` ScheduledTime 
*timestamppb.Timestamp `protobuf:"bytes,12,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` StartedTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` - Queries map[string]*v19.WorkflowQuery `protobuf:"bytes,14,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Clock *v15.VectorClock `protobuf:"bytes,15,opt,name=clock,proto3" json:"clock,omitempty"` - Messages []*v110.Message `protobuf:"bytes,16,rep,name=messages,proto3" json:"messages,omitempty"` + Queries map[string]*v114.WorkflowQuery `protobuf:"bytes,14,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Clock *v18.VectorClock `protobuf:"bytes,15,opt,name=clock,proto3" json:"clock,omitempty"` + Messages []*v115.Message `protobuf:"bytes,16,rep,name=messages,proto3" json:"messages,omitempty"` Version int64 `protobuf:"varint,17,opt,name=version,proto3" json:"version,omitempty"` - History *v111.History `protobuf:"bytes,18,opt,name=history,proto3" json:"history,omitempty"` + History *v17.History `protobuf:"bytes,18,opt,name=history,proto3" json:"history,omitempty"` NextPageToken []byte `protobuf:"bytes,19,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Deprecated: This field is being replaced by raw_history_bytes which sends raw bytes + // instead of a proto-decoded History. This avoids matching service having to decode history. 
+ // TODO: PRATHYUSH + // DEPRECATION PLAN: + // Two dynamic config flags control the raw history optimization: + // - history.sendRawHistoryBetweenInternalServices: enables raw history (uses field 18 when OFF, field 20/21 when ON) + // - history.sendRawHistoryBytesToMatchingService: selects field 20 (OFF) vs field 21 (ON) + // + // Version timeline (current version: v1.29): + // - v1.31: This change is released. Both flags default to false for backward compatibility. + // - v1.32: Both flags will be enabled by default in code. + // - v1.33: raw_history (field 20) and history (field 18) will be deprecated and removed, + // as raw_history_bytes (field 21) will be the only field used. + // + // Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. + RawHistory *v17.History `protobuf:"bytes,20,opt,name=raw_history,json=rawHistory,proto3" json:"raw_history,omitempty"` + RawHistoryBytes [][]byte `protobuf:"bytes,21,rep,name=raw_history_bytes,json=rawHistoryBytes,proto3" json:"raw_history_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RecordWorkflowTaskStartedResponse) Reset() { *x = RecordWorkflowTaskStartedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RecordWorkflowTaskStartedResponse) String() string { @@ -964,8 +1363,8 @@ func (x *RecordWorkflowTaskStartedResponse) String() string { func (*RecordWorkflowTaskStartedResponse) ProtoMessage() {} func (x *RecordWorkflowTaskStartedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[9] - if 
protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -977,7 +1376,7 @@ func (x *RecordWorkflowTaskStartedResponse) ProtoReflect() protoreflect.Message // Deprecated: Use RecordWorkflowTaskStartedResponse.ProtoReflect.Descriptor instead. func (*RecordWorkflowTaskStartedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{9} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{12} } func (x *RecordWorkflowTaskStartedResponse) GetWorkflowType() *v14.WorkflowType { @@ -1029,14 +1428,14 @@ func (x *RecordWorkflowTaskStartedResponse) GetStickyExecutionEnabled() bool { return false } -func (x *RecordWorkflowTaskStartedResponse) GetTransientWorkflowTask() *v16.TransientWorkflowTaskInfo { +func (x *RecordWorkflowTaskStartedResponse) GetTransientWorkflowTask() *v19.TransientWorkflowTaskInfo { if x != nil { return x.TransientWorkflowTask } return nil } -func (x *RecordWorkflowTaskStartedResponse) GetWorkflowExecutionTaskQueue() *v17.TaskQueue { +func (x *RecordWorkflowTaskStartedResponse) GetWorkflowExecutionTaskQueue() *v111.TaskQueue { if x != nil { return x.WorkflowExecutionTaskQueue } @@ -1064,21 +1463,21 @@ func (x *RecordWorkflowTaskStartedResponse) GetStartedTime() *timestamppb.Timest return nil } -func (x *RecordWorkflowTaskStartedResponse) GetQueries() map[string]*v19.WorkflowQuery { +func (x *RecordWorkflowTaskStartedResponse) GetQueries() map[string]*v114.WorkflowQuery { if x != nil { return x.Queries } return nil } -func (x *RecordWorkflowTaskStartedResponse) GetClock() *v15.VectorClock { +func (x *RecordWorkflowTaskStartedResponse) GetClock() *v18.VectorClock { if x != nil { return x.Clock } return nil } -func (x 
*RecordWorkflowTaskStartedResponse) GetMessages() []*v110.Message { +func (x *RecordWorkflowTaskStartedResponse) GetMessages() []*v115.Message { if x != nil { return x.Messages } @@ -1092,7 +1491,7 @@ func (x *RecordWorkflowTaskStartedResponse) GetVersion() int64 { return 0 } -func (x *RecordWorkflowTaskStartedResponse) GetHistory() *v111.History { +func (x *RecordWorkflowTaskStartedResponse) GetHistory() *v17.History { if x != nil { return x.History } @@ -1106,38 +1505,85 @@ func (x *RecordWorkflowTaskStartedResponse) GetNextPageToken() []byte { return nil } -type RecordActivityTaskStartedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - ScheduledEventId int64 `protobuf:"varint,3,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - // Unique id of each poll request. Used to ensure at most once delivery of tasks. - RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - PollRequest *v1.PollActivityTaskQueueRequest `protobuf:"bytes,6,opt,name=poll_request,json=pollRequest,proto3" json:"poll_request,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,7,opt,name=clock,proto3" json:"clock,omitempty"` +// Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. 
+func (x *RecordWorkflowTaskStartedResponse) GetRawHistory() *v17.History { + if x != nil { + return x.RawHistory + } + return nil } -func (x *RecordActivityTaskStartedRequest) Reset() { - *x = RecordActivityTaskStartedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordWorkflowTaskStartedResponse) GetRawHistoryBytes() [][]byte { + if x != nil { + return x.RawHistoryBytes } + return nil } -func (x *RecordActivityTaskStartedRequest) String() string { +// RecordWorkflowTaskStartedResponseWithRawHistory is wire-compatible with RecordWorkflowTaskStartedResponse. +// +// WIRE COMPATIBILITY PATTERN: +// This message uses the same field numbers as RecordWorkflowTaskStartedResponse (1-19 are identical), +// but fields 20 and 21 differ in type: +// - Field 20: `repeated bytes raw_history` (deprecated) vs `History raw_history` (deprecated) +// - Field 21: `repeated bytes raw_history_bytes` (same in both) +// +// This enables the following optimization when SendRawHistoryBetweenInternalServices is enabled: +// 1. History service serializes raw_history_bytes as [][]byte (raw proto-encoded history batches) +// 2. History client receives raw bytes and deserializes as RecordWorkflowTaskStartedResponse +// 3. Matching service passes these raw bytes through to frontend without deserializing +// 4. Frontend client deserializes the final response with History field populated +// +// This pattern avoids deserialization in matching service, reducing CPU usage. +// +// IMPORTANT: Field numbers and all other fields must remain identical between these two messages. +// Any change to RecordWorkflowTaskStartedResponse must be mirrored here. 
+type RecordWorkflowTaskStartedResponseWithRawHistory struct { + state protoimpl.MessageState `protogen:"open.v1"` + WorkflowType *v14.WorkflowType `protobuf:"bytes,1,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + PreviousStartedEventId int64 `protobuf:"varint,2,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` + ScheduledEventId int64 `protobuf:"varint,3,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + StartedEventId int64 `protobuf:"varint,4,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + NextEventId int64 `protobuf:"varint,5,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` + Attempt int32 `protobuf:"varint,6,opt,name=attempt,proto3" json:"attempt,omitempty"` + StickyExecutionEnabled bool `protobuf:"varint,7,opt,name=sticky_execution_enabled,json=stickyExecutionEnabled,proto3" json:"sticky_execution_enabled,omitempty"` + TransientWorkflowTask *v19.TransientWorkflowTaskInfo `protobuf:"bytes,8,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` + WorkflowExecutionTaskQueue *v111.TaskQueue `protobuf:"bytes,9,opt,name=workflow_execution_task_queue,json=workflowExecutionTaskQueue,proto3" json:"workflow_execution_task_queue,omitempty"` + BranchToken []byte `protobuf:"bytes,11,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartedTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + Queries map[string]*v114.WorkflowQuery `protobuf:"bytes,14,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key" 
protobuf_val:"bytes,2,opt,name=value"` + Clock *v18.VectorClock `protobuf:"bytes,15,opt,name=clock,proto3" json:"clock,omitempty"` + Messages []*v115.Message `protobuf:"bytes,16,rep,name=messages,proto3" json:"messages,omitempty"` + Version int64 `protobuf:"varint,17,opt,name=version,proto3" json:"version,omitempty"` + History *v17.History `protobuf:"bytes,18,opt,name=history,proto3" json:"history,omitempty"` + NextPageToken []byte `protobuf:"bytes,19,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + // Deprecated: This field is being replaced by raw_history_bytes which sends raw bytes + // instead of a proto-decoded History. This avoids matching service having to decode history. + // + // Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. + RawHistory [][]byte `protobuf:"bytes,20,rep,name=raw_history,json=rawHistory,proto3" json:"raw_history,omitempty"` + RawHistoryBytes [][]byte `protobuf:"bytes,21,rep,name=raw_history_bytes,json=rawHistoryBytes,proto3" json:"raw_history_bytes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) Reset() { + *x = RecordWorkflowTaskStartedResponseWithRawHistory{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RecordActivityTaskStartedRequest) ProtoMessage() {} +func (*RecordWorkflowTaskStartedResponseWithRawHistory) ProtoMessage() {} -func (x *RecordActivityTaskStartedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) 
ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[13] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1147,249 +1593,196 @@ func (x *RecordActivityTaskStartedRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RecordActivityTaskStartedRequest.ProtoReflect.Descriptor instead. -func (*RecordActivityTaskStartedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{10} +// Deprecated: Use RecordWorkflowTaskStartedResponseWithRawHistory.ProtoReflect.Descriptor instead. +func (*RecordWorkflowTaskStartedResponseWithRawHistory) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{13} } -func (x *RecordActivityTaskStartedRequest) GetNamespaceId() string { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetWorkflowType() *v14.WorkflowType { if x != nil { - return x.NamespaceId + return x.WorkflowType } - return "" + return nil } -func (x *RecordActivityTaskStartedRequest) GetWorkflowExecution() *v14.WorkflowExecution { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetPreviousStartedEventId() int64 { if x != nil { - return x.WorkflowExecution + return x.PreviousStartedEventId } - return nil + return 0 } -func (x *RecordActivityTaskStartedRequest) GetScheduledEventId() int64 { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetScheduledEventId() int64 { if x != nil { return x.ScheduledEventId } return 0 } -func (x *RecordActivityTaskStartedRequest) GetRequestId() string { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetStartedEventId() int64 { if x != nil { - return x.RequestId + return x.StartedEventId } - return "" + return 0 } -func (x *RecordActivityTaskStartedRequest) 
GetPollRequest() *v1.PollActivityTaskQueueRequest { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetNextEventId() int64 { if x != nil { - return x.PollRequest + return x.NextEventId } - return nil + return 0 } -func (x *RecordActivityTaskStartedRequest) GetClock() *v15.VectorClock { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetAttempt() int32 { if x != nil { - return x.Clock + return x.Attempt } - return nil -} - -type RecordActivityTaskStartedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ScheduledEvent *v111.HistoryEvent `protobuf:"bytes,1,opt,name=scheduled_event,json=scheduledEvent,proto3" json:"scheduled_event,omitempty"` - StartedTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` - Attempt int32 `protobuf:"varint,3,opt,name=attempt,proto3" json:"attempt,omitempty"` - CurrentAttemptScheduledTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=current_attempt_scheduled_time,json=currentAttemptScheduledTime,proto3" json:"current_attempt_scheduled_time,omitempty"` - HeartbeatDetails *v14.Payloads `protobuf:"bytes,5,opt,name=heartbeat_details,json=heartbeatDetails,proto3" json:"heartbeat_details,omitempty"` - WorkflowType *v14.WorkflowType `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` - WorkflowNamespace string `protobuf:"bytes,7,opt,name=workflow_namespace,json=workflowNamespace,proto3" json:"workflow_namespace,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,8,opt,name=clock,proto3" json:"clock,omitempty"` - Version int64 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + return 0 } -func (x *RecordActivityTaskStartedResponse) Reset() { - *x = RecordActivityTaskStartedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[11] - ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetStickyExecutionEnabled() bool { + if x != nil { + return x.StickyExecutionEnabled } + return false } -func (x *RecordActivityTaskStartedResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RecordActivityTaskStartedResponse) ProtoMessage() {} - -func (x *RecordActivityTaskStartedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetTransientWorkflowTask() *v19.TransientWorkflowTaskInfo { + if x != nil { + return x.TransientWorkflowTask } - return mi.MessageOf(x) -} - -// Deprecated: Use RecordActivityTaskStartedResponse.ProtoReflect.Descriptor instead. 
-func (*RecordActivityTaskStartedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{11} + return nil } -func (x *RecordActivityTaskStartedResponse) GetScheduledEvent() *v111.HistoryEvent { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetWorkflowExecutionTaskQueue() *v111.TaskQueue { if x != nil { - return x.ScheduledEvent + return x.WorkflowExecutionTaskQueue } return nil } -func (x *RecordActivityTaskStartedResponse) GetStartedTime() *timestamppb.Timestamp { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetBranchToken() []byte { if x != nil { - return x.StartedTime + return x.BranchToken } return nil } -func (x *RecordActivityTaskStartedResponse) GetAttempt() int32 { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetScheduledTime() *timestamppb.Timestamp { if x != nil { - return x.Attempt + return x.ScheduledTime } - return 0 + return nil } -func (x *RecordActivityTaskStartedResponse) GetCurrentAttemptScheduledTime() *timestamppb.Timestamp { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetStartedTime() *timestamppb.Timestamp { if x != nil { - return x.CurrentAttemptScheduledTime + return x.StartedTime } return nil } -func (x *RecordActivityTaskStartedResponse) GetHeartbeatDetails() *v14.Payloads { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetQueries() map[string]*v114.WorkflowQuery { if x != nil { - return x.HeartbeatDetails + return x.Queries } return nil } -func (x *RecordActivityTaskStartedResponse) GetWorkflowType() *v14.WorkflowType { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetClock() *v18.VectorClock { if x != nil { - return x.WorkflowType + return x.Clock } return nil } -func (x *RecordActivityTaskStartedResponse) GetWorkflowNamespace() string { - if x != nil { - return x.WorkflowNamespace - } - return "" -} - -func (x *RecordActivityTaskStartedResponse) GetClock() *v15.VectorClock { 
+func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetMessages() []*v115.Message { if x != nil { - return x.Clock + return x.Messages } return nil } -func (x *RecordActivityTaskStartedResponse) GetVersion() int64 { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetVersion() int64 { if x != nil { return x.Version } return 0 } -type RespondWorkflowTaskCompletedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - CompleteRequest *v1.RespondWorkflowTaskCompletedRequest `protobuf:"bytes,2,opt,name=complete_request,json=completeRequest,proto3" json:"complete_request,omitempty"` -} - -func (x *RespondWorkflowTaskCompletedRequest) Reset() { - *x = RespondWorkflowTaskCompletedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetHistory() *v17.History { + if x != nil { + return x.History } + return nil } -func (x *RespondWorkflowTaskCompletedRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RespondWorkflowTaskCompletedRequest) ProtoMessage() {} - -func (x *RespondWorkflowTaskCompletedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken } - return mi.MessageOf(x) -} - -// Deprecated: Use 
RespondWorkflowTaskCompletedRequest.ProtoReflect.Descriptor instead. -func (*RespondWorkflowTaskCompletedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{12} + return nil } -func (x *RespondWorkflowTaskCompletedRequest) GetNamespaceId() string { +// Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetRawHistory() [][]byte { if x != nil { - return x.NamespaceId + return x.RawHistory } - return "" + return nil } -func (x *RespondWorkflowTaskCompletedRequest) GetCompleteRequest() *v1.RespondWorkflowTaskCompletedRequest { +func (x *RecordWorkflowTaskStartedResponseWithRawHistory) GetRawHistoryBytes() [][]byte { if x != nil { - return x.CompleteRequest + return x.RawHistoryBytes } return nil } -type RespondWorkflowTaskCompletedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RecordActivityTaskStartedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + ScheduledEventId int64 `protobuf:"varint,3,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + // Unique id of each poll request. Used to ensure at most once delivery of tasks. 
+ RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + PollRequest *v1.PollActivityTaskQueueRequest `protobuf:"bytes,6,opt,name=poll_request,json=pollRequest,proto3" json:"poll_request,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,7,opt,name=clock,proto3" json:"clock,omitempty"` + BuildIdRedirectInfo *v113.BuildIdRedirectInfo `protobuf:"bytes,8,opt,name=build_id_redirect_info,json=buildIdRedirectInfo,proto3" json:"build_id_redirect_info,omitempty"` + // Stamp represents the internal “version” of the activity options and can/will be changed with Activity API. + Stamp int32 `protobuf:"varint,9,opt,name=stamp,proto3" json:"stamp,omitempty"` + // The deployment passed by History when the task was scheduled. + // Deprecated. use `version_directive.deployment`. + ScheduledDeployment *v16.Deployment `protobuf:"bytes,10,opt,name=scheduled_deployment,json=scheduledDeployment,proto3" json:"scheduled_deployment,omitempty"` + // Versioning directive that was sent by history when scheduling the task. + VersionDirective *v113.TaskVersionDirective `protobuf:"bytes,12,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + // Revision number that was sent by matching when the task was dispatched. Used to resolve eventual consistency issues + // that may arise due to stale routing configs in task queue partitions. + TaskDispatchRevisionNumber int64 `protobuf:"varint,13,opt,name=task_dispatch_revision_number,json=taskDispatchRevisionNumber,proto3" json:"task_dispatch_revision_number,omitempty"` + // Reference to the Chasm component for activity execution (if applicable). For standalone activities, all necessary + // start information is carried within this component, obviating the need to use the fields that apply to embedded + // activities with the exception of version_directive. 
+ ComponentRef []byte `protobuf:"bytes,14,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` unknownFields protoimpl.UnknownFields - - // Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. - StartedResponse *RecordWorkflowTaskStartedResponse `protobuf:"bytes,1,opt,name=started_response,json=startedResponse,proto3" json:"started_response,omitempty"` - ActivityTasks []*v1.PollActivityTaskQueueResponse `protobuf:"bytes,2,rep,name=activity_tasks,json=activityTasks,proto3" json:"activity_tasks,omitempty"` - ResetHistoryEventId int64 `protobuf:"varint,3,opt,name=reset_history_event_id,json=resetHistoryEventId,proto3" json:"reset_history_event_id,omitempty"` - NewWorkflowTask *v1.PollWorkflowTaskQueueResponse `protobuf:"bytes,4,opt,name=new_workflow_task,json=newWorkflowTask,proto3" json:"new_workflow_task,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *RespondWorkflowTaskCompletedResponse) Reset() { - *x = RespondWorkflowTaskCompletedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordActivityTaskStartedRequest) Reset() { + *x = RecordActivityTaskStartedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondWorkflowTaskCompletedResponse) String() string { +func (x *RecordActivityTaskStartedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondWorkflowTaskCompletedResponse) ProtoMessage() {} +func (*RecordActivityTaskStartedRequest) ProtoMessage() {} -func (x *RespondWorkflowTaskCompletedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[13] - if 
protoimpl.UnsafeEnabled && x != nil { +func (x *RecordActivityTaskStartedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[14] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1399,119 +1792,131 @@ func (x *RespondWorkflowTaskCompletedResponse) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use RespondWorkflowTaskCompletedResponse.ProtoReflect.Descriptor instead. -func (*RespondWorkflowTaskCompletedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{13} +// Deprecated: Use RecordActivityTaskStartedRequest.ProtoReflect.Descriptor instead. +func (*RecordActivityTaskStartedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{14} } -// Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. 
-func (x *RespondWorkflowTaskCompletedResponse) GetStartedResponse() *RecordWorkflowTaskStartedResponse { +func (x *RecordActivityTaskStartedRequest) GetNamespaceId() string { if x != nil { - return x.StartedResponse + return x.NamespaceId } - return nil + return "" } -func (x *RespondWorkflowTaskCompletedResponse) GetActivityTasks() []*v1.PollActivityTaskQueueResponse { +func (x *RecordActivityTaskStartedRequest) GetWorkflowExecution() *v14.WorkflowExecution { if x != nil { - return x.ActivityTasks + return x.WorkflowExecution } return nil } -func (x *RespondWorkflowTaskCompletedResponse) GetResetHistoryEventId() int64 { +func (x *RecordActivityTaskStartedRequest) GetScheduledEventId() int64 { if x != nil { - return x.ResetHistoryEventId + return x.ScheduledEventId } return 0 } -func (x *RespondWorkflowTaskCompletedResponse) GetNewWorkflowTask() *v1.PollWorkflowTaskQueueResponse { +func (x *RecordActivityTaskStartedRequest) GetRequestId() string { if x != nil { - return x.NewWorkflowTask + return x.RequestId } - return nil + return "" } -type RespondWorkflowTaskFailedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - FailedRequest *v1.RespondWorkflowTaskFailedRequest `protobuf:"bytes,2,opt,name=failed_request,json=failedRequest,proto3" json:"failed_request,omitempty"` +func (x *RecordActivityTaskStartedRequest) GetPollRequest() *v1.PollActivityTaskQueueRequest { + if x != nil { + return x.PollRequest + } + return nil } -func (x *RespondWorkflowTaskFailedRequest) Reset() { - *x = RespondWorkflowTaskFailedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordActivityTaskStartedRequest) GetClock() 
*v18.VectorClock { + if x != nil { + return x.Clock } + return nil } -func (x *RespondWorkflowTaskFailedRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *RecordActivityTaskStartedRequest) GetBuildIdRedirectInfo() *v113.BuildIdRedirectInfo { + if x != nil { + return x.BuildIdRedirectInfo + } + return nil } -func (*RespondWorkflowTaskFailedRequest) ProtoMessage() {} +func (x *RecordActivityTaskStartedRequest) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} -func (x *RespondWorkflowTaskFailedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *RecordActivityTaskStartedRequest) GetScheduledDeployment() *v16.Deployment { + if x != nil { + return x.ScheduledDeployment } - return mi.MessageOf(x) + return nil } -// Deprecated: Use RespondWorkflowTaskFailedRequest.ProtoReflect.Descriptor instead. 
-func (*RespondWorkflowTaskFailedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{14} +func (x *RecordActivityTaskStartedRequest) GetVersionDirective() *v113.TaskVersionDirective { + if x != nil { + return x.VersionDirective + } + return nil } -func (x *RespondWorkflowTaskFailedRequest) GetNamespaceId() string { +func (x *RecordActivityTaskStartedRequest) GetTaskDispatchRevisionNumber() int64 { if x != nil { - return x.NamespaceId + return x.TaskDispatchRevisionNumber } - return "" + return 0 } -func (x *RespondWorkflowTaskFailedRequest) GetFailedRequest() *v1.RespondWorkflowTaskFailedRequest { +func (x *RecordActivityTaskStartedRequest) GetComponentRef() []byte { if x != nil { - return x.FailedRequest + return x.ComponentRef } return nil } -type RespondWorkflowTaskFailedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RecordActivityTaskStartedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ScheduledEvent *v17.HistoryEvent `protobuf:"bytes,1,opt,name=scheduled_event,json=scheduledEvent,proto3" json:"scheduled_event,omitempty"` + StartedTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + Attempt int32 `protobuf:"varint,3,opt,name=attempt,proto3" json:"attempt,omitempty"` + CurrentAttemptScheduledTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=current_attempt_scheduled_time,json=currentAttemptScheduledTime,proto3" json:"current_attempt_scheduled_time,omitempty"` + HeartbeatDetails *v14.Payloads `protobuf:"bytes,5,opt,name=heartbeat_details,json=heartbeatDetails,proto3" json:"heartbeat_details,omitempty"` + WorkflowType *v14.WorkflowType `protobuf:"bytes,6,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + WorkflowNamespace string `protobuf:"bytes,7,opt,name=workflow_namespace,json=workflowNamespace,proto3" 
json:"workflow_namespace,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,8,opt,name=clock,proto3" json:"clock,omitempty"` + Version int64 `protobuf:"varint,9,opt,name=version,proto3" json:"version,omitempty"` + Priority *v14.Priority `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + RetryPolicy *v14.RetryPolicy `protobuf:"bytes,11,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + StartVersion int64 `protobuf:"varint,12,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + // ID of the activity run (applicable for standalone activities only) + ActivityRunId string `protobuf:"bytes,13,opt,name=activity_run_id,json=activityRunId,proto3" json:"activity_run_id,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondWorkflowTaskFailedResponse) Reset() { - *x = RespondWorkflowTaskFailedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordActivityTaskStartedResponse) Reset() { + *x = RecordActivityTaskStartedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondWorkflowTaskFailedResponse) String() string { +func (x *RecordActivityTaskStartedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondWorkflowTaskFailedResponse) ProtoMessage() {} +func (*RecordActivityTaskStartedResponse) ProtoMessage() {} -func (x *RespondWorkflowTaskFailedResponse) ProtoReflect() protoreflect.Message { +func (x *RecordActivityTaskStartedResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + 
if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1521,157 +1926,126 @@ func (x *RespondWorkflowTaskFailedResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use RespondWorkflowTaskFailedResponse.ProtoReflect.Descriptor instead. -func (*RespondWorkflowTaskFailedResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use RecordActivityTaskStartedResponse.ProtoReflect.Descriptor instead. +func (*RecordActivityTaskStartedResponse) Descriptor() ([]byte, []int) { return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{15} } -type IsWorkflowTaskValidRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` - ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` +func (x *RecordActivityTaskStartedResponse) GetScheduledEvent() *v17.HistoryEvent { + if x != nil { + return x.ScheduledEvent + } + return nil } -func (x *IsWorkflowTaskValidRequest) Reset() { - *x = IsWorkflowTaskValidRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordActivityTaskStartedResponse) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime } + return nil } -func (x *IsWorkflowTaskValidRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *RecordActivityTaskStartedResponse) 
GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 } -func (*IsWorkflowTaskValidRequest) ProtoMessage() {} - -func (x *IsWorkflowTaskValidRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *RecordActivityTaskStartedResponse) GetCurrentAttemptScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.CurrentAttemptScheduledTime } - return mi.MessageOf(x) + return nil } -// Deprecated: Use IsWorkflowTaskValidRequest.ProtoReflect.Descriptor instead. -func (*IsWorkflowTaskValidRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{16} +func (x *RecordActivityTaskStartedResponse) GetHeartbeatDetails() *v14.Payloads { + if x != nil { + return x.HeartbeatDetails + } + return nil } -func (x *IsWorkflowTaskValidRequest) GetNamespaceId() string { +func (x *RecordActivityTaskStartedResponse) GetWorkflowType() *v14.WorkflowType { if x != nil { - return x.NamespaceId + return x.WorkflowType } - return "" + return nil } -func (x *IsWorkflowTaskValidRequest) GetExecution() *v14.WorkflowExecution { +func (x *RecordActivityTaskStartedResponse) GetWorkflowNamespace() string { if x != nil { - return x.Execution + return x.WorkflowNamespace } - return nil + return "" } -func (x *IsWorkflowTaskValidRequest) GetClock() *v15.VectorClock { +func (x *RecordActivityTaskStartedResponse) GetClock() *v18.VectorClock { if x != nil { return x.Clock } return nil } -func (x *IsWorkflowTaskValidRequest) GetScheduledEventId() int64 { +func (x *RecordActivityTaskStartedResponse) GetVersion() int64 { if x != nil { - return x.ScheduledEventId + return x.Version } return 0 } -type IsWorkflowTaskValidResponse struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // whether matching service can call history service to start the workflow task - IsValid bool `protobuf:"varint,1,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"` -} - -func (x *IsWorkflowTaskValidResponse) Reset() { - *x = IsWorkflowTaskValidResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordActivityTaskStartedResponse) GetPriority() *v14.Priority { + if x != nil { + return x.Priority } + return nil } -func (x *IsWorkflowTaskValidResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IsWorkflowTaskValidResponse) ProtoMessage() {} - -func (x *IsWorkflowTaskValidResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *RecordActivityTaskStartedResponse) GetRetryPolicy() *v14.RetryPolicy { + if x != nil { + return x.RetryPolicy } - return mi.MessageOf(x) + return nil } -// Deprecated: Use IsWorkflowTaskValidResponse.ProtoReflect.Descriptor instead. 
-func (*IsWorkflowTaskValidResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{17} +func (x *RecordActivityTaskStartedResponse) GetStartVersion() int64 { + if x != nil { + return x.StartVersion + } + return 0 } -func (x *IsWorkflowTaskValidResponse) GetIsValid() bool { +func (x *RecordActivityTaskStartedResponse) GetActivityRunId() string { if x != nil { - return x.IsValid + return x.ActivityRunId } - return false + return "" } -type RecordActivityTaskHeartbeatRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - HeartbeatRequest *v1.RecordActivityTaskHeartbeatRequest `protobuf:"bytes,2,opt,name=heartbeat_request,json=heartbeatRequest,proto3" json:"heartbeat_request,omitempty"` +type RespondWorkflowTaskCompletedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + CompleteRequest *v1.RespondWorkflowTaskCompletedRequest `protobuf:"bytes,2,opt,name=complete_request,json=completeRequest,proto3" json:"complete_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RecordActivityTaskHeartbeatRequest) Reset() { - *x = RecordActivityTaskHeartbeatRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondWorkflowTaskCompletedRequest) Reset() { + *x = RespondWorkflowTaskCompletedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } -func (x *RecordActivityTaskHeartbeatRequest) String() string { +func (x *RespondWorkflowTaskCompletedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RecordActivityTaskHeartbeatRequest) ProtoMessage() {} +func (*RespondWorkflowTaskCompletedRequest) ProtoMessage() {} -func (x *RecordActivityTaskHeartbeatRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondWorkflowTaskCompletedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[16] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1681,51 +2055,52 @@ func (x *RecordActivityTaskHeartbeatRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use RecordActivityTaskHeartbeatRequest.ProtoReflect.Descriptor instead. -func (*RecordActivityTaskHeartbeatRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{18} +// Deprecated: Use RespondWorkflowTaskCompletedRequest.ProtoReflect.Descriptor instead. 
+func (*RespondWorkflowTaskCompletedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{16} } -func (x *RecordActivityTaskHeartbeatRequest) GetNamespaceId() string { +func (x *RespondWorkflowTaskCompletedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RecordActivityTaskHeartbeatRequest) GetHeartbeatRequest() *v1.RecordActivityTaskHeartbeatRequest { +func (x *RespondWorkflowTaskCompletedRequest) GetCompleteRequest() *v1.RespondWorkflowTaskCompletedRequest { if x != nil { - return x.HeartbeatRequest + return x.CompleteRequest } return nil } -type RecordActivityTaskHeartbeatResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CancelRequested bool `protobuf:"varint,1,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` +type RespondWorkflowTaskCompletedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. 
+ StartedResponse *RecordWorkflowTaskStartedResponse `protobuf:"bytes,1,opt,name=started_response,json=startedResponse,proto3" json:"started_response,omitempty"` + ActivityTasks []*v1.PollActivityTaskQueueResponse `protobuf:"bytes,2,rep,name=activity_tasks,json=activityTasks,proto3" json:"activity_tasks,omitempty"` + ResetHistoryEventId int64 `protobuf:"varint,3,opt,name=reset_history_event_id,json=resetHistoryEventId,proto3" json:"reset_history_event_id,omitempty"` + NewWorkflowTask *v1.PollWorkflowTaskQueueResponse `protobuf:"bytes,4,opt,name=new_workflow_task,json=newWorkflowTask,proto3" json:"new_workflow_task,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RecordActivityTaskHeartbeatResponse) Reset() { - *x = RecordActivityTaskHeartbeatResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondWorkflowTaskCompletedResponse) Reset() { + *x = RespondWorkflowTaskCompletedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RecordActivityTaskHeartbeatResponse) String() string { +func (x *RespondWorkflowTaskCompletedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RecordActivityTaskHeartbeatResponse) ProtoMessage() {} +func (*RespondWorkflowTaskCompletedResponse) ProtoMessage() {} -func (x *RecordActivityTaskHeartbeatResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondWorkflowTaskCompletedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[17] + if x != nil { ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1735,45 +2110,64 @@ func (x *RecordActivityTaskHeartbeatResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use RecordActivityTaskHeartbeatResponse.ProtoReflect.Descriptor instead. -func (*RecordActivityTaskHeartbeatResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{19} +// Deprecated: Use RespondWorkflowTaskCompletedResponse.ProtoReflect.Descriptor instead. +func (*RespondWorkflowTaskCompletedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{17} } -func (x *RecordActivityTaskHeartbeatResponse) GetCancelRequested() bool { +// Deprecated: Marked as deprecated in temporal/server/api/historyservice/v1/request_response.proto. +func (x *RespondWorkflowTaskCompletedResponse) GetStartedResponse() *RecordWorkflowTaskStartedResponse { if x != nil { - return x.CancelRequested + return x.StartedResponse } - return false + return nil } -type RespondActivityTaskCompletedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *RespondWorkflowTaskCompletedResponse) GetActivityTasks() []*v1.PollActivityTaskQueueResponse { + if x != nil { + return x.ActivityTasks + } + return nil +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - CompleteRequest *v1.RespondActivityTaskCompletedRequest `protobuf:"bytes,2,opt,name=complete_request,json=completeRequest,proto3" json:"complete_request,omitempty"` +func (x *RespondWorkflowTaskCompletedResponse) GetResetHistoryEventId() int64 { + if x != nil { + return x.ResetHistoryEventId + } + return 0 } -func (x *RespondActivityTaskCompletedRequest) Reset() { - *x = 
RespondActivityTaskCompletedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RespondWorkflowTaskCompletedResponse) GetNewWorkflowTask() *v1.PollWorkflowTaskQueueResponse { + if x != nil { + return x.NewWorkflowTask } + return nil } -func (x *RespondActivityTaskCompletedRequest) String() string { +type RespondWorkflowTaskFailedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FailedRequest *v1.RespondWorkflowTaskFailedRequest `protobuf:"bytes,2,opt,name=failed_request,json=failedRequest,proto3" json:"failed_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RespondWorkflowTaskFailedRequest) Reset() { + *x = RespondWorkflowTaskFailedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RespondWorkflowTaskFailedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondActivityTaskCompletedRequest) ProtoMessage() {} +func (*RespondWorkflowTaskFailedRequest) ProtoMessage() {} -func (x *RespondActivityTaskCompletedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondWorkflowTaskFailedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[18] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1783,49 +2177,47 @@ func (x 
*RespondActivityTaskCompletedRequest) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use RespondActivityTaskCompletedRequest.ProtoReflect.Descriptor instead. -func (*RespondActivityTaskCompletedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{20} +// Deprecated: Use RespondWorkflowTaskFailedRequest.ProtoReflect.Descriptor instead. +func (*RespondWorkflowTaskFailedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{18} } -func (x *RespondActivityTaskCompletedRequest) GetNamespaceId() string { +func (x *RespondWorkflowTaskFailedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RespondActivityTaskCompletedRequest) GetCompleteRequest() *v1.RespondActivityTaskCompletedRequest { +func (x *RespondWorkflowTaskFailedRequest) GetFailedRequest() *v1.RespondWorkflowTaskFailedRequest { if x != nil { - return x.CompleteRequest + return x.FailedRequest } return nil } -type RespondActivityTaskCompletedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondWorkflowTaskFailedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondActivityTaskCompletedResponse) Reset() { - *x = RespondActivityTaskCompletedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondWorkflowTaskFailedResponse) Reset() { + *x = RespondWorkflowTaskFailedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x 
*RespondActivityTaskCompletedResponse) String() string { +func (x *RespondWorkflowTaskFailedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondActivityTaskCompletedResponse) ProtoMessage() {} +func (*RespondWorkflowTaskFailedResponse) ProtoMessage() {} -func (x *RespondActivityTaskCompletedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondWorkflowTaskFailedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[19] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1835,38 +2227,38 @@ func (x *RespondActivityTaskCompletedResponse) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use RespondActivityTaskCompletedResponse.ProtoReflect.Descriptor instead. -func (*RespondActivityTaskCompletedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{21} +// Deprecated: Use RespondWorkflowTaskFailedResponse.ProtoReflect.Descriptor instead. 
+func (*RespondWorkflowTaskFailedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{19} } -type RespondActivityTaskFailedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - FailedRequest *v1.RespondActivityTaskFailedRequest `protobuf:"bytes,2,opt,name=failed_request,json=failedRequest,proto3" json:"failed_request,omitempty"` +type IsWorkflowTaskValidRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` + ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + Stamp int32 `protobuf:"varint,5,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondActivityTaskFailedRequest) Reset() { - *x = RespondActivityTaskFailedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *IsWorkflowTaskValidRequest) Reset() { + *x = IsWorkflowTaskValidRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondActivityTaskFailedRequest) String() string { +func (x *IsWorkflowTaskValidRequest) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*RespondActivityTaskFailedRequest) ProtoMessage() {} +func (*IsWorkflowTaskValidRequest) ProtoMessage() {} -func (x *RespondActivityTaskFailedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { +func (x *IsWorkflowTaskValidRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[20] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1876,49 +2268,70 @@ func (x *RespondActivityTaskFailedRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RespondActivityTaskFailedRequest.ProtoReflect.Descriptor instead. -func (*RespondActivityTaskFailedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{22} +// Deprecated: Use IsWorkflowTaskValidRequest.ProtoReflect.Descriptor instead. 
+func (*IsWorkflowTaskValidRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{20} } -func (x *RespondActivityTaskFailedRequest) GetNamespaceId() string { +func (x *IsWorkflowTaskValidRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RespondActivityTaskFailedRequest) GetFailedRequest() *v1.RespondActivityTaskFailedRequest { +func (x *IsWorkflowTaskValidRequest) GetExecution() *v14.WorkflowExecution { if x != nil { - return x.FailedRequest + return x.Execution } return nil } -type RespondActivityTaskFailedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *IsWorkflowTaskValidRequest) GetClock() *v18.VectorClock { + if x != nil { + return x.Clock + } + return nil } -func (x *RespondActivityTaskFailedResponse) Reset() { - *x = RespondActivityTaskFailedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *IsWorkflowTaskValidRequest) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId } + return 0 } -func (x *RespondActivityTaskFailedResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *IsWorkflowTaskValidRequest) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 } -func (*RespondActivityTaskFailedResponse) ProtoMessage() {} +type IsWorkflowTaskValidResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // whether matching service can call history service to start the workflow task + IsValid bool `protobuf:"varint,1,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} -func (x *RespondActivityTaskFailedResponse) ProtoReflect() 
protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { +func (x *IsWorkflowTaskValidResponse) Reset() { + *x = IsWorkflowTaskValidResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IsWorkflowTaskValidResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IsWorkflowTaskValidResponse) ProtoMessage() {} + +func (x *IsWorkflowTaskValidResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[21] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1928,38 +2341,42 @@ func (x *RespondActivityTaskFailedResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use RespondActivityTaskFailedResponse.ProtoReflect.Descriptor instead. -func (*RespondActivityTaskFailedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{23} +// Deprecated: Use IsWorkflowTaskValidResponse.ProtoReflect.Descriptor instead. 
+func (*IsWorkflowTaskValidResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{21} } -type RespondActivityTaskCanceledRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *IsWorkflowTaskValidResponse) GetIsValid() bool { + if x != nil { + return x.IsValid + } + return false +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - CancelRequest *v1.RespondActivityTaskCanceledRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3" json:"cancel_request,omitempty"` +type RecordActivityTaskHeartbeatRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + HeartbeatRequest *v1.RecordActivityTaskHeartbeatRequest `protobuf:"bytes,2,opt,name=heartbeat_request,json=heartbeatRequest,proto3" json:"heartbeat_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondActivityTaskCanceledRequest) Reset() { - *x = RespondActivityTaskCanceledRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordActivityTaskHeartbeatRequest) Reset() { + *x = RecordActivityTaskHeartbeatRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondActivityTaskCanceledRequest) String() string { +func (x *RecordActivityTaskHeartbeatRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondActivityTaskCanceledRequest) ProtoMessage() {} +func 
(*RecordActivityTaskHeartbeatRequest) ProtoMessage() {} -func (x *RespondActivityTaskCanceledRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordActivityTaskHeartbeatRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[22] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1969,49 +2386,50 @@ func (x *RespondActivityTaskCanceledRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use RespondActivityTaskCanceledRequest.ProtoReflect.Descriptor instead. -func (*RespondActivityTaskCanceledRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{24} +// Deprecated: Use RecordActivityTaskHeartbeatRequest.ProtoReflect.Descriptor instead. 
+func (*RecordActivityTaskHeartbeatRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{22} } -func (x *RespondActivityTaskCanceledRequest) GetNamespaceId() string { +func (x *RecordActivityTaskHeartbeatRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RespondActivityTaskCanceledRequest) GetCancelRequest() *v1.RespondActivityTaskCanceledRequest { +func (x *RecordActivityTaskHeartbeatRequest) GetHeartbeatRequest() *v1.RecordActivityTaskHeartbeatRequest { if x != nil { - return x.CancelRequest + return x.HeartbeatRequest } return nil } -type RespondActivityTaskCanceledResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type RecordActivityTaskHeartbeatResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + CancelRequested bool `protobuf:"varint,1,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` + ActivityPaused bool `protobuf:"varint,2,opt,name=activity_paused,json=activityPaused,proto3" json:"activity_paused,omitempty"` + ActivityReset bool `protobuf:"varint,3,opt,name=activity_reset,json=activityReset,proto3" json:"activity_reset,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondActivityTaskCanceledResponse) Reset() { - *x = RespondActivityTaskCanceledResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordActivityTaskHeartbeatResponse) Reset() { + *x = RecordActivityTaskHeartbeatResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x 
*RespondActivityTaskCanceledResponse) String() string { +func (x *RecordActivityTaskHeartbeatResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondActivityTaskCanceledResponse) ProtoMessage() {} +func (*RecordActivityTaskHeartbeatResponse) ProtoMessage() {} -func (x *RespondActivityTaskCanceledResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordActivityTaskHeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[23] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2021,40 +2439,56 @@ func (x *RespondActivityTaskCanceledResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use RespondActivityTaskCanceledResponse.ProtoReflect.Descriptor instead. -func (*RespondActivityTaskCanceledResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{25} +// Deprecated: Use RecordActivityTaskHeartbeatResponse.ProtoReflect.Descriptor instead. 
+func (*RecordActivityTaskHeartbeatResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{23} } -type IsActivityTaskValidRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *RecordActivityTaskHeartbeatResponse) GetCancelRequested() bool { + if x != nil { + return x.CancelRequested + } + return false +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` - ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` +func (x *RecordActivityTaskHeartbeatResponse) GetActivityPaused() bool { + if x != nil { + return x.ActivityPaused + } + return false } -func (x *IsActivityTaskValidRequest) Reset() { - *x = IsActivityTaskValidRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *RecordActivityTaskHeartbeatResponse) GetActivityReset() bool { + if x != nil { + return x.ActivityReset } + return false } -func (x *IsActivityTaskValidRequest) String() string { +type RespondActivityTaskCompletedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + CompleteRequest *v1.RespondActivityTaskCompletedRequest `protobuf:"bytes,2,opt,name=complete_request,json=completeRequest,proto3" json:"complete_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + 
+func (x *RespondActivityTaskCompletedRequest) Reset() { + *x = RespondActivityTaskCompletedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RespondActivityTaskCompletedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*IsActivityTaskValidRequest) ProtoMessage() {} +func (*RespondActivityTaskCompletedRequest) ProtoMessage() {} -func (x *IsActivityTaskValidRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondActivityTaskCompletedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[24] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2064,66 +2498,47 @@ func (x *IsActivityTaskValidRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use IsActivityTaskValidRequest.ProtoReflect.Descriptor instead. -func (*IsActivityTaskValidRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{26} +// Deprecated: Use RespondActivityTaskCompletedRequest.ProtoReflect.Descriptor instead. 
+func (*RespondActivityTaskCompletedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{24} } -func (x *IsActivityTaskValidRequest) GetNamespaceId() string { +func (x *RespondActivityTaskCompletedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *IsActivityTaskValidRequest) GetExecution() *v14.WorkflowExecution { - if x != nil { - return x.Execution - } - return nil -} - -func (x *IsActivityTaskValidRequest) GetClock() *v15.VectorClock { +func (x *RespondActivityTaskCompletedRequest) GetCompleteRequest() *v1.RespondActivityTaskCompletedRequest { if x != nil { - return x.Clock + return x.CompleteRequest } return nil } -func (x *IsActivityTaskValidRequest) GetScheduledEventId() int64 { - if x != nil { - return x.ScheduledEventId - } - return 0 -} - -type IsActivityTaskValidResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondActivityTaskCompletedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - // whether matching service can call history service to start the activity task - IsValid bool `protobuf:"varint,1,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *IsActivityTaskValidResponse) Reset() { - *x = IsActivityTaskValidResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondActivityTaskCompletedResponse) Reset() { + *x = RespondActivityTaskCompletedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *IsActivityTaskValidResponse) String() string { +func (x 
*RespondActivityTaskCompletedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*IsActivityTaskValidResponse) ProtoMessage() {} +func (*RespondActivityTaskCompletedResponse) ProtoMessage() {} -func (x *IsActivityTaskValidResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondActivityTaskCompletedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[25] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2133,47 +2548,35 @@ func (x *IsActivityTaskValidResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use IsActivityTaskValidResponse.ProtoReflect.Descriptor instead. -func (*IsActivityTaskValidResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{27} -} - -func (x *IsActivityTaskValidResponse) GetIsValid() bool { - if x != nil { - return x.IsValid - } - return false +// Deprecated: Use RespondActivityTaskCompletedResponse.ProtoReflect.Descriptor instead. 
+func (*RespondActivityTaskCompletedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{25} } -type SignalWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondActivityTaskFailedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FailedRequest *v1.RespondActivityTaskFailedRequest `protobuf:"bytes,2,opt,name=failed_request,json=failedRequest,proto3" json:"failed_request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - SignalRequest *v1.SignalWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=signal_request,json=signalRequest,proto3" json:"signal_request,omitempty"` - ExternalWorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,3,opt,name=external_workflow_execution,json=externalWorkflowExecution,proto3" json:"external_workflow_execution,omitempty"` - ChildWorkflowOnly bool `protobuf:"varint,4,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *SignalWorkflowExecutionRequest) Reset() { - *x = SignalWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondActivityTaskFailedRequest) Reset() { + *x = RespondActivityTaskFailedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SignalWorkflowExecutionRequest) String() string { +func (x 
*RespondActivityTaskFailedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SignalWorkflowExecutionRequest) ProtoMessage() {} +func (*RespondActivityTaskFailedRequest) ProtoMessage() {} -func (x *SignalWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondActivityTaskFailedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[26] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2183,63 +2586,47 @@ func (x *SignalWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SignalWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*SignalWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{28} +// Deprecated: Use RespondActivityTaskFailedRequest.ProtoReflect.Descriptor instead. 
+func (*RespondActivityTaskFailedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{26} } -func (x *SignalWorkflowExecutionRequest) GetNamespaceId() string { +func (x *RespondActivityTaskFailedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *SignalWorkflowExecutionRequest) GetSignalRequest() *v1.SignalWorkflowExecutionRequest { - if x != nil { - return x.SignalRequest - } - return nil -} - -func (x *SignalWorkflowExecutionRequest) GetExternalWorkflowExecution() *v14.WorkflowExecution { +func (x *RespondActivityTaskFailedRequest) GetFailedRequest() *v1.RespondActivityTaskFailedRequest { if x != nil { - return x.ExternalWorkflowExecution + return x.FailedRequest } return nil } -func (x *SignalWorkflowExecutionRequest) GetChildWorkflowOnly() bool { - if x != nil { - return x.ChildWorkflowOnly - } - return false -} - -type SignalWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondActivityTaskFailedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *SignalWorkflowExecutionResponse) Reset() { - *x = SignalWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondActivityTaskFailedResponse) Reset() { + *x = RespondActivityTaskFailedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SignalWorkflowExecutionResponse) String() string { +func (x *RespondActivityTaskFailedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func 
(*SignalWorkflowExecutionResponse) ProtoMessage() {} +func (*RespondActivityTaskFailedResponse) ProtoMessage() {} -func (x *SignalWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondActivityTaskFailedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[27] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2249,41 +2636,35 @@ func (x *SignalWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SignalWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*SignalWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{29} +// Deprecated: Use RespondActivityTaskFailedResponse.ProtoReflect.Descriptor instead. 
+func (*RespondActivityTaskFailedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{27} } -type SignalWithStartWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondActivityTaskCanceledRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + CancelRequest *v1.RespondActivityTaskCanceledRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3" json:"cancel_request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - // (-- api-linter: core::0140::prepositions=disabled - // - // aip.dev/not-precedent: "with" is needed here. --) - SignalWithStartRequest *v1.SignalWithStartWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=signal_with_start_request,json=signalWithStartRequest,proto3" json:"signal_with_start_request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *SignalWithStartWorkflowExecutionRequest) Reset() { - *x = SignalWithStartWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondActivityTaskCanceledRequest) Reset() { + *x = RespondActivityTaskCanceledRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SignalWithStartWorkflowExecutionRequest) String() string { +func (x *RespondActivityTaskCanceledRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SignalWithStartWorkflowExecutionRequest) 
ProtoMessage() {} +func (*RespondActivityTaskCanceledRequest) ProtoMessage() {} -func (x *SignalWithStartWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondActivityTaskCanceledRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[28] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2293,52 +2674,47 @@ func (x *SignalWithStartWorkflowExecutionRequest) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use SignalWithStartWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*SignalWithStartWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{30} +// Deprecated: Use RespondActivityTaskCanceledRequest.ProtoReflect.Descriptor instead. 
+func (*RespondActivityTaskCanceledRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{28} } -func (x *SignalWithStartWorkflowExecutionRequest) GetNamespaceId() string { +func (x *RespondActivityTaskCanceledRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *SignalWithStartWorkflowExecutionRequest) GetSignalWithStartRequest() *v1.SignalWithStartWorkflowExecutionRequest { +func (x *RespondActivityTaskCanceledRequest) GetCancelRequest() *v1.RespondActivityTaskCanceledRequest { if x != nil { - return x.SignalWithStartRequest + return x.CancelRequest } return nil } -type SignalWithStartWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondActivityTaskCanceledResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - Started bool `protobuf:"varint,2,opt,name=started,proto3" json:"started,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *SignalWithStartWorkflowExecutionResponse) Reset() { - *x = SignalWithStartWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondActivityTaskCanceledResponse) Reset() { + *x = RespondActivityTaskCanceledResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SignalWithStartWorkflowExecutionResponse) String() string { +func (x *RespondActivityTaskCanceledResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SignalWithStartWorkflowExecutionResponse) 
ProtoMessage() {} +func (*RespondActivityTaskCanceledResponse) ProtoMessage() {} -func (x *SignalWithStartWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondActivityTaskCanceledResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[29] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2348,53 +2724,39 @@ func (x *SignalWithStartWorkflowExecutionResponse) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use SignalWithStartWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*SignalWithStartWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{31} -} - -func (x *SignalWithStartWorkflowExecutionResponse) GetRunId() string { - if x != nil { - return x.RunId - } - return "" -} - -func (x *SignalWithStartWorkflowExecutionResponse) GetStarted() bool { - if x != nil { - return x.Started - } - return false +// Deprecated: Use RespondActivityTaskCanceledResponse.ProtoReflect.Descriptor instead. 
+func (*RespondActivityTaskCanceledResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{29} } -type RemoveSignalMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type IsActivityTaskValidRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` + ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + // Stamp represents the internal “version” of the activity options and can/will be changed with Activity API. + Stamp int32 `protobuf:"varint,5,opt,name=stamp,proto3" json:"stamp,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *RemoveSignalMutableStateRequest) Reset() { - *x = RemoveSignalMutableStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *IsActivityTaskValidRequest) Reset() { + *x = IsActivityTaskValidRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[30] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RemoveSignalMutableStateRequest) String() string { +func (x *IsActivityTaskValidRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveSignalMutableStateRequest) ProtoMessage() {} +func (*IsActivityTaskValidRequest) ProtoMessage() {} -func (x *RemoveSignalMutableStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { +func (x *IsActivityTaskValidRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[30] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2404,56 +2766,70 @@ func (x *RemoveSignalMutableStateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveSignalMutableStateRequest.ProtoReflect.Descriptor instead. -func (*RemoveSignalMutableStateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{32} +// Deprecated: Use IsActivityTaskValidRequest.ProtoReflect.Descriptor instead. 
+func (*IsActivityTaskValidRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{30} } -func (x *RemoveSignalMutableStateRequest) GetNamespaceId() string { +func (x *IsActivityTaskValidRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RemoveSignalMutableStateRequest) GetWorkflowExecution() *v14.WorkflowExecution { +func (x *IsActivityTaskValidRequest) GetExecution() *v14.WorkflowExecution { if x != nil { - return x.WorkflowExecution + return x.Execution } return nil } -func (x *RemoveSignalMutableStateRequest) GetRequestId() string { +func (x *IsActivityTaskValidRequest) GetClock() *v18.VectorClock { if x != nil { - return x.RequestId + return x.Clock } - return "" + return nil } -type RemoveSignalMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *IsActivityTaskValidRequest) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 } -func (x *RemoveSignalMutableStateResponse) Reset() { - *x = RemoveSignalMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *IsActivityTaskValidRequest) GetStamp() int32 { + if x != nil { + return x.Stamp } + return 0 } -func (x *RemoveSignalMutableStateResponse) String() string { +type IsActivityTaskValidResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // whether matching service can call history service to start the activity task + IsValid bool `protobuf:"varint,1,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IsActivityTaskValidResponse) Reset() { + *x = 
IsActivityTaskValidResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IsActivityTaskValidResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveSignalMutableStateResponse) ProtoMessage() {} +func (*IsActivityTaskValidResponse) ProtoMessage() {} -func (x *RemoveSignalMutableStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { +func (x *IsActivityTaskValidResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[31] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2463,40 +2839,44 @@ func (x *RemoveSignalMutableStateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveSignalMutableStateResponse.ProtoReflect.Descriptor instead. -func (*RemoveSignalMutableStateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{33} +// Deprecated: Use IsActivityTaskValidResponse.ProtoReflect.Descriptor instead. 
+func (*IsActivityTaskValidResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{31} } -type TerminateWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *IsActivityTaskValidResponse) GetIsValid() bool { + if x != nil { + return x.IsValid + } + return false +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TerminateRequest *v1.TerminateWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=terminate_request,json=terminateRequest,proto3" json:"terminate_request,omitempty"` - ExternalWorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,3,opt,name=external_workflow_execution,json=externalWorkflowExecution,proto3" json:"external_workflow_execution,omitempty"` - ChildWorkflowOnly bool `protobuf:"varint,4,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` +type SignalWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + SignalRequest *v1.SignalWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=signal_request,json=signalRequest,proto3" json:"signal_request,omitempty"` + ExternalWorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,3,opt,name=external_workflow_execution,json=externalWorkflowExecution,proto3" json:"external_workflow_execution,omitempty"` + ChildWorkflowOnly bool `protobuf:"varint,4,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *TerminateWorkflowExecutionRequest) Reset() { - *x = TerminateWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SignalWorkflowExecutionRequest) Reset() { + *x = SignalWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *TerminateWorkflowExecutionRequest) String() string { +func (x *SignalWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TerminateWorkflowExecutionRequest) ProtoMessage() {} +func (*SignalWorkflowExecutionRequest) ProtoMessage() {} -func (x *TerminateWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SignalWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[32] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2506,63 +2886,62 @@ func (x *TerminateWorkflowExecutionRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TerminateWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*TerminateWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{34} +// Deprecated: Use SignalWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*SignalWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{32} } -func (x *TerminateWorkflowExecutionRequest) GetNamespaceId() string { +func (x *SignalWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *TerminateWorkflowExecutionRequest) GetTerminateRequest() *v1.TerminateWorkflowExecutionRequest { +func (x *SignalWorkflowExecutionRequest) GetSignalRequest() *v1.SignalWorkflowExecutionRequest { if x != nil { - return x.TerminateRequest + return x.SignalRequest } return nil } -func (x *TerminateWorkflowExecutionRequest) GetExternalWorkflowExecution() *v14.WorkflowExecution { +func (x *SignalWorkflowExecutionRequest) GetExternalWorkflowExecution() *v14.WorkflowExecution { if x != nil { return x.ExternalWorkflowExecution } return nil } -func (x *TerminateWorkflowExecutionRequest) GetChildWorkflowOnly() bool { +func (x *SignalWorkflowExecutionRequest) GetChildWorkflowOnly() bool { if x != nil { return x.ChildWorkflowOnly } return false } -type TerminateWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SignalWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Link *v14.Link `protobuf:"bytes,1,opt,name=link,proto3" json:"link,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *TerminateWorkflowExecutionResponse) Reset() { - *x = TerminateWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SignalWorkflowExecutionResponse) Reset() { + *x = SignalWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[33] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *TerminateWorkflowExecutionResponse) String() string { +func (x *SignalWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TerminateWorkflowExecutionResponse) ProtoMessage() {} +func (*SignalWorkflowExecutionResponse) ProtoMessage() {} -func (x *TerminateWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SignalWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[33] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2572,40 +2951,45 @@ func (x *TerminateWorkflowExecutionResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use TerminateWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*TerminateWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{35} +// Deprecated: Use SignalWorkflowExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*SignalWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{33} } -type DeleteWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - WorkflowVersion int64 `protobuf:"varint,3,opt,name=workflow_version,json=workflowVersion,proto3" json:"workflow_version,omitempty"` - ClosedWorkflowOnly bool `protobuf:"varint,4,opt,name=closed_workflow_only,json=closedWorkflowOnly,proto3" json:"closed_workflow_only,omitempty"` +func (x *SignalWorkflowExecutionResponse) GetLink() *v14.Link { + if x != nil { + return x.Link + } + return nil } -func (x *DeleteWorkflowExecutionRequest) Reset() { - *x = DeleteWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +type SignalWithStartWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "with" is needed here. 
--) + SignalWithStartRequest *v1.SignalWithStartWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=signal_with_start_request,json=signalWithStartRequest,proto3" json:"signal_with_start_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DeleteWorkflowExecutionRequest) String() string { +func (x *SignalWithStartWorkflowExecutionRequest) Reset() { + *x = SignalWithStartWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignalWithStartWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteWorkflowExecutionRequest) ProtoMessage() {} +func (*SignalWithStartWorkflowExecutionRequest) ProtoMessage() {} -func (x *DeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SignalWithStartWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[34] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2615,63 +2999,50 @@ func (x *DeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*DeleteWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{36} +// Deprecated: Use SignalWithStartWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*SignalWithStartWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{34} } -func (x *DeleteWorkflowExecutionRequest) GetNamespaceId() string { +func (x *SignalWithStartWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *DeleteWorkflowExecutionRequest) GetWorkflowExecution() *v14.WorkflowExecution { +func (x *SignalWithStartWorkflowExecutionRequest) GetSignalWithStartRequest() *v1.SignalWithStartWorkflowExecutionRequest { if x != nil { - return x.WorkflowExecution + return x.SignalWithStartRequest } return nil } -func (x *DeleteWorkflowExecutionRequest) GetWorkflowVersion() int64 { - if x != nil { - return x.WorkflowVersion - } - return 0 -} - -func (x *DeleteWorkflowExecutionRequest) GetClosedWorkflowOnly() bool { - if x != nil { - return x.ClosedWorkflowOnly - } - return false -} - -type DeleteWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SignalWithStartWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + Started bool `protobuf:"varint,2,opt,name=started,proto3" json:"started,omitempty"` + SignalLink *v14.Link `protobuf:"bytes,3,opt,name=signal_link,json=signalLink,proto3" json:"signal_link,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DeleteWorkflowExecutionResponse) Reset() { - *x = DeleteWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SignalWithStartWorkflowExecutionResponse) Reset() { + *x = SignalWithStartWorkflowExecutionResponse{} + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DeleteWorkflowExecutionResponse) String() string { +func (x *SignalWithStartWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteWorkflowExecutionResponse) ProtoMessage() {} +func (*SignalWithStartWorkflowExecutionResponse) ProtoMessage() {} -func (x *DeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SignalWithStartWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[35] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2681,38 +3052,57 @@ func (x *DeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*DeleteWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{37} +// Deprecated: Use SignalWithStartWorkflowExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*SignalWithStartWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{35} } -type ResetWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SignalWithStartWorkflowExecutionResponse) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - ResetRequest *v1.ResetWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=reset_request,json=resetRequest,proto3" json:"reset_request,omitempty"` +func (x *SignalWithStartWorkflowExecutionResponse) GetStarted() bool { + if x != nil { + return x.Started + } + return false } -func (x *ResetWorkflowExecutionRequest) Reset() { - *x = ResetWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SignalWithStartWorkflowExecutionResponse) GetSignalLink() *v14.Link { + if x != nil { + return x.SignalLink } + return nil } -func (x *ResetWorkflowExecutionRequest) String() string { +type RemoveSignalMutableStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemoveSignalMutableStateRequest) Reset() { + *x = RemoveSignalMutableStateRequest{} + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemoveSignalMutableStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ResetWorkflowExecutionRequest) ProtoMessage() {} +func (*RemoveSignalMutableStateRequest) ProtoMessage() {} -func (x *ResetWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RemoveSignalMutableStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[36] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2722,51 +3112,54 @@ func (x *ResetWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResetWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*ResetWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{38} +// Deprecated: Use RemoveSignalMutableStateRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveSignalMutableStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{36} } -func (x *ResetWorkflowExecutionRequest) GetNamespaceId() string { +func (x *RemoveSignalMutableStateRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *ResetWorkflowExecutionRequest) GetResetRequest() *v1.ResetWorkflowExecutionRequest { +func (x *RemoveSignalMutableStateRequest) GetWorkflowExecution() *v14.WorkflowExecution { if x != nil { - return x.ResetRequest + return x.WorkflowExecution } return nil } -type ResetWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *RemoveSignalMutableStateRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} - RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` +type RemoveSignalMutableStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ResetWorkflowExecutionResponse) Reset() { - *x = ResetWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RemoveSignalMutableStateResponse) Reset() { + *x = RemoveSignalMutableStateResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ResetWorkflowExecutionResponse) String() string { +func (x *RemoveSignalMutableStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ResetWorkflowExecutionResponse) ProtoMessage() {} +func (*RemoveSignalMutableStateResponse) 
ProtoMessage() {} -func (x *ResetWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RemoveSignalMutableStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[37] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2776,48 +3169,37 @@ func (x *ResetWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ResetWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*ResetWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{39} -} - -func (x *ResetWorkflowExecutionResponse) GetRunId() string { - if x != nil { - return x.RunId - } - return "" +// Deprecated: Use RemoveSignalMutableStateResponse.ProtoReflect.Descriptor instead. 
+func (*RemoveSignalMutableStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{37} } -type RequestCancelWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - CancelRequest *v1.RequestCancelWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3" json:"cancel_request,omitempty"` - ExternalInitiatedEventId int64 `protobuf:"varint,3,opt,name=external_initiated_event_id,json=externalInitiatedEventId,proto3" json:"external_initiated_event_id,omitempty"` - ExternalWorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=external_workflow_execution,json=externalWorkflowExecution,proto3" json:"external_workflow_execution,omitempty"` - ChildWorkflowOnly bool `protobuf:"varint,5,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` +type TerminateWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TerminateRequest *v1.TerminateWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=terminate_request,json=terminateRequest,proto3" json:"terminate_request,omitempty"` + ExternalWorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,3,opt,name=external_workflow_execution,json=externalWorkflowExecution,proto3" json:"external_workflow_execution,omitempty"` + ChildWorkflowOnly bool `protobuf:"varint,4,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RequestCancelWorkflowExecutionRequest) Reset() { - *x = 
RequestCancelWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *TerminateWorkflowExecutionRequest) Reset() { + *x = TerminateWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RequestCancelWorkflowExecutionRequest) String() string { +func (x *TerminateWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RequestCancelWorkflowExecutionRequest) ProtoMessage() {} +func (*TerminateWorkflowExecutionRequest) ProtoMessage() {} -func (x *RequestCancelWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { +func (x *TerminateWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[38] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2827,70 +3209,61 @@ func (x *RequestCancelWorkflowExecutionRequest) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use RequestCancelWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*RequestCancelWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{40} +// Deprecated: Use TerminateWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*TerminateWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{38} } -func (x *RequestCancelWorkflowExecutionRequest) GetNamespaceId() string { +func (x *TerminateWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RequestCancelWorkflowExecutionRequest) GetCancelRequest() *v1.RequestCancelWorkflowExecutionRequest { +func (x *TerminateWorkflowExecutionRequest) GetTerminateRequest() *v1.TerminateWorkflowExecutionRequest { if x != nil { - return x.CancelRequest + return x.TerminateRequest } return nil } -func (x *RequestCancelWorkflowExecutionRequest) GetExternalInitiatedEventId() int64 { - if x != nil { - return x.ExternalInitiatedEventId - } - return 0 -} - -func (x *RequestCancelWorkflowExecutionRequest) GetExternalWorkflowExecution() *v14.WorkflowExecution { +func (x *TerminateWorkflowExecutionRequest) GetExternalWorkflowExecution() *v14.WorkflowExecution { if x != nil { return x.ExternalWorkflowExecution } return nil } -func (x *RequestCancelWorkflowExecutionRequest) GetChildWorkflowOnly() bool { +func (x *TerminateWorkflowExecutionRequest) GetChildWorkflowOnly() bool { if x != nil { return x.ChildWorkflowOnly } return false } -type RequestCancelWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type TerminateWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RequestCancelWorkflowExecutionResponse) Reset() { - *x = RequestCancelWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *TerminateWorkflowExecutionResponse) Reset() { + *x = 
TerminateWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RequestCancelWorkflowExecutionResponse) String() string { +func (x *TerminateWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RequestCancelWorkflowExecutionResponse) ProtoMessage() {} +func (*TerminateWorkflowExecutionResponse) ProtoMessage() {} -func (x *RequestCancelWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { +func (x *TerminateWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[39] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2900,41 +3273,36 @@ func (x *RequestCancelWorkflowExecutionResponse) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use RequestCancelWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*RequestCancelWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{41} +// Deprecated: Use TerminateWorkflowExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*TerminateWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{39} } -type ScheduleWorkflowTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - IsFirstWorkflowTask bool `protobuf:"varint,3,opt,name=is_first_workflow_task,json=isFirstWorkflowTask,proto3" json:"is_first_workflow_task,omitempty"` - ChildClock *v15.VectorClock `protobuf:"bytes,4,opt,name=child_clock,json=childClock,proto3" json:"child_clock,omitempty"` - ParentClock *v15.VectorClock `protobuf:"bytes,5,opt,name=parent_clock,json=parentClock,proto3" json:"parent_clock,omitempty"` +type DeleteWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + ClosedWorkflowOnly bool `protobuf:"varint,4,opt,name=closed_workflow_only,json=closedWorkflowOnly,proto3" json:"closed_workflow_only,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ScheduleWorkflowTaskRequest) Reset() { - *x = ScheduleWorkflowTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteWorkflowExecutionRequest) Reset() { + *x = DeleteWorkflowExecutionRequest{} + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ScheduleWorkflowTaskRequest) String() string { +func (x *DeleteWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ScheduleWorkflowTaskRequest) ProtoMessage() {} +func (*DeleteWorkflowExecutionRequest) ProtoMessage() {} -func (x *ScheduleWorkflowTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[40] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2944,70 +3312,54 @@ func (x *ScheduleWorkflowTaskRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ScheduleWorkflowTaskRequest.ProtoReflect.Descriptor instead. -func (*ScheduleWorkflowTaskRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{42} +// Deprecated: Use DeleteWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{40} } -func (x *ScheduleWorkflowTaskRequest) GetNamespaceId() string { +func (x *DeleteWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *ScheduleWorkflowTaskRequest) GetWorkflowExecution() *v14.WorkflowExecution { +func (x *DeleteWorkflowExecutionRequest) GetWorkflowExecution() *v14.WorkflowExecution { if x != nil { return x.WorkflowExecution } return nil } -func (x *ScheduleWorkflowTaskRequest) GetIsFirstWorkflowTask() bool { +func (x *DeleteWorkflowExecutionRequest) GetClosedWorkflowOnly() bool { if x != nil { - return x.IsFirstWorkflowTask + return x.ClosedWorkflowOnly } return false } -func (x *ScheduleWorkflowTaskRequest) GetChildClock() *v15.VectorClock { - if x != nil { - return x.ChildClock - } - return nil -} - -func (x *ScheduleWorkflowTaskRequest) GetParentClock() *v15.VectorClock { - if x != nil { - return x.ParentClock - } - return nil -} - -type ScheduleWorkflowTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DeleteWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ScheduleWorkflowTaskResponse) Reset() { - *x = ScheduleWorkflowTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteWorkflowExecutionResponse) Reset() { + *x = DeleteWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ScheduleWorkflowTaskResponse) String() string { +func (x 
*DeleteWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ScheduleWorkflowTaskResponse) ProtoMessage() {} +func (*DeleteWorkflowExecutionResponse) ProtoMessage() {} -func (x *ScheduleWorkflowTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[41] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3017,39 +3369,35 @@ func (x *ScheduleWorkflowTaskResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ScheduleWorkflowTaskResponse.ProtoReflect.Descriptor instead. -func (*ScheduleWorkflowTaskResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{43} +// Deprecated: Use DeleteWorkflowExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{41} } -type VerifyFirstWorkflowTaskScheduledRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ResetWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ResetRequest *v1.ResetWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=reset_request,json=resetRequest,proto3" json:"reset_request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *VerifyFirstWorkflowTaskScheduledRequest) Reset() { - *x = VerifyFirstWorkflowTaskScheduledRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ResetWorkflowExecutionRequest) Reset() { + *x = ResetWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *VerifyFirstWorkflowTaskScheduledRequest) String() string { +func (x *ResetWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VerifyFirstWorkflowTaskScheduledRequest) ProtoMessage() {} +func (*ResetWorkflowExecutionRequest) ProtoMessage() {} -func (x 
*VerifyFirstWorkflowTaskScheduledRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ResetWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[42] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3059,56 +3407,48 @@ func (x *VerifyFirstWorkflowTaskScheduledRequest) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use VerifyFirstWorkflowTaskScheduledRequest.ProtoReflect.Descriptor instead. -func (*VerifyFirstWorkflowTaskScheduledRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{44} +// Deprecated: Use ResetWorkflowExecutionRequest.ProtoReflect.Descriptor instead. +func (*ResetWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{42} } -func (x *VerifyFirstWorkflowTaskScheduledRequest) GetNamespaceId() string { +func (x *ResetWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *VerifyFirstWorkflowTaskScheduledRequest) GetWorkflowExecution() *v14.WorkflowExecution { - if x != nil { - return x.WorkflowExecution - } - return nil -} - -func (x *VerifyFirstWorkflowTaskScheduledRequest) GetClock() *v15.VectorClock { +func (x *ResetWorkflowExecutionRequest) GetResetRequest() *v1.ResetWorkflowExecutionRequest { if x != nil { - return x.Clock + return x.ResetRequest } return nil } -type VerifyFirstWorkflowTaskScheduledResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ResetWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RunId 
string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *VerifyFirstWorkflowTaskScheduledResponse) Reset() { - *x = VerifyFirstWorkflowTaskScheduledResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ResetWorkflowExecutionResponse) Reset() { + *x = ResetWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *VerifyFirstWorkflowTaskScheduledResponse) String() string { +func (x *ResetWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VerifyFirstWorkflowTaskScheduledResponse) ProtoMessage() {} +func (*ResetWorkflowExecutionResponse) ProtoMessage() {} -func (x *VerifyFirstWorkflowTaskScheduledResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ResetWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[43] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3118,49 +3458,45 @@ func (x *VerifyFirstWorkflowTaskScheduledResponse) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use VerifyFirstWorkflowTaskScheduledResponse.ProtoReflect.Descriptor instead. 
-func (*VerifyFirstWorkflowTaskScheduledResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{45} +// Deprecated: Use ResetWorkflowExecutionResponse.ProtoReflect.Descriptor instead. +func (*ResetWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{43} } -// * -// RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow -// execution which started it. When a child execution is completed it creates this request and calls the -// RecordChildExecutionCompleted API with the workflowExecution of parent. It also sets the completedExecution of the -// child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when -// child creates multiple runs through ContinueAsNew before finally completing. -type RecordChildExecutionCompletedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ResetWorkflowExecutionResponse) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - ParentExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=parent_execution,json=parentExecution,proto3" json:"parent_execution,omitempty"` - ParentInitiatedId int64 `protobuf:"varint,3,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` - ChildExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=child_execution,json=childExecution,proto3" json:"child_execution,omitempty"` - CompletionEvent *v111.HistoryEvent `protobuf:"bytes,5,opt,name=completion_event,json=completionEvent,proto3" json:"completion_event,omitempty"` - Clock *v15.VectorClock 
`protobuf:"bytes,6,opt,name=clock,proto3" json:"clock,omitempty"` - ParentInitiatedVersion int64 `protobuf:"varint,7,opt,name=parent_initiated_version,json=parentInitiatedVersion,proto3" json:"parent_initiated_version,omitempty"` +type RequestCancelWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + CancelRequest *v1.RequestCancelWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3" json:"cancel_request,omitempty"` + ExternalInitiatedEventId int64 `protobuf:"varint,3,opt,name=external_initiated_event_id,json=externalInitiatedEventId,proto3" json:"external_initiated_event_id,omitempty"` + ExternalWorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=external_workflow_execution,json=externalWorkflowExecution,proto3" json:"external_workflow_execution,omitempty"` + ChildWorkflowOnly bool `protobuf:"varint,5,opt,name=child_workflow_only,json=childWorkflowOnly,proto3" json:"child_workflow_only,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RecordChildExecutionCompletedRequest) Reset() { - *x = RecordChildExecutionCompletedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RequestCancelWorkflowExecutionRequest) Reset() { + *x = RequestCancelWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RecordChildExecutionCompletedRequest) String() string { +func (x *RequestCancelWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func 
(*RecordChildExecutionCompletedRequest) ProtoMessage() {} +func (*RequestCancelWorkflowExecutionRequest) ProtoMessage() {} -func (x *RecordChildExecutionCompletedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RequestCancelWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[44] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3170,84 +3506,68 @@ func (x *RecordChildExecutionCompletedRequest) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use RecordChildExecutionCompletedRequest.ProtoReflect.Descriptor instead. -func (*RecordChildExecutionCompletedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{46} +// Deprecated: Use RequestCancelWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*RequestCancelWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{44} } -func (x *RecordChildExecutionCompletedRequest) GetNamespaceId() string { +func (x *RequestCancelWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RecordChildExecutionCompletedRequest) GetParentExecution() *v14.WorkflowExecution { +func (x *RequestCancelWorkflowExecutionRequest) GetCancelRequest() *v1.RequestCancelWorkflowExecutionRequest { if x != nil { - return x.ParentExecution + return x.CancelRequest } return nil } -func (x *RecordChildExecutionCompletedRequest) GetParentInitiatedId() int64 { +func (x *RequestCancelWorkflowExecutionRequest) GetExternalInitiatedEventId() int64 { if x != nil { - return x.ParentInitiatedId + return x.ExternalInitiatedEventId } return 0 } -func (x *RecordChildExecutionCompletedRequest) GetChildExecution() *v14.WorkflowExecution { - if x != nil { - return x.ChildExecution - } - return nil -} - -func (x *RecordChildExecutionCompletedRequest) GetCompletionEvent() *v111.HistoryEvent { - if x != nil { - return x.CompletionEvent - } - return nil -} - -func (x *RecordChildExecutionCompletedRequest) GetClock() *v15.VectorClock { +func (x *RequestCancelWorkflowExecutionRequest) GetExternalWorkflowExecution() *v14.WorkflowExecution { if x != nil { - return x.Clock + return x.ExternalWorkflowExecution } return nil } -func (x *RecordChildExecutionCompletedRequest) GetParentInitiatedVersion() int64 { +func (x *RequestCancelWorkflowExecutionRequest) GetChildWorkflowOnly() bool { if x != nil { - return x.ParentInitiatedVersion + return x.ChildWorkflowOnly } - return 0 + return false } -type RecordChildExecutionCompletedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RequestCancelWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` 
unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RecordChildExecutionCompletedResponse) Reset() { - *x = RecordChildExecutionCompletedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RequestCancelWorkflowExecutionResponse) Reset() { + *x = RequestCancelWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RecordChildExecutionCompletedResponse) String() string { +func (x *RequestCancelWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RecordChildExecutionCompletedResponse) ProtoMessage() {} +func (*RequestCancelWorkflowExecutionResponse) ProtoMessage() {} -func (x *RecordChildExecutionCompletedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RequestCancelWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[45] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3257,42 +3577,38 @@ func (x *RecordChildExecutionCompletedResponse) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use RecordChildExecutionCompletedResponse.ProtoReflect.Descriptor instead. -func (*RecordChildExecutionCompletedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{47} +// Deprecated: Use RequestCancelWorkflowExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*RequestCancelWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{45} } -type VerifyChildExecutionCompletionRecordedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type ScheduleWorkflowTaskRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + IsFirstWorkflowTask bool `protobuf:"varint,3,opt,name=is_first_workflow_task,json=isFirstWorkflowTask,proto3" json:"is_first_workflow_task,omitempty"` + ChildClock *v18.VectorClock `protobuf:"bytes,4,opt,name=child_clock,json=childClock,proto3" json:"child_clock,omitempty"` + ParentClock *v18.VectorClock `protobuf:"bytes,5,opt,name=parent_clock,json=parentClock,proto3" json:"parent_clock,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - ParentExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=parent_execution,json=parentExecution,proto3" json:"parent_execution,omitempty"` - ChildExecution *v14.WorkflowExecution `protobuf:"bytes,3,opt,name=child_execution,json=childExecution,proto3" json:"child_execution,omitempty"` - ParentInitiatedId int64 `protobuf:"varint,4,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` - ParentInitiatedVersion int64 `protobuf:"varint,5,opt,name=parent_initiated_version,json=parentInitiatedVersion,proto3" json:"parent_initiated_version,omitempty"` - Clock *v15.VectorClock `protobuf:"bytes,6,opt,name=clock,proto3" 
json:"clock,omitempty"` -} - -func (x *VerifyChildExecutionCompletionRecordedRequest) Reset() { - *x = VerifyChildExecutionCompletionRecordedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ScheduleWorkflowTaskRequest) Reset() { + *x = ScheduleWorkflowTaskRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *VerifyChildExecutionCompletionRecordedRequest) String() string { +func (x *ScheduleWorkflowTaskRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VerifyChildExecutionCompletionRecordedRequest) ProtoMessage() {} +func (*ScheduleWorkflowTaskRequest) ProtoMessage() {} -func (x *VerifyChildExecutionCompletionRecordedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ScheduleWorkflowTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[46] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3302,77 +3618,68 @@ func (x *VerifyChildExecutionCompletionRecordedRequest) ProtoReflect() protorefl return mi.MessageOf(x) } -// Deprecated: Use VerifyChildExecutionCompletionRecordedRequest.ProtoReflect.Descriptor instead. -func (*VerifyChildExecutionCompletionRecordedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{48} +// Deprecated: Use ScheduleWorkflowTaskRequest.ProtoReflect.Descriptor instead. 
+func (*ScheduleWorkflowTaskRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{46} } -func (x *VerifyChildExecutionCompletionRecordedRequest) GetNamespaceId() string { +func (x *ScheduleWorkflowTaskRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *VerifyChildExecutionCompletionRecordedRequest) GetParentExecution() *v14.WorkflowExecution { - if x != nil { - return x.ParentExecution - } - return nil -} - -func (x *VerifyChildExecutionCompletionRecordedRequest) GetChildExecution() *v14.WorkflowExecution { +func (x *ScheduleWorkflowTaskRequest) GetWorkflowExecution() *v14.WorkflowExecution { if x != nil { - return x.ChildExecution + return x.WorkflowExecution } return nil } -func (x *VerifyChildExecutionCompletionRecordedRequest) GetParentInitiatedId() int64 { +func (x *ScheduleWorkflowTaskRequest) GetIsFirstWorkflowTask() bool { if x != nil { - return x.ParentInitiatedId + return x.IsFirstWorkflowTask } - return 0 + return false } -func (x *VerifyChildExecutionCompletionRecordedRequest) GetParentInitiatedVersion() int64 { +func (x *ScheduleWorkflowTaskRequest) GetChildClock() *v18.VectorClock { if x != nil { - return x.ParentInitiatedVersion + return x.ChildClock } - return 0 + return nil } -func (x *VerifyChildExecutionCompletionRecordedRequest) GetClock() *v15.VectorClock { +func (x *ScheduleWorkflowTaskRequest) GetParentClock() *v18.VectorClock { if x != nil { - return x.Clock + return x.ParentClock } return nil } -type VerifyChildExecutionCompletionRecordedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ScheduleWorkflowTaskResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *VerifyChildExecutionCompletionRecordedResponse) Reset() { - *x = VerifyChildExecutionCompletionRecordedResponse{} - if 
protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ScheduleWorkflowTaskResponse) Reset() { + *x = ScheduleWorkflowTaskResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *VerifyChildExecutionCompletionRecordedResponse) String() string { +func (x *ScheduleWorkflowTaskResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VerifyChildExecutionCompletionRecordedResponse) ProtoMessage() {} +func (*ScheduleWorkflowTaskResponse) ProtoMessage() {} -func (x *VerifyChildExecutionCompletionRecordedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ScheduleWorkflowTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[47] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3382,38 +3689,36 @@ func (x *VerifyChildExecutionCompletionRecordedResponse) ProtoReflect() protoref return mi.MessageOf(x) } -// Deprecated: Use VerifyChildExecutionCompletionRecordedResponse.ProtoReflect.Descriptor instead. -func (*VerifyChildExecutionCompletionRecordedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{49} +// Deprecated: Use ScheduleWorkflowTaskResponse.ProtoReflect.Descriptor instead. 
+func (*ScheduleWorkflowTaskResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{47} } -type DescribeWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.DescribeWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +type VerifyFirstWorkflowTaskScheduledRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,3,opt,name=clock,proto3" json:"clock,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DescribeWorkflowExecutionRequest) Reset() { - *x = DescribeWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *VerifyFirstWorkflowTaskScheduledRequest) Reset() { + *x = VerifyFirstWorkflowTaskScheduledRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DescribeWorkflowExecutionRequest) String() string { +func (x *VerifyFirstWorkflowTaskScheduledRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeWorkflowExecutionRequest) ProtoMessage() {} +func (*VerifyFirstWorkflowTaskScheduledRequest) ProtoMessage() {} -func (x 
*DescribeWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { +func (x *VerifyFirstWorkflowTaskScheduledRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[48] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3423,56 +3728,54 @@ func (x *DescribeWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DescribeWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*DescribeWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{50} +// Deprecated: Use VerifyFirstWorkflowTaskScheduledRequest.ProtoReflect.Descriptor instead. +func (*VerifyFirstWorkflowTaskScheduledRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{48} } -func (x *DescribeWorkflowExecutionRequest) GetNamespaceId() string { +func (x *VerifyFirstWorkflowTaskScheduledRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *DescribeWorkflowExecutionRequest) GetRequest() *v1.DescribeWorkflowExecutionRequest { +func (x *VerifyFirstWorkflowTaskScheduledRequest) GetWorkflowExecution() *v14.WorkflowExecution { if x != nil { - return x.Request + return x.WorkflowExecution } return nil } -type DescribeWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *VerifyFirstWorkflowTaskScheduledRequest) GetClock() *v18.VectorClock { + if x != nil { + return x.Clock + } + return nil +} - ExecutionConfig *v112.WorkflowExecutionConfig 
`protobuf:"bytes,1,opt,name=execution_config,json=executionConfig,proto3" json:"execution_config,omitempty"` - WorkflowExecutionInfo *v112.WorkflowExecutionInfo `protobuf:"bytes,2,opt,name=workflow_execution_info,json=workflowExecutionInfo,proto3" json:"workflow_execution_info,omitempty"` - PendingActivities []*v112.PendingActivityInfo `protobuf:"bytes,3,rep,name=pending_activities,json=pendingActivities,proto3" json:"pending_activities,omitempty"` - PendingChildren []*v112.PendingChildExecutionInfo `protobuf:"bytes,4,rep,name=pending_children,json=pendingChildren,proto3" json:"pending_children,omitempty"` - PendingWorkflowTask *v112.PendingWorkflowTaskInfo `protobuf:"bytes,5,opt,name=pending_workflow_task,json=pendingWorkflowTask,proto3" json:"pending_workflow_task,omitempty"` - Callbacks []*v112.CallbackInfo `protobuf:"bytes,6,rep,name=callbacks,proto3" json:"callbacks,omitempty"` +type VerifyFirstWorkflowTaskScheduledResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DescribeWorkflowExecutionResponse) Reset() { - *x = DescribeWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *VerifyFirstWorkflowTaskScheduledResponse) Reset() { + *x = VerifyFirstWorkflowTaskScheduledResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DescribeWorkflowExecutionResponse) String() string { +func (x *VerifyFirstWorkflowTaskScheduledResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeWorkflowExecutionResponse) ProtoMessage() {} +func (*VerifyFirstWorkflowTaskScheduledResponse) ProtoMessage() {} -func (x 
*DescribeWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { +func (x *VerifyFirstWorkflowTaskScheduledResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[49] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3482,86 +3785,47 @@ func (x *DescribeWorkflowExecutionResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use DescribeWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*DescribeWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{51} -} - -func (x *DescribeWorkflowExecutionResponse) GetExecutionConfig() *v112.WorkflowExecutionConfig { - if x != nil { - return x.ExecutionConfig - } - return nil -} - -func (x *DescribeWorkflowExecutionResponse) GetWorkflowExecutionInfo() *v112.WorkflowExecutionInfo { - if x != nil { - return x.WorkflowExecutionInfo - } - return nil -} - -func (x *DescribeWorkflowExecutionResponse) GetPendingActivities() []*v112.PendingActivityInfo { - if x != nil { - return x.PendingActivities - } - return nil -} - -func (x *DescribeWorkflowExecutionResponse) GetPendingChildren() []*v112.PendingChildExecutionInfo { - if x != nil { - return x.PendingChildren - } - return nil -} - -func (x *DescribeWorkflowExecutionResponse) GetPendingWorkflowTask() *v112.PendingWorkflowTaskInfo { - if x != nil { - return x.PendingWorkflowTask - } - return nil -} - -func (x *DescribeWorkflowExecutionResponse) GetCallbacks() []*v112.CallbackInfo { - if x != nil { - return x.Callbacks - } - return nil +// Deprecated: Use VerifyFirstWorkflowTaskScheduledResponse.ProtoReflect.Descriptor instead. 
+func (*VerifyFirstWorkflowTaskScheduledResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{49} } -type ReplicateEventsV2Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` - VersionHistoryItems []*v16.VersionHistoryItem `protobuf:"bytes,3,rep,name=version_history_items,json=versionHistoryItems,proto3" json:"version_history_items,omitempty"` - Events *v14.DataBlob `protobuf:"bytes,4,opt,name=events,proto3" json:"events,omitempty"` - // New run events does not need version history since there is no prior events. - NewRunEvents *v14.DataBlob `protobuf:"bytes,5,opt,name=new_run_events,json=newRunEvents,proto3" json:"new_run_events,omitempty"` - BaseExecutionInfo *v11.BaseExecutionInfo `protobuf:"bytes,6,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` - NewRunId string `protobuf:"bytes,7,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` +// * +// RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow +// execution which started it. When a child execution is completed it creates this request and calls the +// RecordChildExecutionCompleted API with the workflowExecution of parent. It also sets the completedExecution of the +// child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when +// child creates multiple runs through ContinueAsNew before finally completing. 
+type RecordChildExecutionCompletedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ParentExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=parent_execution,json=parentExecution,proto3" json:"parent_execution,omitempty"` + ParentInitiatedId int64 `protobuf:"varint,3,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` + ChildExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=child_execution,json=childExecution,proto3" json:"child_execution,omitempty"` + CompletionEvent *v17.HistoryEvent `protobuf:"bytes,5,opt,name=completion_event,json=completionEvent,proto3" json:"completion_event,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,6,opt,name=clock,proto3" json:"clock,omitempty"` + ParentInitiatedVersion int64 `protobuf:"varint,7,opt,name=parent_initiated_version,json=parentInitiatedVersion,proto3" json:"parent_initiated_version,omitempty"` + ChildFirstExecutionRunId string `protobuf:"bytes,8,opt,name=child_first_execution_run_id,json=childFirstExecutionRunId,proto3" json:"child_first_execution_run_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReplicateEventsV2Request) Reset() { - *x = ReplicateEventsV2Request{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordChildExecutionCompletedRequest) Reset() { + *x = RecordChildExecutionCompletedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReplicateEventsV2Request) String() string { +func (x *RecordChildExecutionCompletedRequest) String() string { 
return protoimpl.X.MessageStringOf(x) } -func (*ReplicateEventsV2Request) ProtoMessage() {} +func (*RecordChildExecutionCompletedRequest) ProtoMessage() {} -func (x *ReplicateEventsV2Request) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordChildExecutionCompletedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[50] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3571,84 +3835,89 @@ func (x *ReplicateEventsV2Request) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReplicateEventsV2Request.ProtoReflect.Descriptor instead. -func (*ReplicateEventsV2Request) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{52} +// Deprecated: Use RecordChildExecutionCompletedRequest.ProtoReflect.Descriptor instead. 
+func (*RecordChildExecutionCompletedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{50} } -func (x *ReplicateEventsV2Request) GetNamespaceId() string { +func (x *RecordChildExecutionCompletedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *ReplicateEventsV2Request) GetWorkflowExecution() *v14.WorkflowExecution { +func (x *RecordChildExecutionCompletedRequest) GetParentExecution() *v14.WorkflowExecution { if x != nil { - return x.WorkflowExecution + return x.ParentExecution } return nil } -func (x *ReplicateEventsV2Request) GetVersionHistoryItems() []*v16.VersionHistoryItem { +func (x *RecordChildExecutionCompletedRequest) GetParentInitiatedId() int64 { if x != nil { - return x.VersionHistoryItems + return x.ParentInitiatedId } - return nil + return 0 } -func (x *ReplicateEventsV2Request) GetEvents() *v14.DataBlob { +func (x *RecordChildExecutionCompletedRequest) GetChildExecution() *v14.WorkflowExecution { if x != nil { - return x.Events + return x.ChildExecution } return nil } -func (x *ReplicateEventsV2Request) GetNewRunEvents() *v14.DataBlob { +func (x *RecordChildExecutionCompletedRequest) GetCompletionEvent() *v17.HistoryEvent { if x != nil { - return x.NewRunEvents + return x.CompletionEvent } return nil } -func (x *ReplicateEventsV2Request) GetBaseExecutionInfo() *v11.BaseExecutionInfo { +func (x *RecordChildExecutionCompletedRequest) GetClock() *v18.VectorClock { if x != nil { - return x.BaseExecutionInfo + return x.Clock } return nil } -func (x *ReplicateEventsV2Request) GetNewRunId() string { +func (x *RecordChildExecutionCompletedRequest) GetParentInitiatedVersion() int64 { if x != nil { - return x.NewRunId + return x.ParentInitiatedVersion + } + return 0 +} + +func (x *RecordChildExecutionCompletedRequest) GetChildFirstExecutionRunId() string { + if x != nil { + return x.ChildFirstExecutionRunId } return "" } -type 
ReplicateEventsV2Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RecordChildExecutionCompletedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReplicateEventsV2Response) Reset() { - *x = ReplicateEventsV2Response{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordChildExecutionCompletedResponse) Reset() { + *x = RecordChildExecutionCompletedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReplicateEventsV2Response) String() string { +func (x *RecordChildExecutionCompletedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplicateEventsV2Response) ProtoMessage() {} +func (*RecordChildExecutionCompletedResponse) ProtoMessage() {} -func (x *ReplicateEventsV2Response) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordChildExecutionCompletedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[51] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3658,39 +3927,40 @@ func (x *ReplicateEventsV2Response) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReplicateEventsV2Response.ProtoReflect.Descriptor instead. 
-func (*ReplicateEventsV2Response) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{53} +// Deprecated: Use RecordChildExecutionCompletedResponse.ProtoReflect.Descriptor instead. +func (*RecordChildExecutionCompletedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{51} } -type ReplicateWorkflowStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - WorkflowState *v113.WorkflowMutableState `protobuf:"bytes,1,opt,name=workflow_state,json=workflowState,proto3" json:"workflow_state,omitempty"` - RemoteCluster string `protobuf:"bytes,2,opt,name=remote_cluster,json=remoteCluster,proto3" json:"remote_cluster,omitempty"` - NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` +type VerifyChildExecutionCompletionRecordedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ParentExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=parent_execution,json=parentExecution,proto3" json:"parent_execution,omitempty"` + ChildExecution *v14.WorkflowExecution `protobuf:"bytes,3,opt,name=child_execution,json=childExecution,proto3" json:"child_execution,omitempty"` + ParentInitiatedId int64 `protobuf:"varint,4,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` + ParentInitiatedVersion int64 `protobuf:"varint,5,opt,name=parent_initiated_version,json=parentInitiatedVersion,proto3" json:"parent_initiated_version,omitempty"` + Clock *v18.VectorClock `protobuf:"bytes,6,opt,name=clock,proto3" json:"clock,omitempty"` + ResendParent bool `protobuf:"varint,7,opt,name=resend_parent,json=resendParent,proto3" 
json:"resend_parent,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReplicateWorkflowStateRequest) Reset() { - *x = ReplicateWorkflowStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *VerifyChildExecutionCompletionRecordedRequest) Reset() { + *x = VerifyChildExecutionCompletionRecordedRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReplicateWorkflowStateRequest) String() string { +func (x *VerifyChildExecutionCompletionRecordedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplicateWorkflowStateRequest) ProtoMessage() {} +func (*VerifyChildExecutionCompletionRecordedRequest) ProtoMessage() {} -func (x *ReplicateWorkflowStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { +func (x *VerifyChildExecutionCompletionRecordedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[52] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3700,56 +3970,82 @@ func (x *ReplicateWorkflowStateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReplicateWorkflowStateRequest.ProtoReflect.Descriptor instead. 
-func (*ReplicateWorkflowStateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{54} +// Deprecated: Use VerifyChildExecutionCompletionRecordedRequest.ProtoReflect.Descriptor instead. +func (*VerifyChildExecutionCompletionRecordedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{52} } -func (x *ReplicateWorkflowStateRequest) GetWorkflowState() *v113.WorkflowMutableState { +func (x *VerifyChildExecutionCompletionRecordedRequest) GetNamespaceId() string { if x != nil { - return x.WorkflowState + return x.NamespaceId + } + return "" +} + +func (x *VerifyChildExecutionCompletionRecordedRequest) GetParentExecution() *v14.WorkflowExecution { + if x != nil { + return x.ParentExecution } return nil } -func (x *ReplicateWorkflowStateRequest) GetRemoteCluster() string { +func (x *VerifyChildExecutionCompletionRecordedRequest) GetChildExecution() *v14.WorkflowExecution { if x != nil { - return x.RemoteCluster + return x.ChildExecution } - return "" + return nil } -func (x *ReplicateWorkflowStateRequest) GetNamespaceId() string { +func (x *VerifyChildExecutionCompletionRecordedRequest) GetParentInitiatedId() int64 { if x != nil { - return x.NamespaceId + return x.ParentInitiatedId } - return "" + return 0 } -type ReplicateWorkflowStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *VerifyChildExecutionCompletionRecordedRequest) GetParentInitiatedVersion() int64 { + if x != nil { + return x.ParentInitiatedVersion + } + return 0 } -func (x *ReplicateWorkflowStateResponse) Reset() { - *x = ReplicateWorkflowStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x 
*VerifyChildExecutionCompletionRecordedRequest) GetClock() *v18.VectorClock { + if x != nil { + return x.Clock } + return nil } -func (x *ReplicateWorkflowStateResponse) String() string { +func (x *VerifyChildExecutionCompletionRecordedRequest) GetResendParent() bool { + if x != nil { + return x.ResendParent + } + return false +} + +type VerifyChildExecutionCompletionRecordedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerifyChildExecutionCompletionRecordedResponse) Reset() { + *x = VerifyChildExecutionCompletionRecordedResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerifyChildExecutionCompletionRecordedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplicateWorkflowStateResponse) ProtoMessage() {} +func (*VerifyChildExecutionCompletionRecordedResponse) ProtoMessage() {} -func (x *ReplicateWorkflowStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { +func (x *VerifyChildExecutionCompletionRecordedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[53] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3759,39 +4055,35 @@ func (x *ReplicateWorkflowStateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReplicateWorkflowStateResponse.ProtoReflect.Descriptor instead. 
-func (*ReplicateWorkflowStateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{55} +// Deprecated: Use VerifyChildExecutionCompletionRecordedResponse.ProtoReflect.Descriptor instead. +func (*VerifyChildExecutionCompletionRecordedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{53} } -type SyncShardStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DescribeWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.DescribeWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - SourceCluster string `protobuf:"bytes,1,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` - ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - StatusTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=status_time,json=statusTime,proto3" json:"status_time,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *SyncShardStatusRequest) Reset() { - *x = SyncShardStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeWorkflowExecutionRequest) Reset() { + *x = DescribeWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SyncShardStatusRequest) String() string { +func (x *DescribeWorkflowExecutionRequest) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*SyncShardStatusRequest) ProtoMessage() {} +func (*DescribeWorkflowExecutionRequest) ProtoMessage() {} -func (x *SyncShardStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[54] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3801,56 +4093,55 @@ func (x *SyncShardStatusRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SyncShardStatusRequest.ProtoReflect.Descriptor instead. -func (*SyncShardStatusRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{56} +// Deprecated: Use DescribeWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{54} } -func (x *SyncShardStatusRequest) GetSourceCluster() string { +func (x *DescribeWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { - return x.SourceCluster + return x.NamespaceId } return "" } -func (x *SyncShardStatusRequest) GetShardId() int32 { - if x != nil { - return x.ShardId - } - return 0 -} - -func (x *SyncShardStatusRequest) GetStatusTime() *timestamppb.Timestamp { +func (x *DescribeWorkflowExecutionRequest) GetRequest() *v1.DescribeWorkflowExecutionRequest { if x != nil { - return x.StatusTime + return x.Request } return nil } -type SyncShardStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type DescribeWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ExecutionConfig *v15.WorkflowExecutionConfig `protobuf:"bytes,1,opt,name=execution_config,json=executionConfig,proto3" json:"execution_config,omitempty"` + WorkflowExecutionInfo *v15.WorkflowExecutionInfo `protobuf:"bytes,2,opt,name=workflow_execution_info,json=workflowExecutionInfo,proto3" json:"workflow_execution_info,omitempty"` + PendingActivities []*v15.PendingActivityInfo `protobuf:"bytes,3,rep,name=pending_activities,json=pendingActivities,proto3" json:"pending_activities,omitempty"` + PendingChildren []*v15.PendingChildExecutionInfo `protobuf:"bytes,4,rep,name=pending_children,json=pendingChildren,proto3" json:"pending_children,omitempty"` + PendingWorkflowTask *v15.PendingWorkflowTaskInfo `protobuf:"bytes,5,opt,name=pending_workflow_task,json=pendingWorkflowTask,proto3" json:"pending_workflow_task,omitempty"` + Callbacks []*v15.CallbackInfo `protobuf:"bytes,6,rep,name=callbacks,proto3" json:"callbacks,omitempty"` + PendingNexusOperations []*v15.PendingNexusOperationInfo 
`protobuf:"bytes,7,rep,name=pending_nexus_operations,json=pendingNexusOperations,proto3" json:"pending_nexus_operations,omitempty"` + WorkflowExtendedInfo *v15.WorkflowExecutionExtendedInfo `protobuf:"bytes,8,opt,name=workflow_extended_info,json=workflowExtendedInfo,proto3" json:"workflow_extended_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *SyncShardStatusResponse) Reset() { - *x = SyncShardStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeWorkflowExecutionResponse) Reset() { + *x = DescribeWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SyncShardStatusResponse) String() string { +func (x *DescribeWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SyncShardStatusResponse) ProtoMessage() {} +func (*DescribeWorkflowExecutionResponse) ProtoMessage() {} -func (x *SyncShardStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[55] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3860,199 +4151,294 @@ func (x *SyncShardStatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SyncShardStatusResponse.ProtoReflect.Descriptor instead. 
-func (*SyncShardStatusResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{57} +// Deprecated: Use DescribeWorkflowExecutionResponse.ProtoReflect.Descriptor instead. +func (*DescribeWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{55} } -type SyncActivityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` - ScheduledEventId int64 `protobuf:"varint,5,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` - StartedEventId int64 `protobuf:"varint,7,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` - StartedTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` - LastHeartbeatTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_heartbeat_time,json=lastHeartbeatTime,proto3" json:"last_heartbeat_time,omitempty"` - Details *v14.Payloads `protobuf:"bytes,10,opt,name=details,proto3" json:"details,omitempty"` - Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` - LastFailure *v13.Failure `protobuf:"bytes,12,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` - 
LastWorkerIdentity string `protobuf:"bytes,13,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` - VersionHistory *v16.VersionHistory `protobuf:"bytes,14,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` - BaseExecutionInfo *v11.BaseExecutionInfo `protobuf:"bytes,15,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` +func (x *DescribeWorkflowExecutionResponse) GetExecutionConfig() *v15.WorkflowExecutionConfig { + if x != nil { + return x.ExecutionConfig + } + return nil } -func (x *SyncActivityRequest) Reset() { - *x = SyncActivityRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *DescribeWorkflowExecutionResponse) GetWorkflowExecutionInfo() *v15.WorkflowExecutionInfo { + if x != nil { + return x.WorkflowExecutionInfo } + return nil } -func (x *SyncActivityRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *DescribeWorkflowExecutionResponse) GetPendingActivities() []*v15.PendingActivityInfo { + if x != nil { + return x.PendingActivities + } + return nil } -func (*SyncActivityRequest) ProtoMessage() {} - -func (x *SyncActivityRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *DescribeWorkflowExecutionResponse) GetPendingChildren() []*v15.PendingChildExecutionInfo { + if x != nil { + return x.PendingChildren } - return mi.MessageOf(x) + return nil } -// Deprecated: Use SyncActivityRequest.ProtoReflect.Descriptor instead. 
-func (*SyncActivityRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{58} +func (x *DescribeWorkflowExecutionResponse) GetPendingWorkflowTask() *v15.PendingWorkflowTaskInfo { + if x != nil { + return x.PendingWorkflowTask + } + return nil } -func (x *SyncActivityRequest) GetNamespaceId() string { +func (x *DescribeWorkflowExecutionResponse) GetCallbacks() []*v15.CallbackInfo { if x != nil { - return x.NamespaceId + return x.Callbacks } - return "" + return nil } -func (x *SyncActivityRequest) GetWorkflowId() string { +func (x *DescribeWorkflowExecutionResponse) GetPendingNexusOperations() []*v15.PendingNexusOperationInfo { if x != nil { - return x.WorkflowId + return x.PendingNexusOperations } - return "" + return nil } -func (x *SyncActivityRequest) GetRunId() string { +func (x *DescribeWorkflowExecutionResponse) GetWorkflowExtendedInfo() *v15.WorkflowExecutionExtendedInfo { if x != nil { - return x.RunId + return x.WorkflowExtendedInfo } - return "" + return nil } -func (x *SyncActivityRequest) GetVersion() int64 { +type ReplicateEventsV2Request struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + VersionHistoryItems []*v19.VersionHistoryItem `protobuf:"bytes,3,rep,name=version_history_items,json=versionHistoryItems,proto3" json:"version_history_items,omitempty"` + Events *v14.DataBlob `protobuf:"bytes,4,opt,name=events,proto3" json:"events,omitempty"` + // New run events does not need version history since there is no prior events. 
+ NewRunEvents *v14.DataBlob `protobuf:"bytes,5,opt,name=new_run_events,json=newRunEvents,proto3" json:"new_run_events,omitempty"` + BaseExecutionInfo *v11.BaseExecutionInfo `protobuf:"bytes,6,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` + NewRunId string `protobuf:"bytes,7,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicateEventsV2Request) Reset() { + *x = ReplicateEventsV2Request{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicateEventsV2Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateEventsV2Request) ProtoMessage() {} + +func (x *ReplicateEventsV2Request) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[56] if x != nil { - return x.Version + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *SyncActivityRequest) GetScheduledEventId() int64 { +// Deprecated: Use ReplicateEventsV2Request.ProtoReflect.Descriptor instead. 
+func (*ReplicateEventsV2Request) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{56} +} + +func (x *ReplicateEventsV2Request) GetNamespaceId() string { if x != nil { - return x.ScheduledEventId + return x.NamespaceId } - return 0 + return "" } -func (x *SyncActivityRequest) GetScheduledTime() *timestamppb.Timestamp { +func (x *ReplicateEventsV2Request) GetWorkflowExecution() *v14.WorkflowExecution { if x != nil { - return x.ScheduledTime + return x.WorkflowExecution } return nil } -func (x *SyncActivityRequest) GetStartedEventId() int64 { +func (x *ReplicateEventsV2Request) GetVersionHistoryItems() []*v19.VersionHistoryItem { if x != nil { - return x.StartedEventId + return x.VersionHistoryItems } - return 0 + return nil } -func (x *SyncActivityRequest) GetStartedTime() *timestamppb.Timestamp { +func (x *ReplicateEventsV2Request) GetEvents() *v14.DataBlob { if x != nil { - return x.StartedTime + return x.Events } return nil } -func (x *SyncActivityRequest) GetLastHeartbeatTime() *timestamppb.Timestamp { +func (x *ReplicateEventsV2Request) GetNewRunEvents() *v14.DataBlob { if x != nil { - return x.LastHeartbeatTime + return x.NewRunEvents } return nil } -func (x *SyncActivityRequest) GetDetails() *v14.Payloads { +func (x *ReplicateEventsV2Request) GetBaseExecutionInfo() *v11.BaseExecutionInfo { if x != nil { - return x.Details + return x.BaseExecutionInfo } return nil } -func (x *SyncActivityRequest) GetAttempt() int32 { +func (x *ReplicateEventsV2Request) GetNewRunId() string { if x != nil { - return x.Attempt + return x.NewRunId } - return 0 + return "" } -func (x *SyncActivityRequest) GetLastFailure() *v13.Failure { +type ReplicateEventsV2Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicateEventsV2Response) Reset() { + *x = ReplicateEventsV2Response{} + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicateEventsV2Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateEventsV2Response) ProtoMessage() {} + +func (x *ReplicateEventsV2Response) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[57] if x != nil { - return x.LastFailure + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *SyncActivityRequest) GetLastWorkerIdentity() string { +// Deprecated: Use ReplicateEventsV2Response.ProtoReflect.Descriptor instead. +func (*ReplicateEventsV2Response) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{57} +} + +type ReplicateWorkflowStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + WorkflowState *v110.WorkflowMutableState `protobuf:"bytes,1,opt,name=workflow_state,json=workflowState,proto3" json:"workflow_state,omitempty"` + RemoteCluster string `protobuf:"bytes,2,opt,name=remote_cluster,json=remoteCluster,proto3" json:"remote_cluster,omitempty"` + NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + IsForceReplication bool `protobuf:"varint,4,opt,name=is_force_replication,json=isForceReplication,proto3" json:"is_force_replication,omitempty"` + IsCloseTransferTaskAcked bool `protobuf:"varint,5,opt,name=is_close_transfer_task_acked,json=isCloseTransferTaskAcked,proto3" json:"is_close_transfer_task_acked,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicateWorkflowStateRequest) Reset() { + *x = ReplicateWorkflowStateRequest{} + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicateWorkflowStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateWorkflowStateRequest) ProtoMessage() {} + +func (x *ReplicateWorkflowStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[58] if x != nil { - return x.LastWorkerIdentity + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicateWorkflowStateRequest.ProtoReflect.Descriptor instead. +func (*ReplicateWorkflowStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{58} } -func (x *SyncActivityRequest) GetVersionHistory() *v16.VersionHistory { +func (x *ReplicateWorkflowStateRequest) GetWorkflowState() *v110.WorkflowMutableState { if x != nil { - return x.VersionHistory + return x.WorkflowState } return nil } -func (x *SyncActivityRequest) GetBaseExecutionInfo() *v11.BaseExecutionInfo { +func (x *ReplicateWorkflowStateRequest) GetRemoteCluster() string { if x != nil { - return x.BaseExecutionInfo + return x.RemoteCluster } - return nil + return "" } -type SyncActivitiesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ReplicateWorkflowStateRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string 
`protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - ActivitiesInfo []*ActivitySyncInfo `protobuf:"bytes,4,rep,name=activities_info,json=activitiesInfo,proto3" json:"activities_info,omitempty"` +func (x *ReplicateWorkflowStateRequest) GetIsForceReplication() bool { + if x != nil { + return x.IsForceReplication + } + return false } -func (x *SyncActivitiesRequest) Reset() { - *x = SyncActivitiesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ReplicateWorkflowStateRequest) GetIsCloseTransferTaskAcked() bool { + if x != nil { + return x.IsCloseTransferTaskAcked } + return false } -func (x *SyncActivitiesRequest) String() string { +type ReplicateWorkflowStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicateWorkflowStateResponse) Reset() { + *x = ReplicateWorkflowStateResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicateWorkflowStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SyncActivitiesRequest) ProtoMessage() {} +func (*ReplicateWorkflowStateResponse) ProtoMessage() {} -func (x *SyncActivitiesRequest) ProtoReflect() protoreflect.Message { +func (x *ReplicateWorkflowStateResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4062,75 +4448,2051 @@ func (x *SyncActivitiesRequest) ProtoReflect() protoreflect.Message { return 
mi.MessageOf(x) } -// Deprecated: Use SyncActivitiesRequest.ProtoReflect.Descriptor instead. -func (*SyncActivitiesRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ReplicateWorkflowStateResponse.ProtoReflect.Descriptor instead. +func (*ReplicateWorkflowStateResponse) Descriptor() ([]byte, []int) { return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{59} } -func (x *SyncActivitiesRequest) GetNamespaceId() string { +type SyncShardStatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceCluster string `protobuf:"bytes,1,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + StatusTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=status_time,json=statusTime,proto3" json:"status_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncShardStatusRequest) Reset() { + *x = SyncShardStatusRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncShardStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncShardStatusRequest) ProtoMessage() {} + +func (x *SyncShardStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[60] if x != nil { - return x.NamespaceId + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *SyncActivitiesRequest) GetWorkflowId() string { +// Deprecated: Use SyncShardStatusRequest.ProtoReflect.Descriptor instead. 
+func (*SyncShardStatusRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{60} +} + +func (x *SyncShardStatusRequest) GetSourceCluster() string { if x != nil { - return x.WorkflowId + return x.SourceCluster } return "" } -func (x *SyncActivitiesRequest) GetRunId() string { +func (x *SyncShardStatusRequest) GetShardId() int32 { if x != nil { - return x.RunId + return x.ShardId } - return "" + return 0 } -func (x *SyncActivitiesRequest) GetActivitiesInfo() []*ActivitySyncInfo { +func (x *SyncShardStatusRequest) GetStatusTime() *timestamppb.Timestamp { if x != nil { - return x.ActivitiesInfo + return x.StatusTime } return nil } -type ActivitySyncInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SyncShardStatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - ScheduledEventId int64 `protobuf:"varint,2,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` - StartedEventId int64 `protobuf:"varint,4,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` - StartedTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` - LastHeartbeatTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_heartbeat_time,json=lastHeartbeatTime,proto3" json:"last_heartbeat_time,omitempty"` - Details *v14.Payloads `protobuf:"bytes,7,opt,name=details,proto3" json:"details,omitempty"` - Attempt int32 `protobuf:"varint,8,opt,name=attempt,proto3" json:"attempt,omitempty"` - LastFailure *v13.Failure 
`protobuf:"bytes,9,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` - LastWorkerIdentity string `protobuf:"bytes,10,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` - VersionHistory *v16.VersionHistory `protobuf:"bytes,11,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` +func (x *SyncShardStatusResponse) Reset() { + *x = SyncShardStatusResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncShardStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncShardStatusResponse) ProtoMessage() {} + +func (x *SyncShardStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[61] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncShardStatusResponse.ProtoReflect.Descriptor instead. 
+func (*SyncShardStatusResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{61} +} + +type SyncActivityRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"` + ScheduledEventId int64 `protobuf:"varint,5,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartedEventId int64 `protobuf:"varint,7,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + StartedTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + LastHeartbeatTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_heartbeat_time,json=lastHeartbeatTime,proto3" json:"last_heartbeat_time,omitempty"` + Details *v14.Payloads `protobuf:"bytes,10,opt,name=details,proto3" json:"details,omitempty"` + Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` + LastFailure *v13.Failure `protobuf:"bytes,12,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` + LastWorkerIdentity string `protobuf:"bytes,13,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` + VersionHistory *v19.VersionHistory `protobuf:"bytes,14,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + BaseExecutionInfo 
*v11.BaseExecutionInfo `protobuf:"bytes,15,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` + // build ID of the worker who received this activity last time + LastStartedBuildId string `protobuf:"bytes,16,opt,name=last_started_build_id,json=lastStartedBuildId,proto3" json:"last_started_build_id,omitempty"` + // workflows redirect_counter value when this activity started last time + LastStartedRedirectCounter int64 `protobuf:"varint,17,opt,name=last_started_redirect_counter,json=lastStartedRedirectCounter,proto3" json:"last_started_redirect_counter,omitempty"` + // The first time the activity was scheduled. + FirstScheduledTime *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=first_scheduled_time,json=firstScheduledTime,proto3" json:"first_scheduled_time,omitempty"` + // The last time an activity attempt completion was recorded by the server. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // Stamp represents the internal “version” of the activity options and can/will be changed with Activity API. + Stamp int32 `protobuf:"varint,20,opt,name=stamp,proto3" json:"stamp,omitempty"` + // Indicates if the activity is paused. + Paused bool `protobuf:"varint,21,opt,name=paused,proto3" json:"paused,omitempty"` + // Retry policy for the activity. 
+ RetryInitialInterval *durationpb.Duration `protobuf:"bytes,22,opt,name=retry_initial_interval,json=retryInitialInterval,proto3" json:"retry_initial_interval,omitempty"` + RetryMaximumInterval *durationpb.Duration `protobuf:"bytes,23,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3" json:"retry_maximum_interval,omitempty"` + RetryMaximumAttempts int32 `protobuf:"varint,24,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` + RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" json:"retry_backoff_coefficient,omitempty"` + StartVersion int64 `protobuf:"varint,26,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncActivityRequest) Reset() { + *x = SyncActivityRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncActivityRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncActivityRequest) ProtoMessage() {} + +func (x *SyncActivityRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[62] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncActivityRequest.ProtoReflect.Descriptor instead. 
+func (*SyncActivityRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{62} +} + +func (x *SyncActivityRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SyncActivityRequest) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *SyncActivityRequest) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *SyncActivityRequest) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *SyncActivityRequest) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 +} + +func (x *SyncActivityRequest) GetScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledTime + } + return nil +} + +func (x *SyncActivityRequest) GetStartedEventId() int64 { + if x != nil { + return x.StartedEventId + } + return 0 +} + +func (x *SyncActivityRequest) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *SyncActivityRequest) GetLastHeartbeatTime() *timestamppb.Timestamp { + if x != nil { + return x.LastHeartbeatTime + } + return nil +} + +func (x *SyncActivityRequest) GetDetails() *v14.Payloads { + if x != nil { + return x.Details + } + return nil +} + +func (x *SyncActivityRequest) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *SyncActivityRequest) GetLastFailure() *v13.Failure { + if x != nil { + return x.LastFailure + } + return nil +} + +func (x *SyncActivityRequest) GetLastWorkerIdentity() string { + if x != nil { + return x.LastWorkerIdentity + } + return "" +} + +func (x *SyncActivityRequest) GetVersionHistory() *v19.VersionHistory { + if x != nil { + return x.VersionHistory + } + return nil +} + +func (x *SyncActivityRequest) GetBaseExecutionInfo() *v11.BaseExecutionInfo { + if x != nil { + return 
x.BaseExecutionInfo + } + return nil +} + +func (x *SyncActivityRequest) GetLastStartedBuildId() string { + if x != nil { + return x.LastStartedBuildId + } + return "" +} + +func (x *SyncActivityRequest) GetLastStartedRedirectCounter() int64 { + if x != nil { + return x.LastStartedRedirectCounter + } + return 0 +} + +func (x *SyncActivityRequest) GetFirstScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.FirstScheduledTime + } + return nil +} + +func (x *SyncActivityRequest) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.LastAttemptCompleteTime + } + return nil +} + +func (x *SyncActivityRequest) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +func (x *SyncActivityRequest) GetPaused() bool { + if x != nil { + return x.Paused + } + return false +} + +func (x *SyncActivityRequest) GetRetryInitialInterval() *durationpb.Duration { + if x != nil { + return x.RetryInitialInterval + } + return nil +} + +func (x *SyncActivityRequest) GetRetryMaximumInterval() *durationpb.Duration { + if x != nil { + return x.RetryMaximumInterval + } + return nil +} + +func (x *SyncActivityRequest) GetRetryMaximumAttempts() int32 { + if x != nil { + return x.RetryMaximumAttempts + } + return 0 +} + +func (x *SyncActivityRequest) GetRetryBackoffCoefficient() float64 { + if x != nil { + return x.RetryBackoffCoefficient + } + return 0 +} + +func (x *SyncActivityRequest) GetStartVersion() int64 { + if x != nil { + return x.StartVersion + } + return 0 +} + +type SyncActivitiesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + ActivitiesInfo []*ActivitySyncInfo 
`protobuf:"bytes,4,rep,name=activities_info,json=activitiesInfo,proto3" json:"activities_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncActivitiesRequest) Reset() { + *x = SyncActivitiesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncActivitiesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncActivitiesRequest) ProtoMessage() {} + +func (x *SyncActivitiesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[63] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncActivitiesRequest.ProtoReflect.Descriptor instead. +func (*SyncActivitiesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{63} +} + +func (x *SyncActivitiesRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SyncActivitiesRequest) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *SyncActivitiesRequest) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *SyncActivitiesRequest) GetActivitiesInfo() []*ActivitySyncInfo { + if x != nil { + return x.ActivitiesInfo + } + return nil +} + +type ActivitySyncInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + ScheduledEventId int64 `protobuf:"varint,2,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + ScheduledTime *timestamppb.Timestamp 
`protobuf:"bytes,3,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartedEventId int64 `protobuf:"varint,4,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + StartedTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + LastHeartbeatTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_heartbeat_time,json=lastHeartbeatTime,proto3" json:"last_heartbeat_time,omitempty"` + Details *v14.Payloads `protobuf:"bytes,7,opt,name=details,proto3" json:"details,omitempty"` + Attempt int32 `protobuf:"varint,8,opt,name=attempt,proto3" json:"attempt,omitempty"` + LastFailure *v13.Failure `protobuf:"bytes,9,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` + LastWorkerIdentity string `protobuf:"bytes,10,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` + VersionHistory *v19.VersionHistory `protobuf:"bytes,11,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + // build ID of the worker who received this activity last time + LastStartedBuildId string `protobuf:"bytes,12,opt,name=last_started_build_id,json=lastStartedBuildId,proto3" json:"last_started_build_id,omitempty"` + // workflows redirect_counter value when this activity started last time + LastStartedRedirectCounter int64 `protobuf:"varint,13,opt,name=last_started_redirect_counter,json=lastStartedRedirectCounter,proto3" json:"last_started_redirect_counter,omitempty"` + // The first time the activity was scheduled. + FirstScheduledTime *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=first_scheduled_time,json=firstScheduledTime,proto3" json:"first_scheduled_time,omitempty"` + // The last time an activity attempt completion was recorded by the server. 
+ LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // Stamp represents the internal “version” of the activity options and can/will be changed with Activity API. + Stamp int32 `protobuf:"varint,20,opt,name=stamp,proto3" json:"stamp,omitempty"` + // Indicates if the activity is paused. + Paused bool `protobuf:"varint,21,opt,name=paused,proto3" json:"paused,omitempty"` + // Retry policy for the activity. It needs to be replicated now, since the activity properties can be updated. + RetryInitialInterval *durationpb.Duration `protobuf:"bytes,22,opt,name=retry_initial_interval,json=retryInitialInterval,proto3" json:"retry_initial_interval,omitempty"` + RetryMaximumInterval *durationpb.Duration `protobuf:"bytes,23,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3" json:"retry_maximum_interval,omitempty"` + RetryMaximumAttempts int32 `protobuf:"varint,24,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` + RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" json:"retry_backoff_coefficient,omitempty"` + StartVersion int64 `protobuf:"varint,26,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivitySyncInfo) Reset() { + *x = ActivitySyncInfo{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivitySyncInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivitySyncInfo) ProtoMessage() {} + +func (x *ActivitySyncInfo) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[64] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivitySyncInfo.ProtoReflect.Descriptor instead. +func (*ActivitySyncInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{64} +} + +func (x *ActivitySyncInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ActivitySyncInfo) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 +} + +func (x *ActivitySyncInfo) GetScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledTime + } + return nil +} + +func (x *ActivitySyncInfo) GetStartedEventId() int64 { + if x != nil { + return x.StartedEventId + } + return 0 +} + +func (x *ActivitySyncInfo) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *ActivitySyncInfo) GetLastHeartbeatTime() *timestamppb.Timestamp { + if x != nil { + return x.LastHeartbeatTime + } + return nil +} + +func (x *ActivitySyncInfo) GetDetails() *v14.Payloads { + if x != nil { + return x.Details + } + return nil +} + +func (x *ActivitySyncInfo) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *ActivitySyncInfo) GetLastFailure() *v13.Failure { + if x != nil { + return x.LastFailure + } + return nil +} + +func (x *ActivitySyncInfo) GetLastWorkerIdentity() string { + if x != nil { + return x.LastWorkerIdentity + } + return "" +} + +func (x *ActivitySyncInfo) GetVersionHistory() *v19.VersionHistory { + if x != nil { + return x.VersionHistory + } + return nil +} + +func (x *ActivitySyncInfo) GetLastStartedBuildId() string { + if x != nil { + return x.LastStartedBuildId + } + return "" +} + +func (x 
*ActivitySyncInfo) GetLastStartedRedirectCounter() int64 { + if x != nil { + return x.LastStartedRedirectCounter + } + return 0 +} + +func (x *ActivitySyncInfo) GetFirstScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.FirstScheduledTime + } + return nil +} + +func (x *ActivitySyncInfo) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.LastAttemptCompleteTime + } + return nil +} + +func (x *ActivitySyncInfo) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +func (x *ActivitySyncInfo) GetPaused() bool { + if x != nil { + return x.Paused + } + return false +} + +func (x *ActivitySyncInfo) GetRetryInitialInterval() *durationpb.Duration { + if x != nil { + return x.RetryInitialInterval + } + return nil +} + +func (x *ActivitySyncInfo) GetRetryMaximumInterval() *durationpb.Duration { + if x != nil { + return x.RetryMaximumInterval + } + return nil +} + +func (x *ActivitySyncInfo) GetRetryMaximumAttempts() int32 { + if x != nil { + return x.RetryMaximumAttempts + } + return 0 +} + +func (x *ActivitySyncInfo) GetRetryBackoffCoefficient() float64 { + if x != nil { + return x.RetryBackoffCoefficient + } + return 0 +} + +func (x *ActivitySyncInfo) GetStartVersion() int64 { + if x != nil { + return x.StartVersion + } + return 0 +} + +type SyncActivityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncActivityResponse) Reset() { + *x = SyncActivityResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncActivityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncActivityResponse) ProtoMessage() {} + +func (x *SyncActivityResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[65] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncActivityResponse.ProtoReflect.Descriptor instead. +func (*SyncActivityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{65} +} + +type DescribeMutableStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + SkipForceReload bool `protobuf:"varint,3,opt,name=skip_force_reload,json=skipForceReload,proto3" json:"skip_force_reload,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,4,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeMutableStateRequest) Reset() { + *x = DescribeMutableStateRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeMutableStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeMutableStateRequest) ProtoMessage() {} + +func (x *DescribeMutableStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[66] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use DescribeMutableStateRequest.ProtoReflect.Descriptor instead. +func (*DescribeMutableStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{66} +} + +func (x *DescribeMutableStateRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DescribeMutableStateRequest) GetExecution() *v14.WorkflowExecution { + if x != nil { + return x.Execution + } + return nil +} + +func (x *DescribeMutableStateRequest) GetSkipForceReload() bool { + if x != nil { + return x.SkipForceReload + } + return false +} + +func (x *DescribeMutableStateRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +type DescribeMutableStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // CacheMutableState is only available when mutable state is in cache. + CacheMutableState *v110.WorkflowMutableState `protobuf:"bytes,1,opt,name=cache_mutable_state,json=cacheMutableState,proto3" json:"cache_mutable_state,omitempty"` + // DatabaseMutableState is always available, + // but only loaded from database when mutable state is NOT in cache or skip_force_reload is false. 
+ DatabaseMutableState *v110.WorkflowMutableState `protobuf:"bytes,2,opt,name=database_mutable_state,json=databaseMutableState,proto3" json:"database_mutable_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeMutableStateResponse) Reset() { + *x = DescribeMutableStateResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeMutableStateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeMutableStateResponse) ProtoMessage() {} + +func (x *DescribeMutableStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[67] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeMutableStateResponse.ProtoReflect.Descriptor instead. +func (*DescribeMutableStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{67} +} + +func (x *DescribeMutableStateResponse) GetCacheMutableState() *v110.WorkflowMutableState { + if x != nil { + return x.CacheMutableState + } + return nil +} + +func (x *DescribeMutableStateResponse) GetDatabaseMutableState() *v110.WorkflowMutableState { + if x != nil { + return x.DatabaseMutableState + } + return nil +} + +// At least one of the parameters needs to be provided. 
+type DescribeHistoryHostRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + //ip:port + HostAddress string `protobuf:"bytes,1,opt,name=host_address,json=hostAddress,proto3" json:"host_address,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeHistoryHostRequest) Reset() { + *x = DescribeHistoryHostRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeHistoryHostRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeHistoryHostRequest) ProtoMessage() {} + +func (x *DescribeHistoryHostRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[68] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeHistoryHostRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeHistoryHostRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{68} +} + +func (x *DescribeHistoryHostRequest) GetHostAddress() string { + if x != nil { + return x.HostAddress + } + return "" +} + +func (x *DescribeHistoryHostRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *DescribeHistoryHostRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DescribeHistoryHostRequest) GetWorkflowExecution() *v14.WorkflowExecution { + if x != nil { + return x.WorkflowExecution + } + return nil +} + +type DescribeHistoryHostResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardsNumber int32 `protobuf:"varint,1,opt,name=shards_number,json=shardsNumber,proto3" json:"shards_number,omitempty"` + ShardIds []int32 `protobuf:"varint,2,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` + NamespaceCache *v116.NamespaceCacheInfo `protobuf:"bytes,3,opt,name=namespace_cache,json=namespaceCache,proto3" json:"namespace_cache,omitempty"` + Address string `protobuf:"bytes,5,opt,name=address,proto3" json:"address,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeHistoryHostResponse) Reset() { + *x = DescribeHistoryHostResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[69] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeHistoryHostResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeHistoryHostResponse) ProtoMessage() {} + +func (x *DescribeHistoryHostResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[69] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeHistoryHostResponse.ProtoReflect.Descriptor instead. +func (*DescribeHistoryHostResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{69} +} + +func (x *DescribeHistoryHostResponse) GetShardsNumber() int32 { + if x != nil { + return x.ShardsNumber + } + return 0 +} + +func (x *DescribeHistoryHostResponse) GetShardIds() []int32 { + if x != nil { + return x.ShardIds + } + return nil +} + +func (x *DescribeHistoryHostResponse) GetNamespaceCache() *v116.NamespaceCacheInfo { + if x != nil { + return x.NamespaceCache + } + return nil +} + +func (x *DescribeHistoryHostResponse) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +type CloseShardRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CloseShardRequest) Reset() { + *x = CloseShardRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CloseShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloseShardRequest) ProtoMessage() {} + +func (x *CloseShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[70] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseShardRequest.ProtoReflect.Descriptor instead. 
+func (*CloseShardRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{70} +} + +func (x *CloseShardRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +type CloseShardResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CloseShardResponse) Reset() { + *x = CloseShardResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CloseShardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloseShardResponse) ProtoMessage() {} + +func (x *CloseShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[71] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseShardResponse.ProtoReflect.Descriptor instead. 
+func (*CloseShardResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{71} +} + +type GetShardRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetShardRequest) Reset() { + *x = GetShardRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetShardRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetShardRequest) ProtoMessage() {} + +func (x *GetShardRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[72] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. 
+func (*GetShardRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{72} +} + +func (x *GetShardRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +type GetShardResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardInfo *v110.ShardInfo `protobuf:"bytes,1,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetShardResponse) Reset() { + *x = GetShardResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetShardResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetShardResponse) ProtoMessage() {} + +func (x *GetShardResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[73] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. +func (*GetShardResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{73} +} + +func (x *GetShardResponse) GetShardInfo() *v110.ShardInfo { + if x != nil { + return x.ShardInfo + } + return nil +} + +type RemoveTaskRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + // The task category. See tasks.TaskCategoryRegistry for more. 
+ Category int32 `protobuf:"varint,2,opt,name=category,proto3" json:"category,omitempty"` + TaskId int64 `protobuf:"varint,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemoveTaskRequest) Reset() { + *x = RemoveTaskRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemoveTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveTaskRequest) ProtoMessage() {} + +func (x *RemoveTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[74] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveTaskRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{74} +} + +func (x *RemoveTaskRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *RemoveTaskRequest) GetCategory() int32 { + if x != nil { + return x.Category + } + return 0 +} + +func (x *RemoveTaskRequest) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x *RemoveTaskRequest) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +type RemoveTaskResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RemoveTaskResponse) Reset() { + *x = RemoveTaskResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RemoveTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RemoveTaskResponse) ProtoMessage() {} + +func (x *RemoveTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[75] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RemoveTaskResponse.ProtoReflect.Descriptor instead. 
+func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{75} +} + +type GetReplicationMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Tokens []*v117.ReplicationToken `protobuf:"bytes,1,rep,name=tokens,proto3" json:"tokens,omitempty"` + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetReplicationMessagesRequest) Reset() { + *x = GetReplicationMessagesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetReplicationMessagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetReplicationMessagesRequest) ProtoMessage() {} + +func (x *GetReplicationMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[76] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetReplicationMessagesRequest.ProtoReflect.Descriptor instead. 
+func (*GetReplicationMessagesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{76} +} + +func (x *GetReplicationMessagesRequest) GetTokens() []*v117.ReplicationToken { + if x != nil { + return x.Tokens + } + return nil +} + +func (x *GetReplicationMessagesRequest) GetClusterName() string { + if x != nil { + return x.ClusterName + } + return "" +} + +type GetReplicationMessagesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardMessages map[int32]*v117.ReplicationMessages `protobuf:"bytes,1,rep,name=shard_messages,json=shardMessages,proto3" json:"shard_messages,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetReplicationMessagesResponse) Reset() { + *x = GetReplicationMessagesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetReplicationMessagesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetReplicationMessagesResponse) ProtoMessage() {} + +func (x *GetReplicationMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[77] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetReplicationMessagesResponse.ProtoReflect.Descriptor instead. 
+func (*GetReplicationMessagesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{77} +} + +func (x *GetReplicationMessagesResponse) GetShardMessages() map[int32]*v117.ReplicationMessages { + if x != nil { + return x.ShardMessages + } + return nil +} + +type GetDLQReplicationMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskInfos []*v117.ReplicationTaskInfo `protobuf:"bytes,1,rep,name=task_infos,json=taskInfos,proto3" json:"task_infos,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetDLQReplicationMessagesRequest) Reset() { + *x = GetDLQReplicationMessagesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetDLQReplicationMessagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDLQReplicationMessagesRequest) ProtoMessage() {} + +func (x *GetDLQReplicationMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[78] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDLQReplicationMessagesRequest.ProtoReflect.Descriptor instead. 
+func (*GetDLQReplicationMessagesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{78} +} + +func (x *GetDLQReplicationMessagesRequest) GetTaskInfos() []*v117.ReplicationTaskInfo { + if x != nil { + return x.TaskInfos + } + return nil +} + +type GetDLQReplicationMessagesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ReplicationTasks []*v117.ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetDLQReplicationMessagesResponse) Reset() { + *x = GetDLQReplicationMessagesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetDLQReplicationMessagesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDLQReplicationMessagesResponse) ProtoMessage() {} + +func (x *GetDLQReplicationMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[79] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDLQReplicationMessagesResponse.ProtoReflect.Descriptor instead. 
+func (*GetDLQReplicationMessagesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{79} +} + +func (x *GetDLQReplicationMessagesResponse) GetReplicationTasks() []*v117.ReplicationTask { + if x != nil { + return x.ReplicationTasks + } + return nil +} + +type QueryWorkflowRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.QueryWorkflowRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QueryWorkflowRequest) Reset() { + *x = QueryWorkflowRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QueryWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWorkflowRequest) ProtoMessage() {} + +func (x *QueryWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[80] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*QueryWorkflowRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{80} +} + +func (x *QueryWorkflowRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *QueryWorkflowRequest) GetRequest() *v1.QueryWorkflowRequest { + if x != nil { + return x.Request + } + return nil +} + +type QueryWorkflowResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.QueryWorkflowResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *QueryWorkflowResponse) Reset() { + *x = QueryWorkflowResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *QueryWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryWorkflowResponse) ProtoMessage() {} + +func (x *QueryWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[81] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryWorkflowResponse.ProtoReflect.Descriptor instead. 
+func (*QueryWorkflowResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{81} +} + +func (x *QueryWorkflowResponse) GetResponse() *v1.QueryWorkflowResponse { + if x != nil { + return x.Response + } + return nil +} + +type ReapplyEventsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v118.ReapplyEventsRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReapplyEventsRequest) Reset() { + *x = ReapplyEventsRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[82] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReapplyEventsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReapplyEventsRequest) ProtoMessage() {} + +func (x *ReapplyEventsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[82] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReapplyEventsRequest.ProtoReflect.Descriptor instead. 
+func (*ReapplyEventsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{82} +} + +func (x *ReapplyEventsRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ReapplyEventsRequest) GetRequest() *v118.ReapplyEventsRequest { + if x != nil { + return x.Request + } + return nil +} + +type ReapplyEventsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReapplyEventsResponse) Reset() { + *x = ReapplyEventsResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[83] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReapplyEventsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReapplyEventsResponse) ProtoMessage() {} + +func (x *ReapplyEventsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[83] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReapplyEventsResponse.ProtoReflect.Descriptor instead. 
+func (*ReapplyEventsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{83} +} + +type GetDLQMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type v112.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` + InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` + MaximumPageSize int32 `protobuf:"varint,5,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetDLQMessagesRequest) Reset() { + *x = GetDLQMessagesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetDLQMessagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDLQMessagesRequest) ProtoMessage() {} + +func (x *GetDLQMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[84] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDLQMessagesRequest.ProtoReflect.Descriptor instead. 
+func (*GetDLQMessagesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{84} +} + +func (x *GetDLQMessagesRequest) GetType() v112.DeadLetterQueueType { + if x != nil { + return x.Type + } + return v112.DeadLetterQueueType(0) +} + +func (x *GetDLQMessagesRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *GetDLQMessagesRequest) GetSourceCluster() string { + if x != nil { + return x.SourceCluster + } + return "" +} + +func (x *GetDLQMessagesRequest) GetInclusiveEndMessageId() int64 { + if x != nil { + return x.InclusiveEndMessageId + } + return 0 +} + +func (x *GetDLQMessagesRequest) GetMaximumPageSize() int32 { + if x != nil { + return x.MaximumPageSize + } + return 0 +} + +func (x *GetDLQMessagesRequest) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +type GetDLQMessagesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type v112.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + ReplicationTasks []*v117.ReplicationTask `protobuf:"bytes,2,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` + NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + ReplicationTasksInfo []*v117.ReplicationTaskInfo `protobuf:"bytes,4,rep,name=replication_tasks_info,json=replicationTasksInfo,proto3" json:"replication_tasks_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetDLQMessagesResponse) Reset() { + *x = GetDLQMessagesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetDLQMessagesResponse) 
String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDLQMessagesResponse) ProtoMessage() {} + +func (x *GetDLQMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[85] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDLQMessagesResponse.ProtoReflect.Descriptor instead. +func (*GetDLQMessagesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{85} +} + +func (x *GetDLQMessagesResponse) GetType() v112.DeadLetterQueueType { + if x != nil { + return x.Type + } + return v112.DeadLetterQueueType(0) +} + +func (x *GetDLQMessagesResponse) GetReplicationTasks() []*v117.ReplicationTask { + if x != nil { + return x.ReplicationTasks + } + return nil +} + +func (x *GetDLQMessagesResponse) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +func (x *GetDLQMessagesResponse) GetReplicationTasksInfo() []*v117.ReplicationTaskInfo { + if x != nil { + return x.ReplicationTasksInfo + } + return nil +} + +type PurgeDLQMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type v112.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` + InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func 
(x *PurgeDLQMessagesRequest) Reset() { + *x = PurgeDLQMessagesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[86] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PurgeDLQMessagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PurgeDLQMessagesRequest) ProtoMessage() {} + +func (x *PurgeDLQMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[86] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PurgeDLQMessagesRequest.ProtoReflect.Descriptor instead. +func (*PurgeDLQMessagesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{86} +} + +func (x *PurgeDLQMessagesRequest) GetType() v112.DeadLetterQueueType { + if x != nil { + return x.Type + } + return v112.DeadLetterQueueType(0) +} + +func (x *PurgeDLQMessagesRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *PurgeDLQMessagesRequest) GetSourceCluster() string { + if x != nil { + return x.SourceCluster + } + return "" +} + +func (x *PurgeDLQMessagesRequest) GetInclusiveEndMessageId() int64 { + if x != nil { + return x.InclusiveEndMessageId + } + return 0 +} + +type PurgeDLQMessagesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PurgeDLQMessagesResponse) Reset() { + *x = PurgeDLQMessagesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[87] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PurgeDLQMessagesResponse) String() string { + 
return protoimpl.X.MessageStringOf(x) +} + +func (*PurgeDLQMessagesResponse) ProtoMessage() {} + +func (x *PurgeDLQMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[87] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PurgeDLQMessagesResponse.ProtoReflect.Descriptor instead. +func (*PurgeDLQMessagesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{87} +} + +type MergeDLQMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type v112.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` + InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` + MaximumPageSize int32 `protobuf:"varint,5,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MergeDLQMessagesRequest) Reset() { + *x = MergeDLQMessagesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MergeDLQMessagesRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*MergeDLQMessagesRequest) ProtoMessage() {} + +func (x *MergeDLQMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[88] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MergeDLQMessagesRequest.ProtoReflect.Descriptor instead. +func (*MergeDLQMessagesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{88} +} + +func (x *MergeDLQMessagesRequest) GetType() v112.DeadLetterQueueType { + if x != nil { + return x.Type + } + return v112.DeadLetterQueueType(0) +} + +func (x *MergeDLQMessagesRequest) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 +} + +func (x *MergeDLQMessagesRequest) GetSourceCluster() string { + if x != nil { + return x.SourceCluster + } + return "" +} + +func (x *MergeDLQMessagesRequest) GetInclusiveEndMessageId() int64 { + if x != nil { + return x.InclusiveEndMessageId + } + return 0 +} + +func (x *MergeDLQMessagesRequest) GetMaximumPageSize() int32 { + if x != nil { + return x.MaximumPageSize + } + return 0 +} + +func (x *MergeDLQMessagesRequest) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +type MergeDLQMessagesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MergeDLQMessagesResponse) Reset() { + *x = MergeDLQMessagesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *MergeDLQMessagesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MergeDLQMessagesResponse) ProtoMessage() {} + +func (x *MergeDLQMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[89] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MergeDLQMessagesResponse.ProtoReflect.Descriptor instead. +func (*MergeDLQMessagesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{89} +} + +func (x *MergeDLQMessagesResponse) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +type RefreshWorkflowTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,3,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + Request *v118.RefreshWorkflowTasksRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RefreshWorkflowTasksRequest) Reset() { + *x = RefreshWorkflowTasksRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[90] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RefreshWorkflowTasksRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RefreshWorkflowTasksRequest) ProtoMessage() {} + +func (x *RefreshWorkflowTasksRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[90] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RefreshWorkflowTasksRequest.ProtoReflect.Descriptor instead. +func (*RefreshWorkflowTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{90} +} + +func (x *RefreshWorkflowTasksRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RefreshWorkflowTasksRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +func (x *RefreshWorkflowTasksRequest) GetRequest() *v118.RefreshWorkflowTasksRequest { + if x != nil { + return x.Request + } + return nil +} + +type RefreshWorkflowTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ActivitySyncInfo) Reset() { - *x = ActivitySyncInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[60] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RefreshWorkflowTasksResponse) Reset() { + *x = RefreshWorkflowTasksResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[91] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ActivitySyncInfo) String() string { +func (x *RefreshWorkflowTasksResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ActivitySyncInfo) ProtoMessage() {} +func (*RefreshWorkflowTasksResponse) ProtoMessage() {} -func (x *ActivitySyncInfo) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[60] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RefreshWorkflowTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[91] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4140,112 +6502,104 @@ func (x *ActivitySyncInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ActivitySyncInfo.ProtoReflect.Descriptor instead. -func (*ActivitySyncInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{60} -} - -func (x *ActivitySyncInfo) GetVersion() int64 { - if x != nil { - return x.Version - } - return 0 +// Deprecated: Use RefreshWorkflowTasksResponse.ProtoReflect.Descriptor instead. +func (*RefreshWorkflowTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{91} } -func (x *ActivitySyncInfo) GetScheduledEventId() int64 { - if x != nil { - return x.ScheduledEventId - } - return 0 +type GenerateLastHistoryReplicationTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + TargetClusters []string `protobuf:"bytes,3,rep,name=target_clusters,json=targetClusters,proto3" json:"target_clusters,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,4,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ActivitySyncInfo) 
GetScheduledTime() *timestamppb.Timestamp { - if x != nil { - return x.ScheduledTime - } - return nil +func (x *GenerateLastHistoryReplicationTasksRequest) Reset() { + *x = GenerateLastHistoryReplicationTasksRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[92] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ActivitySyncInfo) GetStartedEventId() int64 { - if x != nil { - return x.StartedEventId - } - return 0 +func (x *GenerateLastHistoryReplicationTasksRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *ActivitySyncInfo) GetStartedTime() *timestamppb.Timestamp { - if x != nil { - return x.StartedTime - } - return nil -} +func (*GenerateLastHistoryReplicationTasksRequest) ProtoMessage() {} -func (x *ActivitySyncInfo) GetLastHeartbeatTime() *timestamppb.Timestamp { +func (x *GenerateLastHistoryReplicationTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[92] if x != nil { - return x.LastHeartbeatTime + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *ActivitySyncInfo) GetDetails() *v14.Payloads { - if x != nil { - return x.Details - } - return nil +// Deprecated: Use GenerateLastHistoryReplicationTasksRequest.ProtoReflect.Descriptor instead. 
+func (*GenerateLastHistoryReplicationTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{92} } -func (x *ActivitySyncInfo) GetAttempt() int32 { +func (x *GenerateLastHistoryReplicationTasksRequest) GetNamespaceId() string { if x != nil { - return x.Attempt + return x.NamespaceId } - return 0 + return "" } -func (x *ActivitySyncInfo) GetLastFailure() *v13.Failure { +func (x *GenerateLastHistoryReplicationTasksRequest) GetExecution() *v14.WorkflowExecution { if x != nil { - return x.LastFailure + return x.Execution } return nil } -func (x *ActivitySyncInfo) GetLastWorkerIdentity() string { +func (x *GenerateLastHistoryReplicationTasksRequest) GetTargetClusters() []string { if x != nil { - return x.LastWorkerIdentity + return x.TargetClusters } - return "" + return nil } -func (x *ActivitySyncInfo) GetVersionHistory() *v16.VersionHistory { +func (x *GenerateLastHistoryReplicationTasksRequest) GetArchetypeId() uint32 { if x != nil { - return x.VersionHistory + return x.ArchetypeId } - return nil + return 0 } -type SyncActivityResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type GenerateLastHistoryReplicationTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + StateTransitionCount int64 `protobuf:"varint,1,opt,name=state_transition_count,json=stateTransitionCount,proto3" json:"state_transition_count,omitempty"` + HistoryLength int64 `protobuf:"varint,2,opt,name=history_length,json=historyLength,proto3" json:"history_length,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *SyncActivityResponse) Reset() { - *x = SyncActivityResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[61] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func 
(x *GenerateLastHistoryReplicationTasksResponse) Reset() { + *x = GenerateLastHistoryReplicationTasksResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[93] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SyncActivityResponse) String() string { +func (x *GenerateLastHistoryReplicationTasksResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SyncActivityResponse) ProtoMessage() {} +func (*GenerateLastHistoryReplicationTasksResponse) ProtoMessage() {} -func (x *SyncActivityResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[61] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GenerateLastHistoryReplicationTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[93] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4255,38 +6609,49 @@ func (x *SyncActivityResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use SyncActivityResponse.ProtoReflect.Descriptor instead. -func (*SyncActivityResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{61} +// Deprecated: Use GenerateLastHistoryReplicationTasksResponse.ProtoReflect.Descriptor instead. 
+func (*GenerateLastHistoryReplicationTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{93} } -type DescribeMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` +func (x *GenerateLastHistoryReplicationTasksResponse) GetStateTransitionCount() int64 { + if x != nil { + return x.StateTransitionCount + } + return 0 } -func (x *DescribeMutableStateRequest) Reset() { - *x = DescribeMutableStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[62] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *GenerateLastHistoryReplicationTasksResponse) GetHistoryLength() int64 { + if x != nil { + return x.HistoryLength } + return 0 } -func (x *DescribeMutableStateRequest) String() string { +type GetReplicationStatusRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Remote cluster names to query for. If omit, will return for all remote clusters. 
+ RemoteClusters []string `protobuf:"bytes,1,rep,name=remote_clusters,json=remoteClusters,proto3" json:"remote_clusters,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetReplicationStatusRequest) Reset() { + *x = GetReplicationStatusRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[94] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetReplicationStatusRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeMutableStateRequest) ProtoMessage() {} +func (*GetReplicationStatusRequest) ProtoMessage() {} -func (x *DescribeMutableStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[62] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetReplicationStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[94] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4296,52 +6661,41 @@ func (x *DescribeMutableStateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DescribeMutableStateRequest.ProtoReflect.Descriptor instead. -func (*DescribeMutableStateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{62} -} - -func (x *DescribeMutableStateRequest) GetNamespaceId() string { - if x != nil { - return x.NamespaceId - } - return "" +// Deprecated: Use GetReplicationStatusRequest.ProtoReflect.Descriptor instead. 
+func (*GetReplicationStatusRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{94} } -func (x *DescribeMutableStateRequest) GetExecution() *v14.WorkflowExecution { +func (x *GetReplicationStatusRequest) GetRemoteClusters() []string { if x != nil { - return x.Execution + return x.RemoteClusters } return nil } -type DescribeMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetReplicationStatusResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Shards []*ShardReplicationStatus `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` unknownFields protoimpl.UnknownFields - - CacheMutableState *v113.WorkflowMutableState `protobuf:"bytes,1,opt,name=cache_mutable_state,json=cacheMutableState,proto3" json:"cache_mutable_state,omitempty"` - DatabaseMutableState *v113.WorkflowMutableState `protobuf:"bytes,2,opt,name=database_mutable_state,json=databaseMutableState,proto3" json:"database_mutable_state,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *DescribeMutableStateResponse) Reset() { - *x = DescribeMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[63] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetReplicationStatusResponse) Reset() { + *x = GetReplicationStatusResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[95] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DescribeMutableStateResponse) String() string { +func (x *GetReplicationStatusResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeMutableStateResponse) ProtoMessage() {} +func (*GetReplicationStatusResponse) ProtoMessage() {} -func (x *DescribeMutableStateResponse) ProtoReflect() 
protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[63] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetReplicationStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[95] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4351,127 +6705,128 @@ func (x *DescribeMutableStateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DescribeMutableStateResponse.ProtoReflect.Descriptor instead. -func (*DescribeMutableStateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{63} -} - -func (x *DescribeMutableStateResponse) GetCacheMutableState() *v113.WorkflowMutableState { - if x != nil { - return x.CacheMutableState - } - return nil +// Deprecated: Use GetReplicationStatusResponse.ProtoReflect.Descriptor instead. +func (*GetReplicationStatusResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{95} } -func (x *DescribeMutableStateResponse) GetDatabaseMutableState() *v113.WorkflowMutableState { +func (x *GetReplicationStatusResponse) GetShards() []*ShardReplicationStatus { if x != nil { - return x.DatabaseMutableState + return x.Shards } return nil } -// At least one of the parameters needs to be provided. 
-type DescribeHistoryHostRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - //ip:port - HostAddress string `protobuf:"bytes,1,opt,name=host_address,json=hostAddress,proto3" json:"host_address,omitempty"` - ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowExecution *v14.WorkflowExecution `protobuf:"bytes,4,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` +type ShardReplicationStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + // Max replication task id of current cluster + MaxReplicationTaskId int64 `protobuf:"varint,2,opt,name=max_replication_task_id,json=maxReplicationTaskId,proto3" json:"max_replication_task_id,omitempty"` + // Local time on this shard + ShardLocalTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=shard_local_time,json=shardLocalTime,proto3" json:"shard_local_time,omitempty"` + RemoteClusters map[string]*ShardReplicationStatusPerCluster `protobuf:"bytes,4,rep,name=remote_clusters,json=remoteClusters,proto3" json:"remote_clusters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + HandoverNamespaces map[string]*HandoverNamespaceInfo `protobuf:"bytes,5,rep,name=handover_namespaces,json=handoverNamespaces,proto3" json:"handover_namespaces,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + MaxReplicationTaskVisibilityTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=max_replication_task_visibility_time,json=maxReplicationTaskVisibilityTime,proto3" json:"max_replication_task_visibility_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache } -func (x *DescribeHistoryHostRequest) Reset() { - *x = DescribeHistoryHostRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[64] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ShardReplicationStatus) Reset() { + *x = ShardReplicationStatus{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[96] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DescribeHistoryHostRequest) String() string { +func (x *ShardReplicationStatus) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeHistoryHostRequest) ProtoMessage() {} +func (*ShardReplicationStatus) ProtoMessage() {} -func (x *DescribeHistoryHostRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[64] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ShardReplicationStatus) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[96] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } - return mi.MessageOf(x) + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationStatus.ProtoReflect.Descriptor instead. +func (*ShardReplicationStatus) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{96} +} + +func (x *ShardReplicationStatus) GetShardId() int32 { + if x != nil { + return x.ShardId + } + return 0 } -// Deprecated: Use DescribeHistoryHostRequest.ProtoReflect.Descriptor instead. 
-func (*DescribeHistoryHostRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{64} +func (x *ShardReplicationStatus) GetMaxReplicationTaskId() int64 { + if x != nil { + return x.MaxReplicationTaskId + } + return 0 } -func (x *DescribeHistoryHostRequest) GetHostAddress() string { +func (x *ShardReplicationStatus) GetShardLocalTime() *timestamppb.Timestamp { if x != nil { - return x.HostAddress + return x.ShardLocalTime } - return "" + return nil } -func (x *DescribeHistoryHostRequest) GetShardId() int32 { +func (x *ShardReplicationStatus) GetRemoteClusters() map[string]*ShardReplicationStatusPerCluster { if x != nil { - return x.ShardId + return x.RemoteClusters } - return 0 + return nil } -func (x *DescribeHistoryHostRequest) GetNamespaceId() string { +func (x *ShardReplicationStatus) GetHandoverNamespaces() map[string]*HandoverNamespaceInfo { if x != nil { - return x.NamespaceId + return x.HandoverNamespaces } - return "" + return nil } -func (x *DescribeHistoryHostRequest) GetWorkflowExecution() *v14.WorkflowExecution { +func (x *ShardReplicationStatus) GetMaxReplicationTaskVisibilityTime() *timestamppb.Timestamp { if x != nil { - return x.WorkflowExecution + return x.MaxReplicationTaskVisibilityTime } return nil } -type DescribeHistoryHostResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ShardsNumber int32 `protobuf:"varint,1,opt,name=shards_number,json=shardsNumber,proto3" json:"shards_number,omitempty"` - ShardIds []int32 `protobuf:"varint,2,rep,packed,name=shard_ids,json=shardIds,proto3" json:"shard_ids,omitempty"` - NamespaceCache *v114.NamespaceCacheInfo `protobuf:"bytes,3,opt,name=namespace_cache,json=namespaceCache,proto3" json:"namespace_cache,omitempty"` - Address string `protobuf:"bytes,5,opt,name=address,proto3" json:"address,omitempty"` +type HandoverNamespaceInfo struct { + state 
protoimpl.MessageState `protogen:"open.v1"` + // max replication task id when namespace transition to Handover state + HandoverReplicationTaskId int64 `protobuf:"varint,1,opt,name=handover_replication_task_id,json=handoverReplicationTaskId,proto3" json:"handover_replication_task_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DescribeHistoryHostResponse) Reset() { - *x = DescribeHistoryHostResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[65] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *HandoverNamespaceInfo) Reset() { + *x = HandoverNamespaceInfo{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[97] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DescribeHistoryHostResponse) String() string { +func (x *HandoverNamespaceInfo) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeHistoryHostResponse) ProtoMessage() {} +func (*HandoverNamespaceInfo) ProtoMessage() {} -func (x *DescribeHistoryHostResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[65] - if protoimpl.UnsafeEnabled && x != nil { +func (x *HandoverNamespaceInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[97] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4481,65 +6836,96 @@ func (x *DescribeHistoryHostResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DescribeHistoryHostResponse.ProtoReflect.Descriptor instead. 
-func (*DescribeHistoryHostResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{65} +// Deprecated: Use HandoverNamespaceInfo.ProtoReflect.Descriptor instead. +func (*HandoverNamespaceInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{97} } -func (x *DescribeHistoryHostResponse) GetShardsNumber() int32 { +func (x *HandoverNamespaceInfo) GetHandoverReplicationTaskId() int64 { if x != nil { - return x.ShardsNumber + return x.HandoverReplicationTaskId } return 0 } -func (x *DescribeHistoryHostResponse) GetShardIds() []int32 { +type ShardReplicationStatusPerCluster struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Acked replication task id + AckedTaskId int64 `protobuf:"varint,1,opt,name=acked_task_id,json=ackedTaskId,proto3" json:"acked_task_id,omitempty"` + // Acked replication task creation time + AckedTaskVisibilityTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=acked_task_visibility_time,json=ackedTaskVisibilityTime,proto3" json:"acked_task_visibility_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ShardReplicationStatusPerCluster) Reset() { + *x = ShardReplicationStatusPerCluster{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[98] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ShardReplicationStatusPerCluster) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShardReplicationStatusPerCluster) ProtoMessage() {} + +func (x *ShardReplicationStatusPerCluster) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[98] if x != nil { - return x.ShardIds + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) +} + +// Deprecated: Use ShardReplicationStatusPerCluster.ProtoReflect.Descriptor instead. +func (*ShardReplicationStatusPerCluster) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{98} } -func (x *DescribeHistoryHostResponse) GetNamespaceCache() *v114.NamespaceCacheInfo { +func (x *ShardReplicationStatusPerCluster) GetAckedTaskId() int64 { if x != nil { - return x.NamespaceCache + return x.AckedTaskId } - return nil + return 0 } -func (x *DescribeHistoryHostResponse) GetAddress() string { +func (x *ShardReplicationStatusPerCluster) GetAckedTaskVisibilityTime() *timestamppb.Timestamp { if x != nil { - return x.Address + return x.AckedTaskVisibilityTime } - return "" + return nil } -type CloseShardRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RebuildMutableStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *CloseShardRequest) Reset() { - *x = CloseShardRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[66] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RebuildMutableStateRequest) Reset() { + *x = RebuildMutableStateRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[99] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CloseShardRequest) 
String() string { +func (x *RebuildMutableStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CloseShardRequest) ProtoMessage() {} +func (*RebuildMutableStateRequest) ProtoMessage() {} -func (x *CloseShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[66] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RebuildMutableStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[99] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4549,42 +6935,47 @@ func (x *CloseShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CloseShardRequest.ProtoReflect.Descriptor instead. -func (*CloseShardRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{66} +// Deprecated: Use RebuildMutableStateRequest.ProtoReflect.Descriptor instead. 
+func (*RebuildMutableStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{99} } -func (x *CloseShardRequest) GetShardId() int32 { +func (x *RebuildMutableStateRequest) GetNamespaceId() string { if x != nil { - return x.ShardId + return x.NamespaceId } - return 0 + return "" } -type CloseShardResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +func (x *RebuildMutableStateRequest) GetExecution() *v14.WorkflowExecution { + if x != nil { + return x.Execution + } + return nil +} + +type RebuildMutableStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *CloseShardResponse) Reset() { - *x = CloseShardResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[67] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RebuildMutableStateResponse) Reset() { + *x = RebuildMutableStateResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[100] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CloseShardResponse) String() string { +func (x *RebuildMutableStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CloseShardResponse) ProtoMessage() {} +func (*RebuildMutableStateResponse) ProtoMessage() {} -func (x *CloseShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[67] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RebuildMutableStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[100] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4594,37 +6985,38 @@ func (x *CloseShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CloseShardResponse.ProtoReflect.Descriptor instead. -func (*CloseShardResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{67} +// Deprecated: Use RebuildMutableStateResponse.ProtoReflect.Descriptor instead. +func (*RebuildMutableStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{100} } -type GetShardRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` +type ImportWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + HistoryBatches []*v14.DataBlob `protobuf:"bytes,3,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` + VersionHistory *v19.VersionHistory `protobuf:"bytes,4,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + Token []byte `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetShardRequest) Reset() { - *x = GetShardRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[68] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ImportWorkflowExecutionRequest) Reset() { + *x = 
ImportWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[101] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetShardRequest) String() string { +func (x *ImportWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardRequest) ProtoMessage() {} +func (*ImportWorkflowExecutionRequest) ProtoMessage() {} -func (x *GetShardRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[68] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ImportWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[101] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4634,44 +7026,70 @@ func (x *GetShardRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardRequest.ProtoReflect.Descriptor instead. -func (*GetShardRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{68} +// Deprecated: Use ImportWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*ImportWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{101} } -func (x *GetShardRequest) GetShardId() int32 { +func (x *ImportWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { - return x.ShardId + return x.NamespaceId } - return 0 + return "" } -type GetShardResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ImportWorkflowExecutionRequest) GetExecution() *v14.WorkflowExecution { + if x != nil { + return x.Execution + } + return nil +} - ShardInfo *v113.ShardInfo `protobuf:"bytes,1,opt,name=shard_info,json=shardInfo,proto3" json:"shard_info,omitempty"` +func (x *ImportWorkflowExecutionRequest) GetHistoryBatches() []*v14.DataBlob { + if x != nil { + return x.HistoryBatches + } + return nil } -func (x *GetShardResponse) Reset() { - *x = GetShardResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[69] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ImportWorkflowExecutionRequest) GetVersionHistory() *v19.VersionHistory { + if x != nil { + return x.VersionHistory } + return nil } -func (x *GetShardResponse) String() string { +func (x *ImportWorkflowExecutionRequest) GetToken() []byte { + if x != nil { + return x.Token + } + return nil +} + +type ImportWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Token []byte `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + EventsApplied bool `protobuf:"varint,2,opt,name=events_applied,json=eventsApplied,proto3" json:"events_applied,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ImportWorkflowExecutionResponse) Reset() { + *x = ImportWorkflowExecutionResponse{} + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[102] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ImportWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetShardResponse) ProtoMessage() {} +func (*ImportWorkflowExecutionResponse) ProtoMessage() {} -func (x *GetShardResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[69] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ImportWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[102] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4681,48 +7099,51 @@ func (x *GetShardResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetShardResponse.ProtoReflect.Descriptor instead. -func (*GetShardResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{69} +// Deprecated: Use ImportWorkflowExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*ImportWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{102} } -func (x *GetShardResponse) GetShardInfo() *v113.ShardInfo { +func (x *ImportWorkflowExecutionResponse) GetToken() []byte { if x != nil { - return x.ShardInfo + return x.Token } return nil } -type RemoveTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ImportWorkflowExecutionResponse) GetEventsApplied() bool { + if x != nil { + return x.EventsApplied + } + return false +} - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - // The task category. See tasks.TaskCategoryRegistry for more. - Category int32 `protobuf:"varint,2,opt,name=category,proto3" json:"category,omitempty"` - TaskId int64 `protobuf:"varint,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` +type DeleteWorkflowVisibilityRecordRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + WorkflowStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=workflow_start_time,json=workflowStartTime,proto3" json:"workflow_start_time,omitempty"` + WorkflowCloseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=workflow_close_time,json=workflowCloseTime,proto3" json:"workflow_close_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RemoveTaskRequest) Reset() { - *x = RemoveTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[70] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteWorkflowVisibilityRecordRequest) Reset() { + *x = DeleteWorkflowVisibilityRecordRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[103] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RemoveTaskRequest) String() string { +func (x *DeleteWorkflowVisibilityRecordRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveTaskRequest) ProtoMessage() {} +func (*DeleteWorkflowVisibilityRecordRequest) ProtoMessage() {} -func (x *RemoveTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[70] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteWorkflowVisibilityRecordRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[103] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4732,63 +7153,61 @@ func (x *RemoveTaskRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveTaskRequest.ProtoReflect.Descriptor instead. -func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{70} +// Deprecated: Use DeleteWorkflowVisibilityRecordRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteWorkflowVisibilityRecordRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{103} } -func (x *RemoveTaskRequest) GetShardId() int32 { +func (x *DeleteWorkflowVisibilityRecordRequest) GetNamespaceId() string { if x != nil { - return x.ShardId + return x.NamespaceId } - return 0 + return "" } -func (x *RemoveTaskRequest) GetCategory() int32 { +func (x *DeleteWorkflowVisibilityRecordRequest) GetExecution() *v14.WorkflowExecution { if x != nil { - return x.Category + return x.Execution } - return 0 + return nil } -func (x *RemoveTaskRequest) GetTaskId() int64 { +func (x *DeleteWorkflowVisibilityRecordRequest) GetWorkflowStartTime() *timestamppb.Timestamp { if x != nil { - return x.TaskId + return x.WorkflowStartTime } - return 0 + return nil } -func (x *RemoveTaskRequest) GetVisibilityTime() *timestamppb.Timestamp { +func (x *DeleteWorkflowVisibilityRecordRequest) GetWorkflowCloseTime() *timestamppb.Timestamp { if x != nil { - return x.VisibilityTime + return x.WorkflowCloseTime } return nil } -type RemoveTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DeleteWorkflowVisibilityRecordResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RemoveTaskResponse) Reset() { - *x = RemoveTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[71] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteWorkflowVisibilityRecordResponse) Reset() { + *x = DeleteWorkflowVisibilityRecordResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[104] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RemoveTaskResponse) String() string { +func (x 
*DeleteWorkflowVisibilityRecordResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RemoveTaskResponse) ProtoMessage() {} +func (*DeleteWorkflowVisibilityRecordResponse) ProtoMessage() {} -func (x *RemoveTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[71] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteWorkflowVisibilityRecordResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[104] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4798,38 +7217,38 @@ func (x *RemoveTaskResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RemoveTaskResponse.ProtoReflect.Descriptor instead. -func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{71} +// Deprecated: Use DeleteWorkflowVisibilityRecordResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteWorkflowVisibilityRecordResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{104} } -type GetReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// (-- api-linter: core::0134=disabled +// +// aip.dev/not-precedent: This service does not follow the update method AIP --) +type UpdateWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.UpdateWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - Tokens []*v115.ReplicationToken `protobuf:"bytes,1,rep,name=tokens,proto3" json:"tokens,omitempty"` - ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetReplicationMessagesRequest) Reset() { - *x = GetReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[72] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateWorkflowExecutionRequest) Reset() { + *x = UpdateWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[105] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetReplicationMessagesRequest) String() string { +func (x *UpdateWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetReplicationMessagesRequest) ProtoMessage() {} +func (*UpdateWorkflowExecutionRequest) ProtoMessage() {} -func (x *GetReplicationMessagesRequest) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[72] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[105] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4839,51 +7258,48 @@ func (x *GetReplicationMessagesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetReplicationMessagesRequest.ProtoReflect.Descriptor instead. -func (*GetReplicationMessagesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{72} +// Deprecated: Use UpdateWorkflowExecutionRequest.ProtoReflect.Descriptor instead. +func (*UpdateWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{105} } -func (x *GetReplicationMessagesRequest) GetTokens() []*v115.ReplicationToken { +func (x *UpdateWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { - return x.Tokens + return x.NamespaceId } - return nil + return "" } -func (x *GetReplicationMessagesRequest) GetClusterName() string { +func (x *UpdateWorkflowExecutionRequest) GetRequest() *v1.UpdateWorkflowExecutionRequest { if x != nil { - return x.ClusterName + return x.Request } - return "" + return nil } -type GetReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type UpdateWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.UpdateWorkflowExecutionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields - - ShardMessages map[int32]*v115.ReplicationMessages 
`protobuf:"bytes,1,rep,name=shard_messages,json=shardMessages,proto3" json:"shard_messages,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } -func (x *GetReplicationMessagesResponse) Reset() { - *x = GetReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[73] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateWorkflowExecutionResponse) Reset() { + *x = UpdateWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[106] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetReplicationMessagesResponse) String() string { +func (x *UpdateWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetReplicationMessagesResponse) ProtoMessage() {} +func (*UpdateWorkflowExecutionResponse) ProtoMessage() {} -func (x *GetReplicationMessagesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[73] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[106] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4893,44 +7309,44 @@ func (x *GetReplicationMessagesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetReplicationMessagesResponse.ProtoReflect.Descriptor instead. 
-func (*GetReplicationMessagesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{73} +// Deprecated: Use UpdateWorkflowExecutionResponse.ProtoReflect.Descriptor instead. +func (*UpdateWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{106} } -func (x *GetReplicationMessagesResponse) GetShardMessages() map[int32]*v115.ReplicationMessages { +func (x *UpdateWorkflowExecutionResponse) GetResponse() *v1.UpdateWorkflowExecutionResponse { if x != nil { - return x.ShardMessages + return x.Response } return nil } -type GetDLQReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type StreamWorkflowReplicationMessagesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Attributes: + // + // *StreamWorkflowReplicationMessagesRequest_SyncReplicationState + Attributes isStreamWorkflowReplicationMessagesRequest_Attributes `protobuf_oneof:"attributes"` unknownFields protoimpl.UnknownFields - - TaskInfos []*v115.ReplicationTaskInfo `protobuf:"bytes,1,rep,name=task_infos,json=taskInfos,proto3" json:"task_infos,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetDLQReplicationMessagesRequest) Reset() { - *x = GetDLQReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[74] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *StreamWorkflowReplicationMessagesRequest) Reset() { + *x = StreamWorkflowReplicationMessagesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[107] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetDLQReplicationMessagesRequest) String() string { 
+func (x *StreamWorkflowReplicationMessagesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetDLQReplicationMessagesRequest) ProtoMessage() {} +func (*StreamWorkflowReplicationMessagesRequest) ProtoMessage() {} -func (x *GetDLQReplicationMessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[74] - if protoimpl.UnsafeEnabled && x != nil { +func (x *StreamWorkflowReplicationMessagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[107] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4940,44 +7356,64 @@ func (x *GetDLQReplicationMessagesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetDLQReplicationMessagesRequest.ProtoReflect.Descriptor instead. -func (*GetDLQReplicationMessagesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{74} +// Deprecated: Use StreamWorkflowReplicationMessagesRequest.ProtoReflect.Descriptor instead. 
+func (*StreamWorkflowReplicationMessagesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{107} } -func (x *GetDLQReplicationMessagesRequest) GetTaskInfos() []*v115.ReplicationTaskInfo { +func (x *StreamWorkflowReplicationMessagesRequest) GetAttributes() isStreamWorkflowReplicationMessagesRequest_Attributes { if x != nil { - return x.TaskInfos + return x.Attributes } return nil } -type GetDLQReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *StreamWorkflowReplicationMessagesRequest) GetSyncReplicationState() *v117.SyncReplicationState { + if x != nil { + if x, ok := x.Attributes.(*StreamWorkflowReplicationMessagesRequest_SyncReplicationState); ok { + return x.SyncReplicationState + } + } + return nil +} - ReplicationTasks []*v115.ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` +type isStreamWorkflowReplicationMessagesRequest_Attributes interface { + isStreamWorkflowReplicationMessagesRequest_Attributes() } -func (x *GetDLQReplicationMessagesResponse) Reset() { - *x = GetDLQReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[75] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +type StreamWorkflowReplicationMessagesRequest_SyncReplicationState struct { + SyncReplicationState *v117.SyncReplicationState `protobuf:"bytes,1,opt,name=sync_replication_state,json=syncReplicationState,proto3,oneof"` } -func (x *GetDLQReplicationMessagesResponse) String() string { +func (*StreamWorkflowReplicationMessagesRequest_SyncReplicationState) isStreamWorkflowReplicationMessagesRequest_Attributes() { +} + +type StreamWorkflowReplicationMessagesResponse struct { + state protoimpl.MessageState 
`protogen:"open.v1"` + // Types that are valid to be assigned to Attributes: + // + // *StreamWorkflowReplicationMessagesResponse_Messages + Attributes isStreamWorkflowReplicationMessagesResponse_Attributes `protobuf_oneof:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StreamWorkflowReplicationMessagesResponse) Reset() { + *x = StreamWorkflowReplicationMessagesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[108] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StreamWorkflowReplicationMessagesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetDLQReplicationMessagesResponse) ProtoMessage() {} +func (*StreamWorkflowReplicationMessagesResponse) ProtoMessage() {} -func (x *GetDLQReplicationMessagesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[75] - if protoimpl.UnsafeEnabled && x != nil { +func (x *StreamWorkflowReplicationMessagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[108] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -4987,45 +7423,62 @@ func (x *GetDLQReplicationMessagesResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GetDLQReplicationMessagesResponse.ProtoReflect.Descriptor instead. -func (*GetDLQReplicationMessagesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{75} +// Deprecated: Use StreamWorkflowReplicationMessagesResponse.ProtoReflect.Descriptor instead. 
+func (*StreamWorkflowReplicationMessagesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{108} +} + +func (x *StreamWorkflowReplicationMessagesResponse) GetAttributes() isStreamWorkflowReplicationMessagesResponse_Attributes { + if x != nil { + return x.Attributes + } + return nil } -func (x *GetDLQReplicationMessagesResponse) GetReplicationTasks() []*v115.ReplicationTask { +func (x *StreamWorkflowReplicationMessagesResponse) GetMessages() *v117.WorkflowReplicationMessages { if x != nil { - return x.ReplicationTasks + if x, ok := x.Attributes.(*StreamWorkflowReplicationMessagesResponse_Messages); ok { + return x.Messages + } } return nil } -type QueryWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type isStreamWorkflowReplicationMessagesResponse_Attributes interface { + isStreamWorkflowReplicationMessagesResponse_Attributes() +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.QueryWorkflowRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +type StreamWorkflowReplicationMessagesResponse_Messages struct { + Messages *v117.WorkflowReplicationMessages `protobuf:"bytes,1,opt,name=messages,proto3,oneof"` } -func (x *QueryWorkflowRequest) Reset() { - *x = QueryWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[76] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (*StreamWorkflowReplicationMessagesResponse_Messages) isStreamWorkflowReplicationMessagesResponse_Attributes() { } -func (x *QueryWorkflowRequest) String() string { +type PollWorkflowExecutionUpdateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string 
`protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.PollWorkflowExecutionUpdateRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollWorkflowExecutionUpdateRequest) Reset() { + *x = PollWorkflowExecutionUpdateRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[109] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollWorkflowExecutionUpdateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*QueryWorkflowRequest) ProtoMessage() {} +func (*PollWorkflowExecutionUpdateRequest) ProtoMessage() {} -func (x *QueryWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[76] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PollWorkflowExecutionUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[109] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5035,51 +7488,48 @@ func (x *QueryWorkflowRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use QueryWorkflowRequest.ProtoReflect.Descriptor instead. -func (*QueryWorkflowRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{76} +// Deprecated: Use PollWorkflowExecutionUpdateRequest.ProtoReflect.Descriptor instead. 
+func (*PollWorkflowExecutionUpdateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{109} } -func (x *QueryWorkflowRequest) GetNamespaceId() string { +func (x *PollWorkflowExecutionUpdateRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *QueryWorkflowRequest) GetRequest() *v1.QueryWorkflowRequest { +func (x *PollWorkflowExecutionUpdateRequest) GetRequest() *v1.PollWorkflowExecutionUpdateRequest { if x != nil { return x.Request } return nil } -type QueryWorkflowResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type PollWorkflowExecutionUpdateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.PollWorkflowExecutionUpdateResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields - - Response *v1.QueryWorkflowResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *QueryWorkflowResponse) Reset() { - *x = QueryWorkflowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[77] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PollWorkflowExecutionUpdateResponse) Reset() { + *x = PollWorkflowExecutionUpdateResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[110] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *QueryWorkflowResponse) String() string { +func (x *PollWorkflowExecutionUpdateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*QueryWorkflowResponse) ProtoMessage() {} +func (*PollWorkflowExecutionUpdateResponse) ProtoMessage() {} -func (x *QueryWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[77] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PollWorkflowExecutionUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[110] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5089,45 +7539,42 @@ func (x *QueryWorkflowResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use QueryWorkflowResponse.ProtoReflect.Descriptor instead. -func (*QueryWorkflowResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{77} +// Deprecated: Use PollWorkflowExecutionUpdateResponse.ProtoReflect.Descriptor instead. +func (*PollWorkflowExecutionUpdateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{110} } -func (x *QueryWorkflowResponse) GetResponse() *v1.QueryWorkflowResponse { +func (x *PollWorkflowExecutionUpdateResponse) GetResponse() *v1.PollWorkflowExecutionUpdateResponse { if x != nil { return x.Response } return nil } -type ReapplyEventsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetWorkflowExecutionHistoryRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.GetWorkflowExecutionHistoryRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v116.ReapplyEventsRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + sizeCache 
protoimpl.SizeCache } -func (x *ReapplyEventsRequest) Reset() { - *x = ReapplyEventsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[78] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionHistoryRequest) Reset() { + *x = GetWorkflowExecutionHistoryRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[111] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReapplyEventsRequest) String() string { +func (x *GetWorkflowExecutionHistoryRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReapplyEventsRequest) ProtoMessage() {} +func (*GetWorkflowExecutionHistoryRequest) ProtoMessage() {} -func (x *ReapplyEventsRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[78] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionHistoryRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[111] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5137,49 +7584,49 @@ func (x *ReapplyEventsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReapplyEventsRequest.ProtoReflect.Descriptor instead. -func (*ReapplyEventsRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{78} +// Deprecated: Use GetWorkflowExecutionHistoryRequest.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowExecutionHistoryRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{111} } -func (x *ReapplyEventsRequest) GetNamespaceId() string { +func (x *GetWorkflowExecutionHistoryRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *ReapplyEventsRequest) GetRequest() *v116.ReapplyEventsRequest { +func (x *GetWorkflowExecutionHistoryRequest) GetRequest() *v1.GetWorkflowExecutionHistoryRequest { if x != nil { return x.Request } return nil } -type ReapplyEventsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetWorkflowExecutionHistoryResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.GetWorkflowExecutionHistoryResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + History *v17.History `protobuf:"bytes,2,opt,name=history,proto3" json:"history,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReapplyEventsResponse) Reset() { - *x = ReapplyEventsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[79] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionHistoryResponse) Reset() { + *x = GetWorkflowExecutionHistoryResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[112] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReapplyEventsResponse) String() string { +func (x *GetWorkflowExecutionHistoryResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReapplyEventsResponse) ProtoMessage() {} +func (*GetWorkflowExecutionHistoryResponse) ProtoMessage() {} -func (x *ReapplyEventsResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[79] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionHistoryResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[112] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5189,42 +7636,50 @@ func (x *ReapplyEventsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ReapplyEventsResponse.ProtoReflect.Descriptor instead. -func (*ReapplyEventsResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{79} +// Deprecated: Use GetWorkflowExecutionHistoryResponse.ProtoReflect.Descriptor instead. +func (*GetWorkflowExecutionHistoryResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{112} } -type GetDLQMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` - ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` - InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` - MaximumPageSize int32 `protobuf:"varint,5,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` 
+func (x *GetWorkflowExecutionHistoryResponse) GetResponse() *v1.GetWorkflowExecutionHistoryResponse { + if x != nil { + return x.Response + } + return nil } -func (x *GetDLQMessagesRequest) Reset() { - *x = GetDLQMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[80] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *GetWorkflowExecutionHistoryResponse) GetHistory() *v17.History { + if x != nil { + return x.History } + return nil } -func (x *GetDLQMessagesRequest) String() string { +// This message must be wire compatible with GetWorkflowExecutionHistoryResponse. +type GetWorkflowExecutionHistoryResponseWithRaw struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.GetWorkflowExecutionHistoryResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + History [][]byte `protobuf:"bytes,2,rep,name=history,proto3" json:"history,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetWorkflowExecutionHistoryResponseWithRaw) Reset() { + *x = GetWorkflowExecutionHistoryResponseWithRaw{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[113] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetWorkflowExecutionHistoryResponseWithRaw) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetDLQMessagesRequest) ProtoMessage() {} +func (*GetWorkflowExecutionHistoryResponseWithRaw) ProtoMessage() {} -func (x *GetDLQMessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[80] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionHistoryResponseWithRaw) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[113] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5234,82 +7689,49 @@ func (x *GetDLQMessagesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetDLQMessagesRequest.ProtoReflect.Descriptor instead. -func (*GetDLQMessagesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{80} -} - -func (x *GetDLQMessagesRequest) GetType() v18.DeadLetterQueueType { - if x != nil { - return x.Type - } - return v18.DeadLetterQueueType(0) -} - -func (x *GetDLQMessagesRequest) GetShardId() int32 { - if x != nil { - return x.ShardId - } - return 0 -} - -func (x *GetDLQMessagesRequest) GetSourceCluster() string { - if x != nil { - return x.SourceCluster - } - return "" -} - -func (x *GetDLQMessagesRequest) GetInclusiveEndMessageId() int64 { - if x != nil { - return x.InclusiveEndMessageId - } - return 0 +// Deprecated: Use GetWorkflowExecutionHistoryResponseWithRaw.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowExecutionHistoryResponseWithRaw) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{113} } -func (x *GetDLQMessagesRequest) GetMaximumPageSize() int32 { +func (x *GetWorkflowExecutionHistoryResponseWithRaw) GetResponse() *v1.GetWorkflowExecutionHistoryResponse { if x != nil { - return x.MaximumPageSize + return x.Response } - return 0 + return nil } -func (x *GetDLQMessagesRequest) GetNextPageToken() []byte { +func (x *GetWorkflowExecutionHistoryResponseWithRaw) GetHistory() [][]byte { if x != nil { - return x.NextPageToken + return x.History } return nil } -type GetDLQMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetWorkflowExecutionHistoryReverseRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.GetWorkflowExecutionHistoryReverseRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` - ReplicationTasks []*v115.ReplicationTask `protobuf:"bytes,2,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` - NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - ReplicationTasksInfo []*v115.ReplicationTaskInfo `protobuf:"bytes,4,rep,name=replication_tasks_info,json=replicationTasksInfo,proto3" json:"replication_tasks_info,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetDLQMessagesResponse) Reset() { - *x = GetDLQMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[81] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionHistoryReverseRequest) Reset() { + *x = GetWorkflowExecutionHistoryReverseRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[114] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetDLQMessagesResponse) String() string { +func (x *GetWorkflowExecutionHistoryReverseRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetDLQMessagesResponse) ProtoMessage() {} +func (*GetWorkflowExecutionHistoryReverseRequest) ProtoMessage() {} -func (x *GetDLQMessagesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[81] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionHistoryReverseRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[114] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5319,68 +7741,96 @@ func (x *GetDLQMessagesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetDLQMessagesResponse.ProtoReflect.Descriptor instead. -func (*GetDLQMessagesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{81} +// Deprecated: Use GetWorkflowExecutionHistoryReverseRequest.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowExecutionHistoryReverseRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{114} } -func (x *GetDLQMessagesResponse) GetType() v18.DeadLetterQueueType { +func (x *GetWorkflowExecutionHistoryReverseRequest) GetNamespaceId() string { if x != nil { - return x.Type + return x.NamespaceId } - return v18.DeadLetterQueueType(0) + return "" } -func (x *GetDLQMessagesResponse) GetReplicationTasks() []*v115.ReplicationTask { +func (x *GetWorkflowExecutionHistoryReverseRequest) GetRequest() *v1.GetWorkflowExecutionHistoryReverseRequest { if x != nil { - return x.ReplicationTasks + return x.Request + } + return nil +} + +type GetWorkflowExecutionHistoryReverseResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.GetWorkflowExecutionHistoryReverseResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetWorkflowExecutionHistoryReverseResponse) Reset() { + *x = GetWorkflowExecutionHistoryReverseResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[115] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetWorkflowExecutionHistoryReverseResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWorkflowExecutionHistoryReverseResponse) ProtoMessage() {} + +func (x *GetWorkflowExecutionHistoryReverseResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[115] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *GetDLQMessagesResponse) GetNextPageToken() []byte { - if x != nil { - return x.NextPageToken - } - 
return nil +// Deprecated: Use GetWorkflowExecutionHistoryReverseResponse.ProtoReflect.Descriptor instead. +func (*GetWorkflowExecutionHistoryReverseResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{115} } -func (x *GetDLQMessagesResponse) GetReplicationTasksInfo() []*v115.ReplicationTaskInfo { +func (x *GetWorkflowExecutionHistoryReverseResponse) GetResponse() *v1.GetWorkflowExecutionHistoryReverseResponse { if x != nil { - return x.ReplicationTasksInfo + return x.Response } return nil } -type PurgeDLQMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// * +// StartEventId defines the beginning of the event to fetch. The first event is exclusive. +// EndEventId and EndEventVersion defines the end of the event to fetch. The end event is exclusive. +type GetWorkflowExecutionRawHistoryV2Request struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v118.GetWorkflowExecutionRawHistoryV2Request `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` - ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` - InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *PurgeDLQMessagesRequest) Reset() { - *x = PurgeDLQMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[82] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionRawHistoryV2Request) Reset() { + *x = GetWorkflowExecutionRawHistoryV2Request{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[116] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *PurgeDLQMessagesRequest) String() string { +func (x *GetWorkflowExecutionRawHistoryV2Request) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PurgeDLQMessagesRequest) ProtoMessage() {} +func (*GetWorkflowExecutionRawHistoryV2Request) ProtoMessage() {} -func (x *PurgeDLQMessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[82] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionRawHistoryV2Request) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[116] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5390,63 +7840,48 @@ func (x *PurgeDLQMessagesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PurgeDLQMessagesRequest.ProtoReflect.Descriptor instead. -func (*PurgeDLQMessagesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{82} -} - -func (x *PurgeDLQMessagesRequest) GetType() v18.DeadLetterQueueType { - if x != nil { - return x.Type - } - return v18.DeadLetterQueueType(0) -} - -func (x *PurgeDLQMessagesRequest) GetShardId() int32 { - if x != nil { - return x.ShardId - } - return 0 +// Deprecated: Use GetWorkflowExecutionRawHistoryV2Request.ProtoReflect.Descriptor instead. 
+func (*GetWorkflowExecutionRawHistoryV2Request) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{116} } -func (x *PurgeDLQMessagesRequest) GetSourceCluster() string { +func (x *GetWorkflowExecutionRawHistoryV2Request) GetNamespaceId() string { if x != nil { - return x.SourceCluster + return x.NamespaceId } return "" } -func (x *PurgeDLQMessagesRequest) GetInclusiveEndMessageId() int64 { +func (x *GetWorkflowExecutionRawHistoryV2Request) GetRequest() *v118.GetWorkflowExecutionRawHistoryV2Request { if x != nil { - return x.InclusiveEndMessageId + return x.Request } - return 0 + return nil } -type PurgeDLQMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetWorkflowExecutionRawHistoryV2Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v118.GetWorkflowExecutionRawHistoryV2Response `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *PurgeDLQMessagesResponse) Reset() { - *x = PurgeDLQMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[83] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionRawHistoryV2Response) Reset() { + *x = GetWorkflowExecutionRawHistoryV2Response{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[117] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *PurgeDLQMessagesResponse) String() string { +func (x *GetWorkflowExecutionRawHistoryV2Response) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PurgeDLQMessagesResponse) ProtoMessage() {} +func (*GetWorkflowExecutionRawHistoryV2Response) ProtoMessage() {} -func (x *PurgeDLQMessagesResponse) ProtoReflect() 
protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[83] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionRawHistoryV2Response) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[117] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5456,42 +7891,42 @@ func (x *PurgeDLQMessagesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PurgeDLQMessagesResponse.ProtoReflect.Descriptor instead. -func (*PurgeDLQMessagesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{83} +// Deprecated: Use GetWorkflowExecutionRawHistoryV2Response.ProtoReflect.Descriptor instead. +func (*GetWorkflowExecutionRawHistoryV2Response) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{117} } -type MergeDLQMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *GetWorkflowExecutionRawHistoryV2Response) GetResponse() *v118.GetWorkflowExecutionRawHistoryV2Response { + if x != nil { + return x.Response + } + return nil +} - Type v18.DeadLetterQueueType `protobuf:"varint,1,opt,name=type,proto3,enum=temporal.server.api.enums.v1.DeadLetterQueueType" json:"type,omitempty"` - ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - SourceCluster string `protobuf:"bytes,3,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` - InclusiveEndMessageId int64 `protobuf:"varint,4,opt,name=inclusive_end_message_id,json=inclusiveEndMessageId,proto3" json:"inclusive_end_message_id,omitempty"` - MaximumPageSize int32 
`protobuf:"varint,5,opt,name=maximum_page_size,json=maximumPageSize,proto3" json:"maximum_page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +type GetWorkflowExecutionRawHistoryRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v118.GetWorkflowExecutionRawHistoryRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *MergeDLQMessagesRequest) Reset() { - *x = MergeDLQMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[84] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionRawHistoryRequest) Reset() { + *x = GetWorkflowExecutionRawHistoryRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[118] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *MergeDLQMessagesRequest) String() string { +func (x *GetWorkflowExecutionRawHistoryRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*MergeDLQMessagesRequest) ProtoMessage() {} +func (*GetWorkflowExecutionRawHistoryRequest) ProtoMessage() {} -func (x *MergeDLQMessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[84] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionRawHistoryRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[118] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -5501,79 +7936,48 @@ func (x *MergeDLQMessagesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use MergeDLQMessagesRequest.ProtoReflect.Descriptor instead. -func (*MergeDLQMessagesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{84} -} - -func (x *MergeDLQMessagesRequest) GetType() v18.DeadLetterQueueType { - if x != nil { - return x.Type - } - return v18.DeadLetterQueueType(0) -} - -func (x *MergeDLQMessagesRequest) GetShardId() int32 { - if x != nil { - return x.ShardId - } - return 0 +// Deprecated: Use GetWorkflowExecutionRawHistoryRequest.ProtoReflect.Descriptor instead. +func (*GetWorkflowExecutionRawHistoryRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{118} } -func (x *MergeDLQMessagesRequest) GetSourceCluster() string { +func (x *GetWorkflowExecutionRawHistoryRequest) GetNamespaceId() string { if x != nil { - return x.SourceCluster + return x.NamespaceId } return "" } -func (x *MergeDLQMessagesRequest) GetInclusiveEndMessageId() int64 { - if x != nil { - return x.InclusiveEndMessageId - } - return 0 -} - -func (x *MergeDLQMessagesRequest) GetMaximumPageSize() int32 { - if x != nil { - return x.MaximumPageSize - } - return 0 -} - -func (x *MergeDLQMessagesRequest) GetNextPageToken() []byte { +func (x *GetWorkflowExecutionRawHistoryRequest) GetRequest() *v118.GetWorkflowExecutionRawHistoryRequest { if x != nil { - return x.NextPageToken + return x.Request } return nil } -type MergeDLQMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetWorkflowExecutionRawHistoryResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v118.GetWorkflowExecutionRawHistoryResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields 
protoimpl.UnknownFields - - NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *MergeDLQMessagesResponse) Reset() { - *x = MergeDLQMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[85] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetWorkflowExecutionRawHistoryResponse) Reset() { + *x = GetWorkflowExecutionRawHistoryResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[119] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *MergeDLQMessagesResponse) String() string { +func (x *GetWorkflowExecutionRawHistoryResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*MergeDLQMessagesResponse) ProtoMessage() {} +func (*GetWorkflowExecutionRawHistoryResponse) ProtoMessage() {} -func (x *MergeDLQMessagesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[85] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetWorkflowExecutionRawHistoryResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[119] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5583,45 +7987,44 @@ func (x *MergeDLQMessagesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use MergeDLQMessagesResponse.ProtoReflect.Descriptor instead. 
-func (*MergeDLQMessagesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{85} +// Deprecated: Use GetWorkflowExecutionRawHistoryResponse.ProtoReflect.Descriptor instead. +func (*GetWorkflowExecutionRawHistoryResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{119} } -func (x *MergeDLQMessagesResponse) GetNextPageToken() []byte { +func (x *GetWorkflowExecutionRawHistoryResponse) GetResponse() *v118.GetWorkflowExecutionRawHistoryResponse { if x != nil { - return x.NextPageToken + return x.Response } return nil } -type RefreshWorkflowTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ForceDeleteWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,3,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + Request *v118.DeleteWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v116.RefreshWorkflowTasksRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *RefreshWorkflowTasksRequest) Reset() { - *x = RefreshWorkflowTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[86] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ForceDeleteWorkflowExecutionRequest) Reset() { + *x = 
ForceDeleteWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[120] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RefreshWorkflowTasksRequest) String() string { +func (x *ForceDeleteWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshWorkflowTasksRequest) ProtoMessage() {} +func (*ForceDeleteWorkflowExecutionRequest) ProtoMessage() {} -func (x *RefreshWorkflowTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[86] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ForceDeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[120] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5631,49 +8034,55 @@ func (x *RefreshWorkflowTasksRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshWorkflowTasksRequest.ProtoReflect.Descriptor instead. -func (*RefreshWorkflowTasksRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{86} +// Deprecated: Use ForceDeleteWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*ForceDeleteWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{120} } -func (x *RefreshWorkflowTasksRequest) GetNamespaceId() string { +func (x *ForceDeleteWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RefreshWorkflowTasksRequest) GetRequest() *v116.RefreshWorkflowTasksRequest { +func (x *ForceDeleteWorkflowExecutionRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +func (x *ForceDeleteWorkflowExecutionRequest) GetRequest() *v118.DeleteWorkflowExecutionRequest { if x != nil { return x.Request } return nil } -type RefreshWorkflowTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ForceDeleteWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v118.DeleteWorkflowExecutionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RefreshWorkflowTasksResponse) Reset() { - *x = RefreshWorkflowTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[87] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ForceDeleteWorkflowExecutionResponse) Reset() { + *x = ForceDeleteWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[121] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RefreshWorkflowTasksResponse) String() string { +func (x *ForceDeleteWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RefreshWorkflowTasksResponse) ProtoMessage() {} +func (*ForceDeleteWorkflowExecutionResponse) ProtoMessage() {} -func (x 
*RefreshWorkflowTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[87] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ForceDeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[121] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5683,38 +8092,44 @@ func (x *RefreshWorkflowTasksResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RefreshWorkflowTasksResponse.ProtoReflect.Descriptor instead. -func (*RefreshWorkflowTasksResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{87} +// Deprecated: Use ForceDeleteWorkflowExecutionResponse.ProtoReflect.Descriptor instead. +func (*ForceDeleteWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{121} +} + +func (x *ForceDeleteWorkflowExecutionResponse) GetResponse() *v118.DeleteWorkflowExecutionResponse { + if x != nil { + return x.Response + } + return nil } -type GenerateLastHistoryReplicationTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetDLQTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DlqKey *v119.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` + // page_size must be positive. Up to this many tasks will be returned. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GenerateLastHistoryReplicationTasksRequest) Reset() { - *x = GenerateLastHistoryReplicationTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[88] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetDLQTasksRequest) Reset() { + *x = GetDLQTasksRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[122] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GenerateLastHistoryReplicationTasksRequest) String() string { +func (x *GetDLQTasksRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GenerateLastHistoryReplicationTasksRequest) ProtoMessage() {} +func (*GetDLQTasksRequest) ProtoMessage() {} -func (x *GenerateLastHistoryReplicationTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[88] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetDLQTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[122] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5724,52 +8139,59 @@ func (x *GenerateLastHistoryReplicationTasksRequest) ProtoReflect() protoreflect return 
mi.MessageOf(x) } -// Deprecated: Use GenerateLastHistoryReplicationTasksRequest.ProtoReflect.Descriptor instead. -func (*GenerateLastHistoryReplicationTasksRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{88} +// Deprecated: Use GetDLQTasksRequest.ProtoReflect.Descriptor instead. +func (*GetDLQTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{122} } -func (x *GenerateLastHistoryReplicationTasksRequest) GetNamespaceId() string { +func (x *GetDLQTasksRequest) GetDlqKey() *v119.HistoryDLQKey { if x != nil { - return x.NamespaceId + return x.DlqKey } - return "" + return nil } -func (x *GenerateLastHistoryReplicationTasksRequest) GetExecution() *v14.WorkflowExecution { +func (x *GetDLQTasksRequest) GetPageSize() int32 { if x != nil { - return x.Execution + return x.PageSize + } + return 0 +} + +func (x *GetDLQTasksRequest) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken } return nil } -type GenerateLastHistoryReplicationTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type GetDLQTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + DlqTasks []*v119.HistoryDLQTask `protobuf:"bytes,1,rep,name=dlq_tasks,json=dlqTasks,proto3" json:"dlq_tasks,omitempty"` + // next_page_token is empty if there are no more results. However, the converse is not true. If there are no more + // results, this field may still be non-empty. This is to avoid having to do a count query to determine whether + // there are more results. 
+ NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - StateTransitionCount int64 `protobuf:"varint,1,opt,name=state_transition_count,json=stateTransitionCount,proto3" json:"state_transition_count,omitempty"` - HistoryLength int64 `protobuf:"varint,2,opt,name=history_length,json=historyLength,proto3" json:"history_length,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GenerateLastHistoryReplicationTasksResponse) Reset() { - *x = GenerateLastHistoryReplicationTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[89] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *GetDLQTasksResponse) Reset() { + *x = GetDLQTasksResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[123] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GenerateLastHistoryReplicationTasksResponse) String() string { +func (x *GetDLQTasksResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GenerateLastHistoryReplicationTasksResponse) ProtoMessage() {} +func (*GetDLQTasksResponse) ProtoMessage() {} -func (x *GenerateLastHistoryReplicationTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[89] - if protoimpl.UnsafeEnabled && x != nil { +func (x *GetDLQTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[123] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5779,52 +8201,49 @@ func (x *GenerateLastHistoryReplicationTasksResponse) ProtoReflect() protoreflec return mi.MessageOf(x) } -// Deprecated: Use 
GenerateLastHistoryReplicationTasksResponse.ProtoReflect.Descriptor instead. -func (*GenerateLastHistoryReplicationTasksResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{89} +// Deprecated: Use GetDLQTasksResponse.ProtoReflect.Descriptor instead. +func (*GetDLQTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{123} } -func (x *GenerateLastHistoryReplicationTasksResponse) GetStateTransitionCount() int64 { +func (x *GetDLQTasksResponse) GetDlqTasks() []*v119.HistoryDLQTask { if x != nil { - return x.StateTransitionCount + return x.DlqTasks } - return 0 + return nil } -func (x *GenerateLastHistoryReplicationTasksResponse) GetHistoryLength() int64 { +func (x *GetDLQTasksResponse) GetNextPageToken() []byte { if x != nil { - return x.HistoryLength + return x.NextPageToken } - return 0 + return nil } -type GetReplicationStatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Remote cluster names to query for. If omit, will return for all remote clusters. 
- RemoteClusters []string `protobuf:"bytes,1,rep,name=remote_clusters,json=remoteClusters,proto3" json:"remote_clusters,omitempty"` +type DeleteDLQTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DlqKey *v119.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` + InclusiveMaxTaskMetadata *v119.HistoryDLQTaskMetadata `protobuf:"bytes,2,opt,name=inclusive_max_task_metadata,json=inclusiveMaxTaskMetadata,proto3" json:"inclusive_max_task_metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetReplicationStatusRequest) Reset() { - *x = GetReplicationStatusRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[90] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteDLQTasksRequest) Reset() { + *x = DeleteDLQTasksRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[124] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetReplicationStatusRequest) String() string { +func (x *DeleteDLQTasksRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetReplicationStatusRequest) ProtoMessage() {} +func (*DeleteDLQTasksRequest) ProtoMessage() {} -func (x *GetReplicationStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[90] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteDLQTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[124] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5834,44 +8253,49 @@ func (x *GetReplicationStatusRequest) ProtoReflect() protoreflect.Message { return 
mi.MessageOf(x) } -// Deprecated: Use GetReplicationStatusRequest.ProtoReflect.Descriptor instead. -func (*GetReplicationStatusRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{90} +// Deprecated: Use DeleteDLQTasksRequest.ProtoReflect.Descriptor instead. +func (*DeleteDLQTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{124} } -func (x *GetReplicationStatusRequest) GetRemoteClusters() []string { +func (x *DeleteDLQTasksRequest) GetDlqKey() *v119.HistoryDLQKey { if x != nil { - return x.RemoteClusters + return x.DlqKey } return nil } -type GetReplicationStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DeleteDLQTasksRequest) GetInclusiveMaxTaskMetadata() *v119.HistoryDLQTaskMetadata { + if x != nil { + return x.InclusiveMaxTaskMetadata + } + return nil +} - Shards []*ShardReplicationStatus `protobuf:"bytes,1,rep,name=shards,proto3" json:"shards,omitempty"` +type DeleteDLQTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // messages_deleted is the total number of messages deleted in DeleteDLQTasks operation. 
+ MessagesDeleted int64 `protobuf:"varint,1,opt,name=messages_deleted,json=messagesDeleted,proto3" json:"messages_deleted,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetReplicationStatusResponse) Reset() { - *x = GetReplicationStatusResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[91] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteDLQTasksResponse) Reset() { + *x = DeleteDLQTasksResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[125] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetReplicationStatusResponse) String() string { +func (x *DeleteDLQTasksResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetReplicationStatusResponse) ProtoMessage() {} +func (*DeleteDLQTasksResponse) ProtoMessage() {} -func (x *GetReplicationStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[91] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteDLQTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[125] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5881,51 +8305,43 @@ func (x *GetReplicationStatusResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetReplicationStatusResponse.ProtoReflect.Descriptor instead. -func (*GetReplicationStatusResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{91} +// Deprecated: Use DeleteDLQTasksResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteDLQTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{125} } -func (x *GetReplicationStatusResponse) GetShards() []*ShardReplicationStatus { +func (x *DeleteDLQTasksResponse) GetMessagesDeleted() int64 { if x != nil { - return x.Shards + return x.MessagesDeleted } - return nil + return 0 } -type ShardReplicationStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListQueuesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + QueueType int32 `protobuf:"varint,1,opt,name=queue_type,json=queueType,proto3" json:"queue_type,omitempty"` + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - // Max replication task id of current cluster - MaxReplicationTaskId int64 `protobuf:"varint,2,opt,name=max_replication_task_id,json=maxReplicationTaskId,proto3" json:"max_replication_task_id,omitempty"` - // Local time on this shard - ShardLocalTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=shard_local_time,json=shardLocalTime,proto3" json:"shard_local_time,omitempty"` - RemoteClusters map[string]*ShardReplicationStatusPerCluster `protobuf:"bytes,4,rep,name=remote_clusters,json=remoteClusters,proto3" json:"remote_clusters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - HandoverNamespaces map[string]*HandoverNamespaceInfo `protobuf:"bytes,5,rep,name=handover_namespaces,json=handoverNamespaces,proto3" json:"handover_namespaces,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - 
MaxReplicationTaskVisibilityTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=max_replication_task_visibility_time,json=maxReplicationTaskVisibilityTime,proto3" json:"max_replication_task_visibility_time,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ShardReplicationStatus) Reset() { - *x = ShardReplicationStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[92] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListQueuesRequest) Reset() { + *x = ListQueuesRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[126] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ShardReplicationStatus) String() string { +func (x *ListQueuesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationStatus) ProtoMessage() {} +func (*ListQueuesRequest) ProtoMessage() {} -func (x *ShardReplicationStatus) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[92] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListQueuesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[126] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -5935,80 +8351,112 @@ func (x *ShardReplicationStatus) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationStatus.ProtoReflect.Descriptor instead. -func (*ShardReplicationStatus) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{92} +// Deprecated: Use ListQueuesRequest.ProtoReflect.Descriptor instead. 
+func (*ListQueuesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{126} } -func (x *ShardReplicationStatus) GetShardId() int32 { +func (x *ListQueuesRequest) GetQueueType() int32 { if x != nil { - return x.ShardId + return x.QueueType } return 0 } -func (x *ShardReplicationStatus) GetMaxReplicationTaskId() int64 { +func (x *ListQueuesRequest) GetPageSize() int32 { if x != nil { - return x.MaxReplicationTaskId + return x.PageSize } return 0 } -func (x *ShardReplicationStatus) GetShardLocalTime() *timestamppb.Timestamp { +func (x *ListQueuesRequest) GetNextPageToken() []byte { if x != nil { - return x.ShardLocalTime + return x.NextPageToken } return nil } -func (x *ShardReplicationStatus) GetRemoteClusters() map[string]*ShardReplicationStatusPerCluster { +type ListQueuesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Queues []*ListQueuesResponse_QueueInfo `protobuf:"bytes,1,rep,name=queues,proto3" json:"queues,omitempty"` + NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListQueuesResponse) Reset() { + *x = ListQueuesResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[127] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListQueuesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListQueuesResponse) ProtoMessage() {} + +func (x *ListQueuesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[127] if x != nil { - return x.RemoteClusters + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return 
mi.MessageOf(x) } -func (x *ShardReplicationStatus) GetHandoverNamespaces() map[string]*HandoverNamespaceInfo { +// Deprecated: Use ListQueuesResponse.ProtoReflect.Descriptor instead. +func (*ListQueuesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{127} +} + +func (x *ListQueuesResponse) GetQueues() []*ListQueuesResponse_QueueInfo { if x != nil { - return x.HandoverNamespaces + return x.Queues } return nil } -func (x *ShardReplicationStatus) GetMaxReplicationTaskVisibilityTime() *timestamppb.Timestamp { +func (x *ListQueuesResponse) GetNextPageToken() []byte { if x != nil { - return x.MaxReplicationTaskVisibilityTime + return x.NextPageToken } return nil } -type HandoverNamespaceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // max replication task id when namespace transition to Handover state - HandoverReplicationTaskId int64 `protobuf:"varint,1,opt,name=handover_replication_task_id,json=handoverReplicationTaskId,proto3" json:"handover_replication_task_id,omitempty"` +type AddTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Even though we can obtain the shard ID from the tasks, we still need the shard_id in the request for routing. If + // not, it would be possible to include tasks for shards that belong to different hosts, and we'd need to fan-out the + // request, which would be more complicated. + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + // A list of tasks to enqueue or re-enqueue. 
+ Tasks []*AddTasksRequest_Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *HandoverNamespaceInfo) Reset() { - *x = HandoverNamespaceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[93] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *AddTasksRequest) Reset() { + *x = AddTasksRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[128] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *HandoverNamespaceInfo) String() string { +func (x *AddTasksRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*HandoverNamespaceInfo) ProtoMessage() {} +func (*AddTasksRequest) ProtoMessage() {} -func (x *HandoverNamespaceInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[93] - if protoimpl.UnsafeEnabled && x != nil { +func (x *AddTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[128] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6018,47 +8466,47 @@ func (x *HandoverNamespaceInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use HandoverNamespaceInfo.ProtoReflect.Descriptor instead. -func (*HandoverNamespaceInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{93} +// Deprecated: Use AddTasksRequest.ProtoReflect.Descriptor instead. 
+func (*AddTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{128} } -func (x *HandoverNamespaceInfo) GetHandoverReplicationTaskId() int64 { +func (x *AddTasksRequest) GetShardId() int32 { if x != nil { - return x.HandoverReplicationTaskId + return x.ShardId } return 0 } -type ShardReplicationStatusPerCluster struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *AddTasksRequest) GetTasks() []*AddTasksRequest_Task { + if x != nil { + return x.Tasks + } + return nil +} - // Acked replication task id - AckedTaskId int64 `protobuf:"varint,1,opt,name=acked_task_id,json=ackedTaskId,proto3" json:"acked_task_id,omitempty"` - // Acked replication task creation time - AckedTaskVisibilityTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=acked_task_visibility_time,json=ackedTaskVisibilityTime,proto3" json:"acked_task_visibility_time,omitempty"` +type AddTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ShardReplicationStatusPerCluster) Reset() { - *x = ShardReplicationStatusPerCluster{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[94] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *AddTasksResponse) Reset() { + *x = AddTasksResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[129] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ShardReplicationStatusPerCluster) String() string { +func (x *AddTasksResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ShardReplicationStatusPerCluster) ProtoMessage() {} +func (*AddTasksResponse) ProtoMessage() {} -func (x 
*ShardReplicationStatusPerCluster) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[94] - if protoimpl.UnsafeEnabled && x != nil { +func (x *AddTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[129] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6068,52 +8516,34 @@ func (x *ShardReplicationStatusPerCluster) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ShardReplicationStatusPerCluster.ProtoReflect.Descriptor instead. -func (*ShardReplicationStatusPerCluster) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{94} -} - -func (x *ShardReplicationStatusPerCluster) GetAckedTaskId() int64 { - if x != nil { - return x.AckedTaskId - } - return 0 -} - -func (x *ShardReplicationStatusPerCluster) GetAckedTaskVisibilityTime() *timestamppb.Timestamp { - if x != nil { - return x.AckedTaskVisibilityTime - } - return nil +// Deprecated: Use AddTasksResponse.ProtoReflect.Descriptor instead. 
+func (*AddTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{129} } -type RebuildMutableStateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListTasksRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Request *v118.ListHistoryTasksRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *RebuildMutableStateRequest) Reset() { - *x = RebuildMutableStateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[95] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListTasksRequest) Reset() { + *x = ListTasksRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[130] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RebuildMutableStateRequest) String() string { +func (x *ListTasksRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RebuildMutableStateRequest) ProtoMessage() {} +func (*ListTasksRequest) ProtoMessage() {} -func (x *RebuildMutableStateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[95] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[130] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -6123,49 +8553,41 @@ func (x *RebuildMutableStateRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildMutableStateRequest.ProtoReflect.Descriptor instead. -func (*RebuildMutableStateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{95} -} - -func (x *RebuildMutableStateRequest) GetNamespaceId() string { - if x != nil { - return x.NamespaceId - } - return "" +// Deprecated: Use ListTasksRequest.ProtoReflect.Descriptor instead. +func (*ListTasksRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{130} } -func (x *RebuildMutableStateRequest) GetExecution() *v14.WorkflowExecution { +func (x *ListTasksRequest) GetRequest() *v118.ListHistoryTasksRequest { if x != nil { - return x.Execution + return x.Request } return nil } -type RebuildMutableStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListTasksResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v118.ListHistoryTasksResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RebuildMutableStateResponse) Reset() { - *x = RebuildMutableStateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[96] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListTasksResponse) Reset() { + *x = ListTasksResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[131] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RebuildMutableStateResponse) String() string { +func (x *ListTasksResponse) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*RebuildMutableStateResponse) ProtoMessage() {} +func (*ListTasksResponse) ProtoMessage() {} -func (x *RebuildMutableStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[96] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[131] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6175,41 +8597,56 @@ func (x *RebuildMutableStateResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RebuildMutableStateResponse.ProtoReflect.Descriptor instead. -func (*RebuildMutableStateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{96} +// Deprecated: Use ListTasksResponse.ProtoReflect.Descriptor instead. 
+func (*ListTasksResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{131} } -type ImportWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ListTasksResponse) GetResponse() *v118.ListHistoryTasksResponse { + if x != nil { + return x.Response + } + return nil +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - HistoryBatches []*v14.DataBlob `protobuf:"bytes,3,rep,name=history_batches,json=historyBatches,proto3" json:"history_batches,omitempty"` - VersionHistory *v16.VersionHistory `protobuf:"bytes,4,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` - Token []byte `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"` +type CompleteNexusOperationChasmRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Completion token - holds information for locating an entity and the corresponding component. + Completion *v120.NexusOperationCompletion `protobuf:"bytes,1,opt,name=completion,proto3" json:"completion,omitempty"` + // Types that are valid to be assigned to Outcome: + // + // *CompleteNexusOperationChasmRequest_Success + // *CompleteNexusOperationChasmRequest_Failure + Outcome isCompleteNexusOperationChasmRequest_Outcome `protobuf_oneof:"outcome"` + // Time when the operation was closed. + CloseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` + // Links from the Nexus completion callback (e.g. references to the handler workflow). 
+ Links []*v14.Link `protobuf:"bytes,5,rep,name=links,proto3" json:"links,omitempty"` + // Async operation token from the callback request, used to synthesize a started event when + // completion arrives before the worker's start response. + OperationToken string `protobuf:"bytes,6,opt,name=operation_token,json=operationToken,proto3" json:"operation_token,omitempty"` + // Time when the operation was started. Used when completion is received before the started response. + StartTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ImportWorkflowExecutionRequest) Reset() { - *x = ImportWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[97] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CompleteNexusOperationChasmRequest) Reset() { + *x = CompleteNexusOperationChasmRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[132] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ImportWorkflowExecutionRequest) String() string { +func (x *CompleteNexusOperationChasmRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ImportWorkflowExecutionRequest) ProtoMessage() {} +func (*CompleteNexusOperationChasmRequest) ProtoMessage() {} -func (x *ImportWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[97] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CompleteNexusOperationChasmRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[132] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6219,73 +8656,111 @@ func (x *ImportWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ImportWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*ImportWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{97} +// Deprecated: Use CompleteNexusOperationChasmRequest.ProtoReflect.Descriptor instead. +func (*CompleteNexusOperationChasmRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{132} } -func (x *ImportWorkflowExecutionRequest) GetNamespaceId() string { +func (x *CompleteNexusOperationChasmRequest) GetCompletion() *v120.NexusOperationCompletion { if x != nil { - return x.NamespaceId + return x.Completion } - return "" + return nil } -func (x *ImportWorkflowExecutionRequest) GetExecution() *v14.WorkflowExecution { +func (x *CompleteNexusOperationChasmRequest) GetOutcome() isCompleteNexusOperationChasmRequest_Outcome { if x != nil { - return x.Execution + return x.Outcome } return nil } -func (x *ImportWorkflowExecutionRequest) GetHistoryBatches() []*v14.DataBlob { +func (x *CompleteNexusOperationChasmRequest) GetSuccess() *v14.Payload { if x != nil { - return x.HistoryBatches + if x, ok := x.Outcome.(*CompleteNexusOperationChasmRequest_Success); ok { + return x.Success + } } return nil } -func (x *ImportWorkflowExecutionRequest) GetVersionHistory() *v16.VersionHistory { +func (x *CompleteNexusOperationChasmRequest) GetFailure() *v13.Failure { if x != nil { - return x.VersionHistory + if x, ok := x.Outcome.(*CompleteNexusOperationChasmRequest_Failure); ok { + return x.Failure + } } return nil } -func (x *ImportWorkflowExecutionRequest) GetToken() []byte { +func (x *CompleteNexusOperationChasmRequest) GetCloseTime() *timestamppb.Timestamp { if x != nil { - 
return x.Token + return x.CloseTime } return nil } -type ImportWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CompleteNexusOperationChasmRequest) GetLinks() []*v14.Link { + if x != nil { + return x.Links + } + return nil +} - Token []byte `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` - EventsApplied bool `protobuf:"varint,2,opt,name=events_applied,json=eventsApplied,proto3" json:"events_applied,omitempty"` +func (x *CompleteNexusOperationChasmRequest) GetOperationToken() string { + if x != nil { + return x.OperationToken + } + return "" } -func (x *ImportWorkflowExecutionResponse) Reset() { - *x = ImportWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[98] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *CompleteNexusOperationChasmRequest) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime } + return nil } -func (x *ImportWorkflowExecutionResponse) String() string { +type isCompleteNexusOperationChasmRequest_Outcome interface { + isCompleteNexusOperationChasmRequest_Outcome() +} + +type CompleteNexusOperationChasmRequest_Success struct { + // Result of a successful operation, only set if state == successful. + Success *v14.Payload `protobuf:"bytes,2,opt,name=success,proto3,oneof"` +} + +type CompleteNexusOperationChasmRequest_Failure struct { + // Operation failure, only set if state != successful. 
+ Failure *v13.Failure `protobuf:"bytes,3,opt,name=failure,proto3,oneof"` +} + +func (*CompleteNexusOperationChasmRequest_Success) isCompleteNexusOperationChasmRequest_Outcome() {} + +func (*CompleteNexusOperationChasmRequest_Failure) isCompleteNexusOperationChasmRequest_Outcome() {} + +type CompleteNexusOperationChasmResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CompleteNexusOperationChasmResponse) Reset() { + *x = CompleteNexusOperationChasmResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[133] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CompleteNexusOperationChasmResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ImportWorkflowExecutionResponse) ProtoMessage() {} +func (*CompleteNexusOperationChasmResponse) ProtoMessage() {} -func (x *ImportWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[98] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CompleteNexusOperationChasmResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[133] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6295,120 +8770,162 @@ func (x *ImportWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ImportWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*ImportWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{98} +// Deprecated: Use CompleteNexusOperationChasmResponse.ProtoReflect.Descriptor instead. 
+func (*CompleteNexusOperationChasmResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{133} } -func (x *ImportWorkflowExecutionResponse) GetToken() []byte { - if x != nil { - return x.Token - } - return nil +type CompleteNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Completion token - holds information for locating a run and the corresponding operation state machine. + Completion *v120.NexusOperationCompletion `protobuf:"bytes,1,opt,name=completion,proto3" json:"completion,omitempty"` + // Operation state - may only be successful / failed / canceled. + State string `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + // Types that are valid to be assigned to Outcome: + // + // *CompleteNexusOperationRequest_Success + // *CompleteNexusOperationRequest_Failure + Outcome isCompleteNexusOperationRequest_Outcome `protobuf_oneof:"outcome"` + // Operation token - used when the completion is received before the started response. + OperationToken string `protobuf:"bytes,5,opt,name=operation_token,json=operationToken,proto3" json:"operation_token,omitempty"` + // Time the operation was started. Used when completion is received before the started response. + StartTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Links to be attached to a fabricated start event if completion is received before started response. 
+ Links []*v14.Link `protobuf:"bytes,7,rep,name=links,proto3" json:"links,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ImportWorkflowExecutionResponse) GetEventsApplied() bool { +func (x *CompleteNexusOperationRequest) Reset() { + *x = CompleteNexusOperationRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[134] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CompleteNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompleteNexusOperationRequest) ProtoMessage() {} + +func (x *CompleteNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[134] if x != nil { - return x.EventsApplied + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return false + return mi.MessageOf(x) } -type DeleteWorkflowVisibilityRecordRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +// Deprecated: Use CompleteNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*CompleteNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{134} +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - WorkflowStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=workflow_start_time,json=workflowStartTime,proto3" json:"workflow_start_time,omitempty"` - WorkflowCloseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=workflow_close_time,json=workflowCloseTime,proto3" json:"workflow_close_time,omitempty"` +func (x *CompleteNexusOperationRequest) GetCompletion() *v120.NexusOperationCompletion { + if x != nil { + return x.Completion + } + return nil } -func (x *DeleteWorkflowVisibilityRecordRequest) Reset() { - *x = DeleteWorkflowVisibilityRecordRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[99] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *CompleteNexusOperationRequest) GetState() string { + if x != nil { + return x.State } + return "" } -func (x *DeleteWorkflowVisibilityRecordRequest) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *CompleteNexusOperationRequest) GetOutcome() isCompleteNexusOperationRequest_Outcome { + if x != nil { + return x.Outcome + } + return nil } -func (*DeleteWorkflowVisibilityRecordRequest) ProtoMessage() {} - -func (x *DeleteWorkflowVisibilityRecordRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[99] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) +func (x *CompleteNexusOperationRequest) GetSuccess() 
*v14.Payload { + if x != nil { + if x, ok := x.Outcome.(*CompleteNexusOperationRequest_Success); ok { + return x.Success } - return ms } - return mi.MessageOf(x) + return nil } -// Deprecated: Use DeleteWorkflowVisibilityRecordRequest.ProtoReflect.Descriptor instead. -func (*DeleteWorkflowVisibilityRecordRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{99} +func (x *CompleteNexusOperationRequest) GetFailure() *v121.Failure { + if x != nil { + if x, ok := x.Outcome.(*CompleteNexusOperationRequest_Failure); ok { + return x.Failure + } + } + return nil } -func (x *DeleteWorkflowVisibilityRecordRequest) GetNamespaceId() string { +func (x *CompleteNexusOperationRequest) GetOperationToken() string { if x != nil { - return x.NamespaceId + return x.OperationToken } return "" } -func (x *DeleteWorkflowVisibilityRecordRequest) GetExecution() *v14.WorkflowExecution { +func (x *CompleteNexusOperationRequest) GetStartTime() *timestamppb.Timestamp { if x != nil { - return x.Execution + return x.StartTime } return nil } -func (x *DeleteWorkflowVisibilityRecordRequest) GetWorkflowStartTime() *timestamppb.Timestamp { +func (x *CompleteNexusOperationRequest) GetLinks() []*v14.Link { if x != nil { - return x.WorkflowStartTime + return x.Links } return nil } -func (x *DeleteWorkflowVisibilityRecordRequest) GetWorkflowCloseTime() *timestamppb.Timestamp { - if x != nil { - return x.WorkflowCloseTime - } - return nil +type isCompleteNexusOperationRequest_Outcome interface { + isCompleteNexusOperationRequest_Outcome() } -type DeleteWorkflowVisibilityRecordResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CompleteNexusOperationRequest_Success struct { + // Result of a successful operation, only set if state == successful. 
+ Success *v14.Payload `protobuf:"bytes,3,opt,name=success,proto3,oneof"` +} + +type CompleteNexusOperationRequest_Failure struct { + // Operation failure, only set if state != successful. + Failure *v121.Failure `protobuf:"bytes,4,opt,name=failure,proto3,oneof"` +} + +func (*CompleteNexusOperationRequest_Success) isCompleteNexusOperationRequest_Outcome() {} + +func (*CompleteNexusOperationRequest_Failure) isCompleteNexusOperationRequest_Outcome() {} + +type CompleteNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DeleteWorkflowVisibilityRecordResponse) Reset() { - *x = DeleteWorkflowVisibilityRecordResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[100] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CompleteNexusOperationResponse) Reset() { + *x = CompleteNexusOperationResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[135] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DeleteWorkflowVisibilityRecordResponse) String() string { +func (x *CompleteNexusOperationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteWorkflowVisibilityRecordResponse) ProtoMessage() {} +func (*CompleteNexusOperationResponse) ProtoMessage() {} -func (x *DeleteWorkflowVisibilityRecordResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[100] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CompleteNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[135] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
ms.StoreMessageInfo(mi) @@ -6418,41 +8935,50 @@ func (x *DeleteWorkflowVisibilityRecordResponse) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use DeleteWorkflowVisibilityRecordResponse.ProtoReflect.Descriptor instead. -func (*DeleteWorkflowVisibilityRecordResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{100} +// Deprecated: Use CompleteNexusOperationResponse.ProtoReflect.Descriptor instead. +func (*CompleteNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{135} } -// (-- api-linter: core::0134=disabled -// -// aip.dev/not-precedent: This service does not follow the update method AIP --) -type UpdateWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type InvokeStateMachineMethodRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // TODO(Tianyu): This is the same as NexusOperationsCompletion but obviously is not about Nexus. This is because + // State machine signaling is a generalization of the Nexus mechanisms. Perhaps eventually they should be merged. + // Namespace UUID. + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // Workflow ID. + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // Run ID at the time this token was generated. + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // Reference including the path to the backing Operation state machine and a version + transition count for + // staleness checks. + Ref *v110.StateMachineRef `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"` + // The method name to invoke. 
Methods must be explicitly registered for the target state machine in the state + // machine registry, and accept an argument type of HistoryEvent that is the completion event of the completed + // workflow. + MethodName string `protobuf:"bytes,5,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"` + // Input, in serialized bytes, to the method. Users specify a deserializer during method registration for each state machine. + Input []byte `protobuf:"bytes,6,opt,name=input,proto3" json:"input,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.UpdateWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *UpdateWorkflowExecutionRequest) Reset() { - *x = UpdateWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[101] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *InvokeStateMachineMethodRequest) Reset() { + *x = InvokeStateMachineMethodRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[136] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateWorkflowExecutionRequest) String() string { +func (x *InvokeStateMachineMethodRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateWorkflowExecutionRequest) ProtoMessage() {} +func (*InvokeStateMachineMethodRequest) ProtoMessage() {} -func (x *UpdateWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[101] - if protoimpl.UnsafeEnabled && x != nil { +func (x *InvokeStateMachineMethodRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[136] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6462,51 +8988,77 @@ func (x *UpdateWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*UpdateWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{101} +// Deprecated: Use InvokeStateMachineMethodRequest.ProtoReflect.Descriptor instead. +func (*InvokeStateMachineMethodRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{136} } -func (x *UpdateWorkflowExecutionRequest) GetNamespaceId() string { +func (x *InvokeStateMachineMethodRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *UpdateWorkflowExecutionRequest) GetRequest() *v1.UpdateWorkflowExecutionRequest { +func (x *InvokeStateMachineMethodRequest) GetWorkflowId() string { if x != nil { - return x.Request + return x.WorkflowId } - return nil + return "" } -type UpdateWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *InvokeStateMachineMethodRequest) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} - Response *v1.UpdateWorkflowExecutionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +func (x *InvokeStateMachineMethodRequest) GetRef() *v110.StateMachineRef { + if x != nil { + return x.Ref + } + return nil } -func (x *UpdateWorkflowExecutionResponse) Reset() { - *x = UpdateWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[102] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *InvokeStateMachineMethodRequest) GetMethodName() string { + if x != nil { + return x.MethodName } + return "" } -func (x *UpdateWorkflowExecutionResponse) String() string { +func (x *InvokeStateMachineMethodRequest) GetInput() []byte { + if x != nil { + return x.Input + } + return nil +} + +type InvokeStateMachineMethodResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Output, in serialized bytes, of the method. Users specify a serializer during method registration for each state machine. + Output []byte `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokeStateMachineMethodResponse) Reset() { + *x = InvokeStateMachineMethodResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[137] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokeStateMachineMethodResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateWorkflowExecutionResponse) ProtoMessage() {} +func (*InvokeStateMachineMethodResponse) ProtoMessage() {} -func (x *UpdateWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[102] - if protoimpl.UnsafeEnabled && x != nil { +func (x *InvokeStateMachineMethodResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[137] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6516,47 +9068,41 @@ func (x *UpdateWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// 
Deprecated: Use UpdateWorkflowExecutionResponse.ProtoReflect.Descriptor instead. -func (*UpdateWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{102} +// Deprecated: Use InvokeStateMachineMethodResponse.ProtoReflect.Descriptor instead. +func (*InvokeStateMachineMethodResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{137} } -func (x *UpdateWorkflowExecutionResponse) GetResponse() *v1.UpdateWorkflowExecutionResponse { +func (x *InvokeStateMachineMethodResponse) GetOutput() []byte { if x != nil { - return x.Response + return x.Output } return nil } -type StreamWorkflowReplicationMessagesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DeepHealthCheckRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + HostAddress string `protobuf:"bytes,1,opt,name=host_address,json=hostAddress,proto3" json:"host_address,omitempty"` unknownFields protoimpl.UnknownFields - - // Types that are assignable to Attributes: - // - // *StreamWorkflowReplicationMessagesRequest_SyncReplicationState - Attributes isStreamWorkflowReplicationMessagesRequest_Attributes `protobuf_oneof:"attributes"` + sizeCache protoimpl.SizeCache } -func (x *StreamWorkflowReplicationMessagesRequest) Reset() { - *x = StreamWorkflowReplicationMessagesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[103] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeepHealthCheckRequest) Reset() { + *x = DeepHealthCheckRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[138] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *StreamWorkflowReplicationMessagesRequest) String() string { 
+func (x *DeepHealthCheckRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StreamWorkflowReplicationMessagesRequest) ProtoMessage() {} +func (*DeepHealthCheckRequest) ProtoMessage() {} -func (x *StreamWorkflowReplicationMessagesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[103] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeepHealthCheckRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[138] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6566,65 +9112,43 @@ func (x *StreamWorkflowReplicationMessagesRequest) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use StreamWorkflowReplicationMessagesRequest.ProtoReflect.Descriptor instead. -func (*StreamWorkflowReplicationMessagesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{103} -} - -func (m *StreamWorkflowReplicationMessagesRequest) GetAttributes() isStreamWorkflowReplicationMessagesRequest_Attributes { - if m != nil { - return m.Attributes - } - return nil +// Deprecated: Use DeepHealthCheckRequest.ProtoReflect.Descriptor instead. 
+func (*DeepHealthCheckRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{138} } -func (x *StreamWorkflowReplicationMessagesRequest) GetSyncReplicationState() *v115.SyncReplicationState { - if x, ok := x.GetAttributes().(*StreamWorkflowReplicationMessagesRequest_SyncReplicationState); ok { - return x.SyncReplicationState +func (x *DeepHealthCheckRequest) GetHostAddress() string { + if x != nil { + return x.HostAddress } - return nil -} - -type isStreamWorkflowReplicationMessagesRequest_Attributes interface { - isStreamWorkflowReplicationMessagesRequest_Attributes() -} - -type StreamWorkflowReplicationMessagesRequest_SyncReplicationState struct { - SyncReplicationState *v115.SyncReplicationState `protobuf:"bytes,1,opt,name=sync_replication_state,json=syncReplicationState,proto3,oneof"` -} - -func (*StreamWorkflowReplicationMessagesRequest_SyncReplicationState) isStreamWorkflowReplicationMessagesRequest_Attributes() { + return "" } -type StreamWorkflowReplicationMessagesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DeepHealthCheckResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + State v112.HealthState `protobuf:"varint,1,opt,name=state,proto3,enum=temporal.server.api.enums.v1.HealthState" json:"state,omitempty"` + // Per-check diagnostic results. Populated for all checks regardless of state. 
+ Checks []*v122.HealthCheck `protobuf:"bytes,2,rep,name=checks,proto3" json:"checks,omitempty"` unknownFields protoimpl.UnknownFields - - // Types that are assignable to Attributes: - // - // *StreamWorkflowReplicationMessagesResponse_Messages - Attributes isStreamWorkflowReplicationMessagesResponse_Attributes `protobuf_oneof:"attributes"` + sizeCache protoimpl.SizeCache } -func (x *StreamWorkflowReplicationMessagesResponse) Reset() { - *x = StreamWorkflowReplicationMessagesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[104] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeepHealthCheckResponse) Reset() { + *x = DeepHealthCheckResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[139] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *StreamWorkflowReplicationMessagesResponse) String() string { +func (x *DeepHealthCheckResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StreamWorkflowReplicationMessagesResponse) ProtoMessage() {} +func (*DeepHealthCheckResponse) ProtoMessage() {} -func (x *StreamWorkflowReplicationMessagesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[104] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeepHealthCheckResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[139] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6634,63 +9158,54 @@ func (x *StreamWorkflowReplicationMessagesResponse) ProtoReflect() protoreflect. return mi.MessageOf(x) } -// Deprecated: Use StreamWorkflowReplicationMessagesResponse.ProtoReflect.Descriptor instead. 
-func (*StreamWorkflowReplicationMessagesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{104} +// Deprecated: Use DeepHealthCheckResponse.ProtoReflect.Descriptor instead. +func (*DeepHealthCheckResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{139} } -func (m *StreamWorkflowReplicationMessagesResponse) GetAttributes() isStreamWorkflowReplicationMessagesResponse_Attributes { - if m != nil { - return m.Attributes +func (x *DeepHealthCheckResponse) GetState() v112.HealthState { + if x != nil { + return x.State } - return nil + return v112.HealthState(0) } -func (x *StreamWorkflowReplicationMessagesResponse) GetMessages() *v115.WorkflowReplicationMessages { - if x, ok := x.GetAttributes().(*StreamWorkflowReplicationMessagesResponse_Messages); ok { - return x.Messages +func (x *DeepHealthCheckResponse) GetChecks() []*v122.HealthCheck { + if x != nil { + return x.Checks } return nil } -type isStreamWorkflowReplicationMessagesResponse_Attributes interface { - isStreamWorkflowReplicationMessagesResponse_Attributes() -} - -type StreamWorkflowReplicationMessagesResponse_Messages struct { - Messages *v115.WorkflowReplicationMessages `protobuf:"bytes,1,opt,name=messages,proto3,oneof"` -} - -func (*StreamWorkflowReplicationMessagesResponse_Messages) isStreamWorkflowReplicationMessagesResponse_Attributes() { -} - -type PollWorkflowExecutionUpdateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type SyncWorkflowStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v14.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + VersionedTransition *v110.VersionedTransition 
`protobuf:"bytes,3,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + VersionHistories *v19.VersionHistories `protobuf:"bytes,4,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + TargetClusterId int32 `protobuf:"varint,5,opt,name=target_cluster_id,json=targetClusterId,proto3" json:"target_cluster_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,6,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.PollWorkflowExecutionUpdateRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *PollWorkflowExecutionUpdateRequest) Reset() { - *x = PollWorkflowExecutionUpdateRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[105] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *SyncWorkflowStateRequest) Reset() { + *x = SyncWorkflowStateRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[140] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *PollWorkflowExecutionUpdateRequest) String() string { +func (x *SyncWorkflowStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PollWorkflowExecutionUpdateRequest) ProtoMessage() {} +func (*SyncWorkflowStateRequest) ProtoMessage() {} -func (x *PollWorkflowExecutionUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[105] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SyncWorkflowStateRequest) 
ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[140] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6700,51 +9215,76 @@ func (x *PollWorkflowExecutionUpdateRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use PollWorkflowExecutionUpdateRequest.ProtoReflect.Descriptor instead. -func (*PollWorkflowExecutionUpdateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{105} +// Deprecated: Use SyncWorkflowStateRequest.ProtoReflect.Descriptor instead. +func (*SyncWorkflowStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{140} } -func (x *PollWorkflowExecutionUpdateRequest) GetNamespaceId() string { +func (x *SyncWorkflowStateRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *PollWorkflowExecutionUpdateRequest) GetRequest() *v1.PollWorkflowExecutionUpdateRequest { +func (x *SyncWorkflowStateRequest) GetExecution() *v14.WorkflowExecution { if x != nil { - return x.Request + return x.Execution } return nil } -type PollWorkflowExecutionUpdateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SyncWorkflowStateRequest) GetVersionedTransition() *v110.VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} - Response *v1.PollWorkflowExecutionUpdateResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +func (x *SyncWorkflowStateRequest) GetVersionHistories() *v19.VersionHistories { + if x != nil { + return x.VersionHistories + } + return nil } -func (x *PollWorkflowExecutionUpdateResponse) Reset() { - *x = 
PollWorkflowExecutionUpdateResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[106] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SyncWorkflowStateRequest) GetTargetClusterId() int32 { + if x != nil { + return x.TargetClusterId } + return 0 } -func (x *PollWorkflowExecutionUpdateResponse) String() string { +func (x *SyncWorkflowStateRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +type SyncWorkflowStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + VersionedTransitionArtifact *v117.VersionedTransitionArtifact `protobuf:"bytes,5,opt,name=versioned_transition_artifact,json=versionedTransitionArtifact,proto3" json:"versioned_transition_artifact,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncWorkflowStateResponse) Reset() { + *x = SyncWorkflowStateResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[141] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncWorkflowStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PollWorkflowExecutionUpdateResponse) ProtoMessage() {} +func (*SyncWorkflowStateResponse) ProtoMessage() {} -func (x *PollWorkflowExecutionUpdateResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[106] - if protoimpl.UnsafeEnabled && x != nil { +func (x *SyncWorkflowStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[141] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6754,45 +9294,45 @@ func (x *PollWorkflowExecutionUpdateResponse) ProtoReflect() 
protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use PollWorkflowExecutionUpdateResponse.ProtoReflect.Descriptor instead. -func (*PollWorkflowExecutionUpdateResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{106} +// Deprecated: Use SyncWorkflowStateResponse.ProtoReflect.Descriptor instead. +func (*SyncWorkflowStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{141} } -func (x *PollWorkflowExecutionUpdateResponse) GetResponse() *v1.PollWorkflowExecutionUpdateResponse { +func (x *SyncWorkflowStateResponse) GetVersionedTransitionArtifact() *v117.VersionedTransitionArtifact { if x != nil { - return x.Response + return x.VersionedTransitionArtifact } return nil } -type GetWorkflowExecutionHistoryRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// (-- api-linter: core::0134::request-mask-required=disabled +// (-- api-linter: core::0134::request-resource-required=disabled +type UpdateActivityOptionsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID of the workflow which scheduled this activity + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + UpdateRequest *v1.UpdateActivityOptionsRequest `protobuf:"bytes,2,opt,name=update_request,json=updateRequest,proto3" json:"update_request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.GetWorkflowExecutionHistoryRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionHistoryRequest) Reset() { - *x = GetWorkflowExecutionHistoryRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[107] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateActivityOptionsRequest) Reset() { + *x = UpdateActivityOptionsRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[142] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionHistoryRequest) String() string { +func (x *UpdateActivityOptionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionHistoryRequest) ProtoMessage() {} +func (*UpdateActivityOptionsRequest) ProtoMessage() {} -func (x *GetWorkflowExecutionHistoryRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[107] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateActivityOptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[142] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6801,52 +9341,50 @@ func (x *GetWorkflowExecutionHistoryRequest) ProtoReflect() protoreflect.Message } return mi.MessageOf(x) } - -// Deprecated: Use GetWorkflowExecutionHistoryRequest.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionHistoryRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{107} + +// Deprecated: Use UpdateActivityOptionsRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateActivityOptionsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{142} } -func (x *GetWorkflowExecutionHistoryRequest) GetNamespaceId() string { +func (x *UpdateActivityOptionsRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetWorkflowExecutionHistoryRequest) GetRequest() *v1.GetWorkflowExecutionHistoryRequest { +func (x *UpdateActivityOptionsRequest) GetUpdateRequest() *v1.UpdateActivityOptionsRequest { if x != nil { - return x.Request + return x.UpdateRequest } return nil } -type GetWorkflowExecutionHistoryResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Response *v1.GetWorkflowExecutionHistoryResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +type UpdateActivityOptionsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Activity options after an update + ActivityOptions *v123.ActivityOptions `protobuf:"bytes,1,opt,name=activity_options,json=activityOptions,proto3" json:"activity_options,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionHistoryResponse) Reset() { - *x = GetWorkflowExecutionHistoryResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[108] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateActivityOptionsResponse) Reset() { + *x = UpdateActivityOptionsResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[143] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionHistoryResponse) String() string { +func (x *UpdateActivityOptionsResponse) String() string { return protoimpl.X.MessageStringOf(x) } 
-func (*GetWorkflowExecutionHistoryResponse) ProtoMessage() {} +func (*UpdateActivityOptionsResponse) ProtoMessage() {} -func (x *GetWorkflowExecutionHistoryResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[108] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateActivityOptionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[143] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6856,45 +9394,43 @@ func (x *GetWorkflowExecutionHistoryResponse) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionHistoryResponse.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionHistoryResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{108} +// Deprecated: Use UpdateActivityOptionsResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateActivityOptionsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{143} } -func (x *GetWorkflowExecutionHistoryResponse) GetResponse() *v1.GetWorkflowExecutionHistoryResponse { +func (x *UpdateActivityOptionsResponse) GetActivityOptions() *v123.ActivityOptions { if x != nil { - return x.Response + return x.ActivityOptions } return nil } -type GetWorkflowExecutionHistoryReverseRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.GetWorkflowExecutionHistoryReverseRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +type PauseActivityRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID of the workflow which scheduled this activity + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.PauseActivityRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionHistoryReverseRequest) Reset() { - *x = GetWorkflowExecutionHistoryReverseRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[109] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PauseActivityRequest) Reset() { + *x = PauseActivityRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[144] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionHistoryReverseRequest) String() string { +func (x 
*PauseActivityRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionHistoryReverseRequest) ProtoMessage() {} +func (*PauseActivityRequest) ProtoMessage() {} -func (x *GetWorkflowExecutionHistoryReverseRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[109] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PauseActivityRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[144] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6904,51 +9440,47 @@ func (x *GetWorkflowExecutionHistoryReverseRequest) ProtoReflect() protoreflect. return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionHistoryReverseRequest.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionHistoryReverseRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{109} +// Deprecated: Use PauseActivityRequest.ProtoReflect.Descriptor instead. 
+func (*PauseActivityRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{144} } -func (x *GetWorkflowExecutionHistoryReverseRequest) GetNamespaceId() string { +func (x *PauseActivityRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetWorkflowExecutionHistoryReverseRequest) GetRequest() *v1.GetWorkflowExecutionHistoryReverseRequest { +func (x *PauseActivityRequest) GetFrontendRequest() *v1.PauseActivityRequest { if x != nil { - return x.Request + return x.FrontendRequest } return nil } -type GetWorkflowExecutionHistoryReverseResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type PauseActivityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - Response *v1.GetWorkflowExecutionHistoryReverseResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionHistoryReverseResponse) Reset() { - *x = GetWorkflowExecutionHistoryReverseResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[110] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PauseActivityResponse) Reset() { + *x = PauseActivityResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[145] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionHistoryReverseResponse) String() string { +func (x *PauseActivityResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionHistoryReverseResponse) ProtoMessage() {} +func (*PauseActivityResponse) ProtoMessage() {} -func (x *GetWorkflowExecutionHistoryReverseResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[110] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PauseActivityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[145] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -6958,48 +9490,36 @@ func (x *GetWorkflowExecutionHistoryReverseResponse) ProtoReflect() protoreflect return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionHistoryReverseResponse.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionHistoryReverseResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{110} -} - -func (x *GetWorkflowExecutionHistoryReverseResponse) GetResponse() *v1.GetWorkflowExecutionHistoryReverseResponse { - if x != nil { - return x.Response - } - return nil +// Deprecated: Use PauseActivityResponse.ProtoReflect.Descriptor instead. +func (*PauseActivityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{145} } -// * -// StartEventId defines the beginning of the event to fetch. The first event is exclusive. -// EndEventId and EndEventVersion defines the end of the event to fetch. The end event is exclusive. 
-type GetWorkflowExecutionRawHistoryV2Request struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v116.GetWorkflowExecutionRawHistoryV2Request `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +type UnpauseActivityRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID of the workflow which scheduled this activity + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.UnpauseActivityRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionRawHistoryV2Request) Reset() { - *x = GetWorkflowExecutionRawHistoryV2Request{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[111] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UnpauseActivityRequest) Reset() { + *x = UnpauseActivityRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[146] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionRawHistoryV2Request) String() string { +func (x *UnpauseActivityRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionRawHistoryV2Request) ProtoMessage() {} +func (*UnpauseActivityRequest) ProtoMessage() {} -func (x *GetWorkflowExecutionRawHistoryV2Request) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[111] - if protoimpl.UnsafeEnabled && x != nil { +func (x 
*UnpauseActivityRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[146] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7009,51 +9529,47 @@ func (x *GetWorkflowExecutionRawHistoryV2Request) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionRawHistoryV2Request.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionRawHistoryV2Request) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{111} +// Deprecated: Use UnpauseActivityRequest.ProtoReflect.Descriptor instead. +func (*UnpauseActivityRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{146} } -func (x *GetWorkflowExecutionRawHistoryV2Request) GetNamespaceId() string { +func (x *UnpauseActivityRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetWorkflowExecutionRawHistoryV2Request) GetRequest() *v116.GetWorkflowExecutionRawHistoryV2Request { +func (x *UnpauseActivityRequest) GetFrontendRequest() *v1.UnpauseActivityRequest { if x != nil { - return x.Request + return x.FrontendRequest } return nil } -type GetWorkflowExecutionRawHistoryV2Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type UnpauseActivityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - Response *v116.GetWorkflowExecutionRawHistoryV2Response `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionRawHistoryV2Response) Reset() { - *x = GetWorkflowExecutionRawHistoryV2Response{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[112] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UnpauseActivityResponse) Reset() { + *x = UnpauseActivityResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[147] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionRawHistoryV2Response) String() string { +func (x *UnpauseActivityResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionRawHistoryV2Response) ProtoMessage() {} +func (*UnpauseActivityResponse) ProtoMessage() {} -func (x *GetWorkflowExecutionRawHistoryV2Response) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[112] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UnpauseActivityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[147] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7063,45 +9579,36 @@ func (x *GetWorkflowExecutionRawHistoryV2Response) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionRawHistoryV2Response.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionRawHistoryV2Response) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{112} -} - -func (x *GetWorkflowExecutionRawHistoryV2Response) GetResponse() *v116.GetWorkflowExecutionRawHistoryV2Response { - if x != nil { - return x.Response - } - return nil +// Deprecated: Use UnpauseActivityResponse.ProtoReflect.Descriptor instead. 
+func (*UnpauseActivityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{147} } -type GetWorkflowExecutionRawHistoryRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v116.GetWorkflowExecutionRawHistoryRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +type ResetActivityRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID of the workflow which scheduled this activity + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.ResetActivityRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionRawHistoryRequest) Reset() { - *x = GetWorkflowExecutionRawHistoryRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[113] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ResetActivityRequest) Reset() { + *x = ResetActivityRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[148] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionRawHistoryRequest) String() string { +func (x *ResetActivityRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionRawHistoryRequest) ProtoMessage() {} +func (*ResetActivityRequest) ProtoMessage() {} -func (x *GetWorkflowExecutionRawHistoryRequest) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[113] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ResetActivityRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[148] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7111,51 +9618,47 @@ func (x *GetWorkflowExecutionRawHistoryRequest) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionRawHistoryRequest.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionRawHistoryRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{113} +// Deprecated: Use ResetActivityRequest.ProtoReflect.Descriptor instead. +func (*ResetActivityRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{148} } -func (x *GetWorkflowExecutionRawHistoryRequest) GetNamespaceId() string { +func (x *ResetActivityRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetWorkflowExecutionRawHistoryRequest) GetRequest() *v116.GetWorkflowExecutionRawHistoryRequest { +func (x *ResetActivityRequest) GetFrontendRequest() *v1.ResetActivityRequest { if x != nil { - return x.Request + return x.FrontendRequest } return nil } -type GetWorkflowExecutionRawHistoryResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ResetActivityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - Response *v116.GetWorkflowExecutionRawHistoryResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetWorkflowExecutionRawHistoryResponse) Reset() { - *x = 
GetWorkflowExecutionRawHistoryResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[114] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ResetActivityResponse) Reset() { + *x = ResetActivityResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[149] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkflowExecutionRawHistoryResponse) String() string { +func (x *ResetActivityResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkflowExecutionRawHistoryResponse) ProtoMessage() {} +func (*ResetActivityResponse) ProtoMessage() {} -func (x *GetWorkflowExecutionRawHistoryResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[114] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ResetActivityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[149] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7165,45 +9668,37 @@ func (x *GetWorkflowExecutionRawHistoryResponse) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use GetWorkflowExecutionRawHistoryResponse.ProtoReflect.Descriptor instead. -func (*GetWorkflowExecutionRawHistoryResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{114} -} - -func (x *GetWorkflowExecutionRawHistoryResponse) GetResponse() *v116.GetWorkflowExecutionRawHistoryResponse { - if x != nil { - return x.Response - } - return nil +// Deprecated: Use ResetActivityResponse.ProtoReflect.Descriptor instead. 
+func (*ResetActivityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{149} } -type ForceDeleteWorkflowExecutionRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// (-- api-linter: core::0134::request-mask-required=disabled +// (-- api-linter: core::0134::request-resource-required=disabled +type UpdateWorkflowExecutionOptionsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + UpdateRequest *v1.UpdateWorkflowExecutionOptionsRequest `protobuf:"bytes,2,opt,name=update_request,json=updateRequest,proto3" json:"update_request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v116.DeleteWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ForceDeleteWorkflowExecutionRequest) Reset() { - *x = ForceDeleteWorkflowExecutionRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[115] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateWorkflowExecutionOptionsRequest) Reset() { + *x = UpdateWorkflowExecutionOptionsRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[150] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ForceDeleteWorkflowExecutionRequest) String() string { +func (x *UpdateWorkflowExecutionOptionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ForceDeleteWorkflowExecutionRequest) ProtoMessage() {} +func (*UpdateWorkflowExecutionOptionsRequest) ProtoMessage() {} -func (x 
*ForceDeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[115] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateWorkflowExecutionOptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[150] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7213,51 +9708,49 @@ func (x *ForceDeleteWorkflowExecutionRequest) ProtoReflect() protoreflect.Messag return mi.MessageOf(x) } -// Deprecated: Use ForceDeleteWorkflowExecutionRequest.ProtoReflect.Descriptor instead. -func (*ForceDeleteWorkflowExecutionRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{115} +// Deprecated: Use UpdateWorkflowExecutionOptionsRequest.ProtoReflect.Descriptor instead. +func (*UpdateWorkflowExecutionOptionsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{150} } -func (x *ForceDeleteWorkflowExecutionRequest) GetNamespaceId() string { +func (x *UpdateWorkflowExecutionOptionsRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *ForceDeleteWorkflowExecutionRequest) GetRequest() *v116.DeleteWorkflowExecutionRequest { +func (x *UpdateWorkflowExecutionOptionsRequest) GetUpdateRequest() *v1.UpdateWorkflowExecutionOptionsRequest { if x != nil { - return x.Request + return x.UpdateRequest } return nil } -type ForceDeleteWorkflowExecutionResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Response *v116.DeleteWorkflowExecutionResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +type UpdateWorkflowExecutionOptionsResponse struct { + 
state protoimpl.MessageState `protogen:"open.v1"` + // Workflow Execution options after update. + WorkflowExecutionOptions *v15.WorkflowExecutionOptions `protobuf:"bytes,1,opt,name=workflow_execution_options,json=workflowExecutionOptions,proto3" json:"workflow_execution_options,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ForceDeleteWorkflowExecutionResponse) Reset() { - *x = ForceDeleteWorkflowExecutionResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[116] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateWorkflowExecutionOptionsResponse) Reset() { + *x = UpdateWorkflowExecutionOptionsResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[151] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ForceDeleteWorkflowExecutionResponse) String() string { +func (x *UpdateWorkflowExecutionOptionsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ForceDeleteWorkflowExecutionResponse) ProtoMessage() {} +func (*UpdateWorkflowExecutionOptionsResponse) ProtoMessage() {} -func (x *ForceDeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[116] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateWorkflowExecutionOptionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[151] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7267,47 +9760,43 @@ func (x *ForceDeleteWorkflowExecutionResponse) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use ForceDeleteWorkflowExecutionResponse.ProtoReflect.Descriptor 
instead. -func (*ForceDeleteWorkflowExecutionResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{116} +// Deprecated: Use UpdateWorkflowExecutionOptionsResponse.ProtoReflect.Descriptor instead. +func (*UpdateWorkflowExecutionOptionsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{151} } -func (x *ForceDeleteWorkflowExecutionResponse) GetResponse() *v116.DeleteWorkflowExecutionResponse { +func (x *UpdateWorkflowExecutionOptionsResponse) GetWorkflowExecutionOptions() *v15.WorkflowExecutionOptions { if x != nil { - return x.Response + return x.WorkflowExecutionOptions } return nil } -type GetDLQTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type PauseWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID of the workflow which is being paused + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + PauseRequest *v1.PauseWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=pause_request,json=pauseRequest,proto3" json:"pause_request,omitempty"` unknownFields protoimpl.UnknownFields - - DlqKey *v117.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` - // page_size must be positive. Up to this many tasks will be returned. 
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetDLQTasksRequest) Reset() { - *x = GetDLQTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[117] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PauseWorkflowExecutionRequest) Reset() { + *x = PauseWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[152] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetDLQTasksRequest) String() string { +func (x *PauseWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetDLQTasksRequest) ProtoMessage() {} +func (*PauseWorkflowExecutionRequest) ProtoMessage() {} -func (x *GetDLQTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[117] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PauseWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[152] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7317,62 +9806,47 @@ func (x *GetDLQTasksRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetDLQTasksRequest.ProtoReflect.Descriptor instead. 
-func (*GetDLQTasksRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{117} -} - -func (x *GetDLQTasksRequest) GetDlqKey() *v117.HistoryDLQKey { - if x != nil { - return x.DlqKey - } - return nil +// Deprecated: Use PauseWorkflowExecutionRequest.ProtoReflect.Descriptor instead. +func (*PauseWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{152} } -func (x *GetDLQTasksRequest) GetPageSize() int32 { +func (x *PauseWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { - return x.PageSize + return x.NamespaceId } - return 0 + return "" } -func (x *GetDLQTasksRequest) GetNextPageToken() []byte { +func (x *PauseWorkflowExecutionRequest) GetPauseRequest() *v1.PauseWorkflowExecutionRequest { if x != nil { - return x.NextPageToken + return x.PauseRequest } return nil } -type GetDLQTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type PauseWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - DlqTasks []*v117.HistoryDLQTask `protobuf:"bytes,1,rep,name=dlq_tasks,json=dlqTasks,proto3" json:"dlq_tasks,omitempty"` - // next_page_token is empty if there are no more results. However, the converse is not true. If there are no more - // results, this field may still be non-empty. This is to avoid having to do a count query to determine whether - // there are more results. 
- NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetDLQTasksResponse) Reset() { - *x = GetDLQTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[118] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PauseWorkflowExecutionResponse) Reset() { + *x = PauseWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[153] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetDLQTasksResponse) String() string { +func (x *PauseWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetDLQTasksResponse) ProtoMessage() {} +func (*PauseWorkflowExecutionResponse) ProtoMessage() {} -func (x *GetDLQTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[118] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PauseWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[153] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7382,52 +9856,36 @@ func (x *GetDLQTasksResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetDLQTasksResponse.ProtoReflect.Descriptor instead. 
-func (*GetDLQTasksResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{118} -} - -func (x *GetDLQTasksResponse) GetDlqTasks() []*v117.HistoryDLQTask { - if x != nil { - return x.DlqTasks - } - return nil -} - -func (x *GetDLQTasksResponse) GetNextPageToken() []byte { - if x != nil { - return x.NextPageToken - } - return nil +// Deprecated: Use PauseWorkflowExecutionResponse.ProtoReflect.Descriptor instead. +func (*PauseWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{153} } -type DeleteDLQTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - DlqKey *v117.HistoryDLQKey `protobuf:"bytes,1,opt,name=dlq_key,json=dlqKey,proto3" json:"dlq_key,omitempty"` - InclusiveMaxTaskMetadata *v117.HistoryDLQTaskMetadata `protobuf:"bytes,2,opt,name=inclusive_max_task_metadata,json=inclusiveMaxTaskMetadata,proto3" json:"inclusive_max_task_metadata,omitempty"` +type UnpauseWorkflowExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID of the workflow which is being unpaused + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + UnpauseRequest *v1.UnpauseWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=unpause_request,json=unpauseRequest,proto3" json:"unpause_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DeleteDLQTasksRequest) Reset() { - *x = DeleteDLQTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[119] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UnpauseWorkflowExecutionRequest) Reset() { + *x = 
UnpauseWorkflowExecutionRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[154] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DeleteDLQTasksRequest) String() string { +func (x *UnpauseWorkflowExecutionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteDLQTasksRequest) ProtoMessage() {} +func (*UnpauseWorkflowExecutionRequest) ProtoMessage() {} -func (x *DeleteDLQTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[119] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UnpauseWorkflowExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[154] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7437,52 +9895,47 @@ func (x *DeleteDLQTasksRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteDLQTasksRequest.ProtoReflect.Descriptor instead. -func (*DeleteDLQTasksRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{119} +// Deprecated: Use UnpauseWorkflowExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*UnpauseWorkflowExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{154} } -func (x *DeleteDLQTasksRequest) GetDlqKey() *v117.HistoryDLQKey { +func (x *UnpauseWorkflowExecutionRequest) GetNamespaceId() string { if x != nil { - return x.DlqKey + return x.NamespaceId } - return nil + return "" } -func (x *DeleteDLQTasksRequest) GetInclusiveMaxTaskMetadata() *v117.HistoryDLQTaskMetadata { +func (x *UnpauseWorkflowExecutionRequest) GetUnpauseRequest() *v1.UnpauseWorkflowExecutionRequest { if x != nil { - return x.InclusiveMaxTaskMetadata + return x.UnpauseRequest } return nil } -type DeleteDLQTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type UnpauseWorkflowExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - // messages_deleted is the total number of messages deleted in DeleteDLQTasks operation. 
- MessagesDeleted int64 `protobuf:"varint,1,opt,name=messages_deleted,json=messagesDeleted,proto3" json:"messages_deleted,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *DeleteDLQTasksResponse) Reset() { - *x = DeleteDLQTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[120] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UnpauseWorkflowExecutionResponse) Reset() { + *x = UnpauseWorkflowExecutionResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[155] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DeleteDLQTasksResponse) String() string { +func (x *UnpauseWorkflowExecutionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteDLQTasksResponse) ProtoMessage() {} +func (*UnpauseWorkflowExecutionResponse) ProtoMessage() {} -func (x *DeleteDLQTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[120] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UnpauseWorkflowExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[155] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7492,46 +9945,36 @@ func (x *DeleteDLQTasksResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeleteDLQTasksResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteDLQTasksResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{120} -} - -func (x *DeleteDLQTasksResponse) GetMessagesDeleted() int64 { - if x != nil { - return x.MessagesDeleted - } - return 0 +// Deprecated: Use UnpauseWorkflowExecutionResponse.ProtoReflect.Descriptor instead. +func (*UnpauseWorkflowExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{155} } -type ListQueuesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type StartNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Request *v121.StartOperationRequest `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - QueueType int32 `protobuf:"varint,1,opt,name=queue_type,json=queueType,proto3" json:"queue_type,omitempty"` - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - NextPageToken []byte `protobuf:"bytes,3,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ListQueuesRequest) Reset() { - *x = ListQueuesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[121] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *StartNexusOperationRequest) Reset() { + *x = StartNexusOperationRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[156] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } -func (x *ListQueuesRequest) String() string { +func (x *StartNexusOperationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListQueuesRequest) ProtoMessage() {} +func (*StartNexusOperationRequest) ProtoMessage() {} -func (x *ListQueuesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[121] - if protoimpl.UnsafeEnabled && x != nil { +func (x *StartNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[156] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7541,59 +9984,55 @@ func (x *ListQueuesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListQueuesRequest.ProtoReflect.Descriptor instead. -func (*ListQueuesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{121} +// Deprecated: Use StartNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*StartNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{156} } -func (x *ListQueuesRequest) GetQueueType() int32 { +func (x *StartNexusOperationRequest) GetNamespaceId() string { if x != nil { - return x.QueueType + return x.NamespaceId } - return 0 + return "" } -func (x *ListQueuesRequest) GetPageSize() int32 { +func (x *StartNexusOperationRequest) GetShardId() int32 { if x != nil { - return x.PageSize + return x.ShardId } return 0 } -func (x *ListQueuesRequest) GetNextPageToken() []byte { +func (x *StartNexusOperationRequest) GetRequest() *v121.StartOperationRequest { if x != nil { - return x.NextPageToken + return x.Request } return nil } -type ListQueuesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type StartNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v121.StartOperationResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields - - Queues []*ListQueuesResponse_QueueInfo `protobuf:"bytes,1,rep,name=queues,proto3" json:"queues,omitempty"` - NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ListQueuesResponse) Reset() { - *x = ListQueuesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[122] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *StartNexusOperationResponse) Reset() { + *x = StartNexusOperationResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[157] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ListQueuesResponse) String() string { +func (x *StartNexusOperationResponse) 
String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListQueuesResponse) ProtoMessage() {} +func (*StartNexusOperationResponse) ProtoMessage() {} -func (x *ListQueuesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[122] - if protoimpl.UnsafeEnabled && x != nil { +func (x *StartNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[157] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7603,56 +10042,43 @@ func (x *ListQueuesResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListQueuesResponse.ProtoReflect.Descriptor instead. -func (*ListQueuesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{122} -} - -func (x *ListQueuesResponse) GetQueues() []*ListQueuesResponse_QueueInfo { - if x != nil { - return x.Queues - } - return nil +// Deprecated: Use StartNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*StartNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{157} } -func (x *ListQueuesResponse) GetNextPageToken() []byte { +func (x *StartNexusOperationResponse) GetResponse() *v121.StartOperationResponse { if x != nil { - return x.NextPageToken + return x.Response } return nil } -type AddTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CancelNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + Request *v121.CancelOperationRequest `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - // Even though we can obtain the shard ID from the tasks, we still need the shard_id in the request for routing. If - // not, it would be possible to include tasks for shards that belong to different hosts, and we'd need to fan-out the - // request, which would be more complicated. - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - // A list of tasks to enqueue or re-enqueue. 
- Tasks []*AddTasksRequest_Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *AddTasksRequest) Reset() { - *x = AddTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[123] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CancelNexusOperationRequest) Reset() { + *x = CancelNexusOperationRequest{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[158] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddTasksRequest) String() string { +func (x *CancelNexusOperationRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddTasksRequest) ProtoMessage() {} +func (*CancelNexusOperationRequest) ProtoMessage() {} -func (x *AddTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[123] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CancelNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[158] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7662,49 +10088,55 @@ func (x *AddTasksRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddTasksRequest.ProtoReflect.Descriptor instead. -func (*AddTasksRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{123} +// Deprecated: Use CancelNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*CancelNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{158} } -func (x *AddTasksRequest) GetShardId() int32 { +func (x *CancelNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CancelNexusOperationRequest) GetShardId() int32 { if x != nil { return x.ShardId } return 0 } -func (x *AddTasksRequest) GetTasks() []*AddTasksRequest_Task { +func (x *CancelNexusOperationRequest) GetRequest() *v121.CancelOperationRequest { if x != nil { - return x.Tasks + return x.Request } return nil } -type AddTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CancelNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v121.CancelOperationResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *AddTasksResponse) Reset() { - *x = AddTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[124] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CancelNexusOperationResponse) Reset() { + *x = CancelNexusOperationResponse{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[159] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *AddTasksResponse) String() string { +func (x *CancelNexusOperationResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*AddTasksResponse) ProtoMessage() {} +func (*CancelNexusOperationResponse) ProtoMessage() {} -func (x *AddTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[124] - if protoimpl.UnsafeEnabled && 
x != nil { +func (x *CancelNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[159] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7714,37 +10146,45 @@ func (x *AddTasksResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use AddTasksResponse.ProtoReflect.Descriptor instead. -func (*AddTasksResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{124} +// Deprecated: Use CancelNexusOperationResponse.ProtoReflect.Descriptor instead. +func (*CancelNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{159} } -type ListTasksRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CancelNexusOperationResponse) GetResponse() *v121.CancelOperationResponse { + if x != nil { + return x.Response + } + return nil +} - Request *v116.ListHistoryTasksRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` +type ExecuteMultiOperationRequest_Operation struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Operation: + // + // *ExecuteMultiOperationRequest_Operation_StartWorkflow + // *ExecuteMultiOperationRequest_Operation_UpdateWorkflow + Operation isExecuteMultiOperationRequest_Operation_Operation `protobuf_oneof:"operation"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ListTasksRequest) Reset() { - *x = ListTasksRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[125] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - 
ms.StoreMessageInfo(mi) - } +func (x *ExecuteMultiOperationRequest_Operation) Reset() { + *x = ExecuteMultiOperationRequest_Operation{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[160] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ListTasksRequest) String() string { +func (x *ExecuteMultiOperationRequest_Operation) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListTasksRequest) ProtoMessage() {} +func (*ExecuteMultiOperationRequest_Operation) ProtoMessage() {} -func (x *ListTasksRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[125] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ExecuteMultiOperationRequest_Operation) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[160] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7754,44 +10194,81 @@ func (x *ListTasksRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListTasksRequest.ProtoReflect.Descriptor instead. -func (*ListTasksRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{125} +// Deprecated: Use ExecuteMultiOperationRequest_Operation.ProtoReflect.Descriptor instead. 
+func (*ExecuteMultiOperationRequest_Operation) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{9, 0} } -func (x *ListTasksRequest) GetRequest() *v116.ListHistoryTasksRequest { +func (x *ExecuteMultiOperationRequest_Operation) GetOperation() isExecuteMultiOperationRequest_Operation_Operation { if x != nil { - return x.Request + return x.Operation } return nil } -type ListTasksResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Response *v116.ListHistoryTasksResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +func (x *ExecuteMultiOperationRequest_Operation) GetStartWorkflow() *StartWorkflowExecutionRequest { + if x != nil { + if x, ok := x.Operation.(*ExecuteMultiOperationRequest_Operation_StartWorkflow); ok { + return x.StartWorkflow + } + } + return nil } -func (x *ListTasksResponse) Reset() { - *x = ListTasksResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[126] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ExecuteMultiOperationRequest_Operation) GetUpdateWorkflow() *UpdateWorkflowExecutionRequest { + if x != nil { + if x, ok := x.Operation.(*ExecuteMultiOperationRequest_Operation_UpdateWorkflow); ok { + return x.UpdateWorkflow + } } + return nil } -func (x *ListTasksResponse) String() string { +type isExecuteMultiOperationRequest_Operation_Operation interface { + isExecuteMultiOperationRequest_Operation_Operation() +} + +type ExecuteMultiOperationRequest_Operation_StartWorkflow struct { + StartWorkflow *StartWorkflowExecutionRequest `protobuf:"bytes,1,opt,name=start_workflow,json=startWorkflow,proto3,oneof"` +} + +type ExecuteMultiOperationRequest_Operation_UpdateWorkflow struct { + UpdateWorkflow *UpdateWorkflowExecutionRequest 
`protobuf:"bytes,2,opt,name=update_workflow,json=updateWorkflow,proto3,oneof"` +} + +func (*ExecuteMultiOperationRequest_Operation_StartWorkflow) isExecuteMultiOperationRequest_Operation_Operation() { +} + +func (*ExecuteMultiOperationRequest_Operation_UpdateWorkflow) isExecuteMultiOperationRequest_Operation_Operation() { +} + +type ExecuteMultiOperationResponse_Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Response: + // + // *ExecuteMultiOperationResponse_Response_StartWorkflow + // *ExecuteMultiOperationResponse_Response_UpdateWorkflow + Response isExecuteMultiOperationResponse_Response_Response `protobuf_oneof:"response"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExecuteMultiOperationResponse_Response) Reset() { + *x = ExecuteMultiOperationResponse_Response{} + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[161] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExecuteMultiOperationResponse_Response) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListTasksResponse) ProtoMessage() {} +func (*ExecuteMultiOperationResponse_Response) ProtoMessage() {} -func (x *ListTasksResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[126] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ExecuteMultiOperationResponse_Response) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[161] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7801,34 +10278,68 @@ func (x *ListTasksResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListTasksResponse.ProtoReflect.Descriptor instead. 
-func (*ListTasksResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{126} +// Deprecated: Use ExecuteMultiOperationResponse_Response.ProtoReflect.Descriptor instead. +func (*ExecuteMultiOperationResponse_Response) Descriptor() ([]byte, []int) { + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{10, 0} } -func (x *ListTasksResponse) GetResponse() *v116.ListHistoryTasksResponse { +func (x *ExecuteMultiOperationResponse_Response) GetResponse() isExecuteMultiOperationResponse_Response_Response { if x != nil { return x.Response } return nil } +func (x *ExecuteMultiOperationResponse_Response) GetStartWorkflow() *StartWorkflowExecutionResponse { + if x != nil { + if x, ok := x.Response.(*ExecuteMultiOperationResponse_Response_StartWorkflow); ok { + return x.StartWorkflow + } + } + return nil +} + +func (x *ExecuteMultiOperationResponse_Response) GetUpdateWorkflow() *UpdateWorkflowExecutionResponse { + if x != nil { + if x, ok := x.Response.(*ExecuteMultiOperationResponse_Response_UpdateWorkflow); ok { + return x.UpdateWorkflow + } + } + return nil +} + +type isExecuteMultiOperationResponse_Response_Response interface { + isExecuteMultiOperationResponse_Response_Response() +} + +type ExecuteMultiOperationResponse_Response_StartWorkflow struct { + StartWorkflow *StartWorkflowExecutionResponse `protobuf:"bytes,1,opt,name=start_workflow,json=startWorkflow,proto3,oneof"` +} + +type ExecuteMultiOperationResponse_Response_UpdateWorkflow struct { + UpdateWorkflow *UpdateWorkflowExecutionResponse `protobuf:"bytes,2,opt,name=update_workflow,json=updateWorkflow,proto3,oneof"` +} + +func (*ExecuteMultiOperationResponse_Response_StartWorkflow) isExecuteMultiOperationResponse_Response_Response() { +} + +func (*ExecuteMultiOperationResponse_Response_UpdateWorkflow) isExecuteMultiOperationResponse_Response_Response() { +} + type ListQueuesResponse_QueueInfo 
struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"` + MessageCount int64 `protobuf:"varint,2,opt,name=message_count,json=messageCount,proto3" json:"message_count,omitempty"` + LastMessageId int64 `protobuf:"varint,3,opt,name=last_message_id,json=lastMessageId,proto3" json:"last_message_id,omitempty"` unknownFields protoimpl.UnknownFields - - QueueName string `protobuf:"bytes,1,opt,name=queue_name,json=queueName,proto3" json:"queue_name,omitempty"` - MessageCount int64 `protobuf:"varint,2,opt,name=message_count,json=messageCount,proto3" json:"message_count,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListQueuesResponse_QueueInfo) Reset() { *x = ListQueuesResponse_QueueInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[131] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[167] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListQueuesResponse_QueueInfo) String() string { @@ -7838,8 +10349,8 @@ func (x *ListQueuesResponse_QueueInfo) String() string { func (*ListQueuesResponse_QueueInfo) ProtoMessage() {} func (x *ListQueuesResponse_QueueInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[131] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[167] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7851,7 +10362,7 @@ func (x *ListQueuesResponse_QueueInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use 
ListQueuesResponse_QueueInfo.ProtoReflect.Descriptor instead. func (*ListQueuesResponse_QueueInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{122, 0} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{127, 0} } func (x *ListQueuesResponse_QueueInfo) GetQueueName() string { @@ -7868,26 +10379,30 @@ func (x *ListQueuesResponse_QueueInfo) GetMessageCount() int64 { return 0 } -type AddTasksRequest_Task struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ListQueuesResponse_QueueInfo) GetLastMessageId() int64 { + if x != nil { + return x.LastMessageId + } + return 0 +} +type AddTasksRequest_Task struct { + state protoimpl.MessageState `protogen:"open.v1"` // category_id is needed to deserialize the tasks. See TaskCategory for a list of options here. However, keep in mind // that the list of valid options is registered dynamically with the server in the history/tasks package, so that // enum is not comprehensive. CategoryId int32 `protobuf:"varint,1,opt,name=category_id,json=categoryId,proto3" json:"category_id,omitempty"` // blob is the serialized task. 
- Blob *v14.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + Blob *v14.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddTasksRequest_Task) Reset() { *x = AddTasksRequest_Task{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[132] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[168] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddTasksRequest_Task) String() string { @@ -7897,8 +10412,8 @@ func (x *AddTasksRequest_Task) String() string { func (*AddTasksRequest_Task) ProtoMessage() {} func (x *AddTasksRequest_Task) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[132] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[168] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -7910,7 +10425,7 @@ func (x *AddTasksRequest_Task) ProtoReflect() protoreflect.Message { // Deprecated: Use AddTasksRequest_Task.ProtoReflect.Descriptor instead. 
func (*AddTasksRequest_Task) Descriptor() ([]byte, []int) { - return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{123, 0} + return file_temporal_server_api_historyservice_v1_request_response_proto_rawDescGZIP(), []int{128, 0} } func (x *AddTasksRequest_Task) GetCategoryId() int32 { @@ -7927,2177 +10442,1359 @@ func (x *AddTasksRequest_Task) GetBlob() *v14.DataBlob { return nil } +var file_temporal_server_api_historyservice_v1_request_response_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*RoutingOptions)(nil), + Field: 7234, + Name: "temporal.server.api.historyservice.v1.routing", + Tag: "bytes,7234,opt,name=routing", + Filename: "temporal/server/api/historyservice/v1/request_response.proto", + }, +} + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional temporal.server.api.historyservice.v1.RoutingOptions routing = 7234; + E_Routing = &file_temporal_server_api_historyservice_v1_request_response_proto_extTypes[0] +) + var File_temporal_server_api_historyservice_v1_request_response_proto protoreflect.FileDescriptor -var file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc = []byte{ - 0x0a, 0x3c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x64, 0x75, 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, - 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 
0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, - 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, - 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x2d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x30, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, - 
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3f, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x3a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x6c, 0x71, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x22, 0x85, 0x07, 0x0a, 0x1d, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 
0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x67, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6c, 0x0a, 0x15, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x13, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x6b, 0x0a, 0x22, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x74, - 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x1f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6c, 0x0a, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, - 0x75, 0x65, 0x5f, 0x61, 0x73, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, - 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x41, 0x73, 0x4e, 0x65, 0x77, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x16, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x41, 0x73, - 0x4e, 0x65, 0x77, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x51, 0x0a, 0x11, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x64, 0x5f, 0x66, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x69, - 0x6e, 0x75, 0x65, 0x64, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5a, - 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 
0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x14, 0x6c, - 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x1b, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x62, 0x61, 0x63, 0x6b, - 0x6f, 0x66, 0x66, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x66, 0x69, 0x72, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x60, 0x0a, 0x14, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x12, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x68, 0x00, 0x22, 0x92, 0x02, 0x0a, - 0x1e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, - 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, - 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 
0x6c, 0x6f, 0x63, 0x6b, - 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x72, 0x0a, 0x13, 0x65, 0x61, 0x67, - 0x65, 0x72, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x11, 0x65, 0x61, 0x67, 0x65, 0x72, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x22, 0xe5, 0x02, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x75, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, - 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, - 0x65, 0x78, 0x70, 
0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x65, 0x78, 0x70, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x4e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x72, - 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x68, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, - 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x49, 0x74, 0x65, 0x6d, 0x52, 0x12, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x42, 0x02, 0x68, 0x00, 0x22, 0xf7, 0x09, 0x0a, 0x17, - 0x47, 0x65, 0x74, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x4d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 
0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x6e, 0x65, - 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x3d, 0x0a, 0x19, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x16, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x13, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, - 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, - 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, - 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x54, 0x0a, 0x11, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, - 0x65, 
0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x52, 0x0f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x79, 0x0a, 0x2b, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, - 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x25, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x72, - 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x0e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 
0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x61, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x42, 0x0a, 0x1c, 0x69, 0x73, 0x5f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x73, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x38, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x78, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x78, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, 0x66, 0x69, 0x72, 0x73, 
0x74, - 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x66, 0x69, 0x72, 0x73, 0x74, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x60, - 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x08, 0x10, - 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x0c, - 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x22, 0xe6, 0x02, 0x0a, 0x17, 0x50, 0x6f, 0x6c, 0x6c, - 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x37, 0x0a, 0x16, 0x65, 
0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x78, - 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x13, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x4e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x72, 0x61, 0x6e, - 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x68, 0x0a, 0x14, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x74, 0x65, - 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x12, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x42, 0x02, 0x68, 0x00, 0x22, - 0xcc, 0x08, 0x0a, 0x18, 0x50, 0x6f, 0x6c, 0x6c, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x09, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, 0x77, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x26, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x19, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, - 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x31, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6c, - 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x54, 0x0a, 0x11, 0x73, 0x74, 0x69, 0x63, 0x6b, - 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 
0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x0f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x79, 0x0a, 0x2b, 0x73, 0x74, 0x69, - 0x63, 0x6b, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x25, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x6f, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x34, 0x0a, 0x14, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x61, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x10, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 
0x65, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x5f, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x78, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, 0x73, 0x74, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x78, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, - 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, - 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x66, 0x69, 0x72, - 0x73, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, - 0x08, 
0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x0d, 0x10, 0x0e, 0x22, 0x91, 0x01, 0x0a, 0x1b, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x53, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, - 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, - 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xad, 0x03, 0x0a, 0x20, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, - 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 
0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 0x0a, 0x0c, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x70, 0x6f, 0x6c, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, - 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x22, 0xa1, 0x0a, 0x0a, 0x21, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 
0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x3d, 0x0a, 0x19, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x16, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, - 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, - 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, - 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x18, 0x73, 0x74, 0x69, - 0x63, 0x6b, 0x79, 0x5f, 0x65, 0x78, 
0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x74, 0x69, 0x63, - 0x6b, 0x79, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x75, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, - 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x15, 0x74, 0x72, - 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6b, 0x0a, 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, - 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, - 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, - 0x0c, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 
0x65, 0x64, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x73, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, - 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x55, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x51, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, - 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, - 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x18, 
0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2e, - 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x12, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, - 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x68, 0x0a, 0x0c, 0x51, 0x75, - 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x65, 0x72, - 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x4a, - 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x22, 0xad, 0x03, 0x0a, 0x20, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 
0x53, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, - 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 0x0a, 0x0c, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x70, 0x6f, 0x6c, 0x6c, 0x52, 0x65, - 0x71, 
0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, - 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x22, 0xf5, 0x04, 0x0a, 0x21, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x52, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x63, 0x0a, 0x1e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, - 0x6d, 0x70, 0x74, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 
0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x1b, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, - 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x51, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x64, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x10, 0x68, 0x65, 0x61, - 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, - 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
- 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc1, 0x01, 0x0a, 0x23, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x73, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb5, 0x03, 0x0a, 0x24, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x10, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 
0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x04, 0x18, 0x01, 0x68, 0x00, - 0x52, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x69, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x72, 0x65, 0x73, 0x65, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6e, 0x0a, - 0x11, 0x6e, 0x65, 0x77, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 
0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x52, 0x0f, 0x6e, 0x65, 0x77, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb7, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x6c, 0x0a, 0x0e, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x1a, - 0x49, 0x73, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 
0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, - 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, - 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x3c, 0x0a, 0x1b, 0x49, 0x73, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x22, 0xc1, 0x01, 0x0a, 0x22, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, - 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x74, - 0x0a, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x10, 0x68, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x54, - 0x0a, 0x23, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xc1, 0x01, 0x0a, 0x23, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x73, 0x0a, 0x10, 0x63, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, - 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x26, 0x0a, 0x24, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6c, 0x0a, 0x0e, 0x66, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x52, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x22, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x6e, 0x0a, 0x0e, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x65, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x25, 0x0a, 0x23, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x87, 0x02, 0x0a, 0x1a, 0x49, 0x73, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, - 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 
- 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, - 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, - 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x3c, 0x0a, 0x1b, - 0x49, 0x73, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x08, 0x69, 0x73, - 0x5f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xd6, 0x02, 0x0a, 0x1e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 
0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, - 0x0a, 0x0e, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x0d, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6d, 0x0a, 0x1b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x19, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, - 0x0a, 0x13, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x69, 0x6c, - 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x6e, 0x6c, 0x79, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x21, 0x0a, 0x1f, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0xda, 0x01, 0x0a, 0x27, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x69, 
0x74, 0x68, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x87, 0x01, 0x0a, 0x19, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x69, 0x74, 0x68, - 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x16, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x6c, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x42, 0x02, 0x68, 0x00, 0x22, 0x63, 0x0a, 0x28, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x69, 0x74, - 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, - 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, - 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x22, 0xc9, 0x01, 0x0a, 0x1f, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 
0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x12, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x22, 0x0a, 0x20, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe2, 0x02, 0x0a, 0x21, 0x54, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x73, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x42, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x72, 0x6d, - 0x69, 0x6e, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x10, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x6d, 0x0a, 0x1b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x65, - 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x6e, 0x6c, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x6e, 0x6c, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0x24, 0x0a, 0x22, - 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x8a, 0x02, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 
0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2d, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, - 0x14, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x63, 0x6c, 0x6f, 0x73, 0x65, - 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4f, 0x6e, 0x6c, 0x79, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x21, 0x0a, 0x1f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0xaf, 0x01, 0x0a, 0x1d, 0x52, 0x65, 0x73, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x67, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x74, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x3b, 0x0a, 0x1e, 0x52, - 0x65, 0x73, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x06, 0x72, - 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa7, 0x03, 0x0a, 0x25, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, - 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x71, 0x0a, 0x0e, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
- 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0d, - 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x41, 0x0a, 0x1b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x18, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x6d, 0x0a, 0x1b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x19, 0x65, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4f, 0x6e, 0x6c, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0x28, 0x0a, 0x26, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xfd, 0x02, 0x0a, 
0x1b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x37, 0x0a, 0x16, 0x69, 0x73, 0x5f, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, - 0x69, 0x73, 0x46, 0x69, 0x72, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, - 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x50, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 
0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x1e, 0x0a, 0x1c, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0xf3, 0x01, 0x0a, 0x27, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x46, 0x69, 0x72, 0x73, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, - 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, - 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x05, 0x63, 0x6c, 0x6f, 0x63, 
0x6b, 0x42, 0x02, 0x68, 0x00, 0x22, 0x2a, 0x0a, 0x28, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x46, 0x69, 0x72, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, - 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x8c, 0x04, 0x0a, 0x24, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x43, 0x68, 0x69, - 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x58, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, - 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, - 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, - 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, - 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x27, 0x0a, 0x25, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xbf, 0x03, 0x0a, 0x2d, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 
0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x56, 0x0a, 0x0f, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, - 0x0a, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x3c, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x16, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6e, 
0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x22, 0x30, 0x0a, 0x2e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x20, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xed, 0x04, 0x0a, 0x21, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x57, 0x6f, 0x72, - 
0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x6b, 0x0a, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x60, 0x0a, 0x12, 0x70, 0x65, 0x6e, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 
0x12, 0x62, 0x0a, 0x10, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x69, 0x0a, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, - 0x67, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x13, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x09, - 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x9f, 0x04, 0x0a, 0x18, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, - 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 
0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x15, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, - 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x13, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, - 0x6c, 0x6f, 0x62, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, - 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x62, 0x61, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x0a, 0x6e, 0x65, - 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, - 0x65, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x56, 0x32, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd6, 0x01, 0x0a, 0x1d, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x63, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 
0x6f, 0x77, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, - 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x20, 0x0a, 0x1e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa3, 0x01, 0x0a, 0x16, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, - 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf6, 0x06, 0x0a, - 0x13, 0x53, 
0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, - 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 
0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x4e, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, - 0x61, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, - 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x07, 0x64, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, - 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, 0x74, 0x65, - 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x73, - 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, - 
0x69, 0x74, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x5b, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x62, 0x61, 0x73, 0x65, 0x5f, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x62, 0x61, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x22, 0xe4, 0x01, 0x0a, 0x15, - 0x53, 0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 
0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, - 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 0x0a, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x53, 0x79, 0x6e, 0x63, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x69, 0x65, 0x73, 0x49, - 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa4, 0x05, 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x53, 0x79, 0x6e, 0x63, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 
0x04, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, - 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x6c, 0x61, - 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0c, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x52, 0x0b, 0x6c, 0x61, 
0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x34, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, - 0x6c, 0x61, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0x16, 0x0a, - 0x14, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 
0x6e, 0x42, 0x02, 0x68, - 0x00, 0x22, 0x80, 0x02, 0x0a, 0x1c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x75, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x6c, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x72, 0x0a, 0x16, 0x64, 0x61, 0x74, - 0x61, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x14, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xe7, 0x01, 0x0a, 0x1a, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x6f, 0x73, 0x74, - 0x41, 0x64, 0x64, 
0x72, 0x65, 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, - 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xee, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x5f, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x73, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x73, - 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, - 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, 0x0a, 0x0f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x64, 0x64, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x32, 0x0a, 0x11, 0x43, - 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x14, 0x0a, - 0x12, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x30, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x64, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0a, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x6e, - 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, - 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xb8, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, - 
0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x14, 0x0a, 0x12, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x1d, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x06, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x06, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xaa, 0x02, - 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x83, 0x01, 0x0a, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x81, - 0x01, 0x0a, 0x12, 0x53, 0x68, 0x61, 0x72, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x51, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x7e, 0x0a, 
0x20, - 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5a, - 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, - 0x6f, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x89, 0x01, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x11, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x92, 0x01, 0x0a, 0x14, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 
0x02, 0x68, 0x00, 0x12, 0x53, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x6f, 0x0a, 0x15, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x56, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x96, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x57, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 
0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x17, 0x0a, 0x15, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, - 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc5, - 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, - 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, - 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x45, - 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, - 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, - 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x50, 0x61, 
0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xe8, 0x02, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x44, 0x4c, - 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, - 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x61, 0x73, 0x6b, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x71, 0x0a, 0x16, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x5f, 0x69, 0x6e, 0x66, 
0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x14, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xeb, 0x01, 0x0a, 0x17, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x29, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, - 0x69, 0x76, 0x65, 0x45, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, - 
0x68, 0x00, 0x22, 0x1a, 0x0a, 0x18, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc7, 0x02, - 0x0a, 0x17, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x45, 0x6e, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2e, 0x0a, 0x11, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x50, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, - 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 
0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x46, 0x0a, 0x18, 0x4d, 0x65, 0x72, 0x67, 0x65, - 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa4, 0x01, 0x0a, 0x1b, - 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x5e, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x1e, 0x0a, 0x1c, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa0, 0x01, 0x0a, 0x2a, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 
0x79, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x22, 0x92, 0x01, 0x0a, 0x2b, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x4c, 0x61, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x38, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0d, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x4a, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x2b, 0x0a, 0x0f, 0x72, 0x65, 
0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x79, 0x0a, 0x1c, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x68, 0x61, 0x72, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xdc, 0x06, 0x0a, 0x16, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x39, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x48, 0x0a, 0x10, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 
0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0e, 0x73, 0x68, 0x61, 0x72, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x7e, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x51, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x8a, 0x01, 0x0a, 0x13, 0x68, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x55, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x12, 0x68, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6e, 0x0a, 0x24, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x69, 0x73, - 0x69, 0x62, 0x69, 
0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x20, 0x6d, 0x61, - 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x56, - 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x1a, 0x92, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x8b, - 0x01, 0x0a, 0x17, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x56, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5c, 0x0a, 0x15, 0x48, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, - 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x43, 0x0a, - 0x1c, 0x68, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x19, 0x68, 0x61, 0x6e, 0x64, 0x6f, 0x76, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, - 0xa7, 0x01, 0x0a, 0x20, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x65, 0x72, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0d, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x61, - 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x1a, 0x61, 0x63, 0x6b, 0x65, - 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x17, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x56, - 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x90, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, - 0x62, 
0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, - 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x1d, 0x0a, 0x1b, - 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xda, 0x02, 0x0a, 0x1e, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0f, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x62, 0x61, 0x74, 
0x63, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, - 0x6f, 0x62, 0x52, 0x0e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, - 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0x66, 0x0a, 0x1f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x29, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x41, - 0x70, 0x70, 0x6c, 0x69, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbb, 0x02, 0x0a, 0x25, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x69, 0x73, 0x69, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, - 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4e, 0x0a, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x13, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x6c, 0x6f, 0x73, 0x65, - 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x28, 0x0a, 0x26, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xa6, 0x01, 0x0a, 0x1e, 0x55, 
0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5d, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x22, 0x83, 0x01, 0x0a, 0x1f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xae, 0x01, 0x0a, 0x28, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x74, 0x0a, 0x16, 0x73, - 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, - 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x48, 0x00, 0x52, 0x14, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0c, 0x0a, 0x0a, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x29, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x48, 0x00, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x22, 0xae, 0x01, 0x0a, 0x22, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, 0x0a, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x8b, 0x01, 0x0a, 0x23, 0x50, 0x6f, 0x6c, - 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x64, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xae, 0x01, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 
0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, 0x0a, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x8b, 0x01, 0x0a, 0x23, 0x47, 0x65, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, - 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, - 0x76, 
0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x68, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x76, 0x65, - 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x99, 0x01, 0x0a, 0x2a, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, - 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbc, 0x01, 0x0a, 0x27, 0x47, - 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 
0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x56, 0x32, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x07, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, - 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x56, 0x32, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x99, 0x01, 0x0a, 0x28, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x56, 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4d, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x56, 0x32, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 
- 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb8, 0x01, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x68, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x95, 0x01, 0x0a, 0x26, 0x47, 0x65, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, - 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x6b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x4b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 
0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xaf, 0x01, 0x0a, 0x23, 0x46, 0x6f, 0x72, 0x63, - 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, - 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x61, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x8c, 0x01, 0x0a, 0x24, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 
0x72, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xac, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, - 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, - 0x0a, 0x07, 0x64, 0x6c, 0x71, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x64, 0x6c, 0x71, 0x4b, - 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, - 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x91, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x09, 0x64, 0x6c, 0x71, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, - 0x51, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x08, 0x64, 0x6c, 0x71, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, - 0x50, 0x61, 0x67, 0x65, 
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xdc, 0x01, 0x0a, - 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x07, 0x64, 0x6c, 0x71, 0x5f, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x4b, 0x65, - 0x79, 0x52, 0x06, 0x64, 0x6c, 0x71, 0x4b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x78, 0x0a, 0x1b, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, - 0x78, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x47, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x83, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x12, 0x21, 0x0a, 0x0a, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, - 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xfa, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x06, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x71, 0x75, - 0x65, 0x75, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x1a, 0x57, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, - 0x0a, 0x0a, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x71, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x27, 
0x0a, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0xee, 0x01, 0x0a, 0x0f, 0x41, 0x64, 0x64, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, - 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x55, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, - 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x65, 0x0a, 0x04, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x23, 0x0a, 0x0b, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, - 0x6f, 0x62, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x68, 0x00, 0x22, 0x12, 0x0a, 0x10, - 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x6e, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x07, 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x72, 0x0a, 0x11, 0x4c, 0x69, 0x73, - 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, 0x42, 0x3c, 0x5a, 0x3a, 0x67, 0x6f, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc = "" + + "\n" + + ".temporal.api.workflowservice.v1.StartWorkflowExecutionRequestR\fstartRequest\x12h\n" + + "\x15parent_execution_info\x18\x03 \x01(\v24.temporal.server.api.workflow.v1.ParentExecutionInfoR\x13parentExecutionInfo\x12\x18\n" + + 
"\aattempt\x18\x04 \x01(\x05R\aattempt\x12g\n" + + "\"workflow_execution_expiration_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x1fworkflowExecutionExpirationTime\x12h\n" + + "\x19continue_as_new_initiator\x18\x06 \x01(\x0e2-.temporal.api.enums.v1.ContinueAsNewInitiatorR\x16continueAsNewInitiator\x12M\n" + + "\x11continued_failure\x18\a \x01(\v2 .temporal.api.failure.v1.FailureR\x10continuedFailure\x12V\n" + + "\x16last_completion_result\x18\b \x01(\v2 .temporal.api.common.v1.PayloadsR\x14lastCompletionResult\x12X\n" + + "\x1bfirst_workflow_task_backoff\x18\t \x01(\v2\x19.google.protobuf.DurationR\x18firstWorkflowTaskBackoff\x12\\\n" + + "\x14source_version_stamp\x18\n" + + " \x01(\v2*.temporal.api.common.v1.WorkerVersionStampR\x12sourceVersionStamp\x12b\n" + + "\x13root_execution_info\x18\v \x01(\v22.temporal.server.api.workflow.v1.RootExecutionInfoR\x11rootExecutionInfo\x12,\n" + + "\x12inherited_build_id\x18\f \x01(\tR\x10inheritedBuildId\x12]\n" + + "\x13versioning_override\x18\r \x01(\v2,.temporal.api.workflow.v1.VersioningOverrideR\x12versioningOverride\x12.\n" + + "\x13child_workflow_only\x18\x0e \x01(\bR\x11childWorkflowOnly\x12m\n" + + "\x18inherited_pinned_version\x18\x0f \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\x16inheritedPinnedVersion\x12s\n" + + "\x1binherited_auto_upgrade_info\x18\x10 \x01(\v24.temporal.api.deployment.v1.InheritedAutoUpgradeInfoR\x18inheritedAutoUpgradeInfo\x12|\n" + + "\x1fdeclined_target_version_upgrade\x18\x11 \x01(\v25.temporal.api.history.v1.DeclinedTargetVersionUpgradeR\x1cdeclinedTargetVersionUpgrade\x12S\n" + + "\x18initial_skipped_duration\x18\x12 \x01(\v2\x19.google.protobuf.DurationR\x16initialSkippedDuration:\x1f\x92\xc4\x03\x1b*\x19start_request.workflow_id\"\xfc\x02\n" + + "\x1eStartWorkflowExecutionResponse\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\x12?\n" + + "\x05clock\x18\x02 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12n\n" + + 
"\x13eager_workflow_task\x18\x03 \x01(\v2>.temporal.api.workflowservice.v1.PollWorkflowTaskQueueResponseR\x11eagerWorkflowTask\x12\x18\n" + + "\astarted\x18\x04 \x01(\bR\astarted\x12F\n" + + "\x06status\x18\x05 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x120\n" + + "\x04link\x18\x06 \x01(\v2\x1c.temporal.api.common.v1.LinkR\x04link\"\xda\x03\n" + + "\x16GetMutableStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x123\n" + + "\x16expected_next_event_id\x18\x03 \x01(\x03R\x13expectedNextEventId\x120\n" + + "\x14current_branch_token\x18\x04 \x01(\fR\x12currentBranchToken\x12d\n" + + "\x14version_history_item\x18\x05 \x01(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x12versionHistoryItem\x12j\n" + + "\x14versioned_transition\x18\x06 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\xf3\f\n" + + "\x17GetMutableStateResponse\x12G\n" + + "\texecution\x18\x01 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12I\n" + + "\rworkflow_type\x18\x02 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x12\"\n" + + "\rnext_event_id\x18\x03 \x01(\x03R\vnextEventId\x129\n" + + "\x19previous_started_event_id\x18\x04 \x01(\x03R\x16previousStartedEventId\x12-\n" + + "\x13last_first_event_id\x18\x05 \x01(\x03R\x10lastFirstEventId\x12C\n" + + "\n" + + "task_queue\x18\x06 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12P\n" + + "\x11sticky_task_queue\x18\a \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\x0fstickyTaskQueue\x12u\n" + + "+sticky_task_queue_schedule_to_start_timeout\x18\v \x01(\v2\x19.google.protobuf.DurationR%stickyTaskQueueScheduleToStartTimeout\x120\n" + + "\x14current_branch_token\x18\r \x01(\fR\x12currentBranchToken\x12[\n" + + "\x0eworkflow_state\x18\x0f 
\x01(\x0e24.temporal.server.api.enums.v1.WorkflowExecutionStateR\rworkflowState\x12W\n" + + "\x0fworkflow_status\x18\x10 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x0eworkflowStatus\x12]\n" + + "\x11version_histories\x18\x11 \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistories\x12>\n" + + "\x1cis_sticky_task_queue_enabled\x18\x12 \x01(\bR\x18isStickyTaskQueueEnabled\x124\n" + + "\x17last_first_event_txn_id\x18\x13 \x01(\x03R\x13lastFirstEventTxnId\x123\n" + + "\x16first_execution_run_id\x18\x14 \x01(\tR\x13firstExecutionRunId\x12r\n" + + " most_recent_worker_version_stamp\x18\x15 \x01(\v2*.temporal.api.common.v1.WorkerVersionStampR\x1cmostRecentWorkerVersionStamp\x12*\n" + + "\x11assigned_build_id\x18\x16 \x01(\tR\x0fassignedBuildId\x12,\n" + + "\x12inherited_build_id\x18\x17 \x01(\tR\x10inheritedBuildId\x12f\n" + + "\x12transition_history\x18\x18 \x03(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x11transitionHistory\x12b\n" + + "\x0fversioning_info\x18\x19 \x01(\v29.temporal.api.workflow.v1.WorkflowExecutionVersioningInfoR\x0eversioningInfo\x12~\n" + + "\x1etransient_or_speculative_tasks\x18\x1a \x01(\v29.temporal.server.api.history.v1.TransientWorkflowTaskInfoR\x1btransientOrSpeculativeTasksJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "J\x04\b\n" + + "\x10\vJ\x04\b\f\x10\rJ\x04\b\x0e\x10\x0f\"\xef\x02\n" + + "\x17PollMutableStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x123\n" + + "\x16expected_next_event_id\x18\x03 \x01(\x03R\x13expectedNextEventId\x120\n" + + "\x14current_branch_token\x18\x04 \x01(\fR\x12currentBranchToken\x12d\n" + + "\x14version_history_item\x18\x05 \x01(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x12versionHistoryItem:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\x94\b\n" + + "\x18PollMutableStateResponse\x12G\n" + + "\texecution\x18\x01 
\x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12I\n" + + "\rworkflow_type\x18\x02 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x12\"\n" + + "\rnext_event_id\x18\x03 \x01(\x03R\vnextEventId\x129\n" + + "\x19previous_started_event_id\x18\x04 \x01(\x03R\x16previousStartedEventId\x12-\n" + + "\x13last_first_event_id\x18\x05 \x01(\x03R\x10lastFirstEventId\x12C\n" + + "\n" + + "task_queue\x18\x06 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12P\n" + + "\x11sticky_task_queue\x18\a \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\x0fstickyTaskQueue\x12u\n" + + "+sticky_task_queue_schedule_to_start_timeout\x18\v \x01(\v2\x19.google.protobuf.DurationR%stickyTaskQueueScheduleToStartTimeout\x120\n" + + "\x14current_branch_token\x18\f \x01(\fR\x12currentBranchToken\x12]\n" + + "\x11version_histories\x18\x0e \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistories\x12[\n" + + "\x0eworkflow_state\x18\x0f \x01(\x0e24.temporal.server.api.enums.v1.WorkflowExecutionStateR\rworkflowState\x12W\n" + + "\x0fworkflow_status\x18\x10 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x0eworkflowStatus\x124\n" + + "\x17last_first_event_txn_id\x18\x11 \x01(\x03R\x13lastFirstEventTxnId\x123\n" + + "\x16first_execution_run_id\x18\x12 \x01(\tR\x13firstExecutionRunIdJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "J\x04\b\n" + + "\x10\vJ\x04\b\r\x10\x0e\"\xa6\x01\n" + + "\x1bResetStickyTaskQueueRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\x1e\n" + + "\x1cResetStickyTaskQueueResponse\"\xe0\x03\n" + + "\x1cExecuteMultiOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12m\n" + + "\n" + + "operations\x18\x03 
\x03(\v2M.temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest.OperationR\n" + + "operations\x1a\xf9\x01\n" + + "\tOperation\x12m\n" + + "\x0estart_workflow\x18\x01 \x01(\v2D.temporal.server.api.historyservice.v1.StartWorkflowExecutionRequestH\x00R\rstartWorkflow\x12p\n" + + "\x0fupdate_workflow\x18\x02 \x01(\v2E.temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequestH\x00R\x0eupdateWorkflowB\v\n" + + "\toperation:\x11\x92\xc4\x03\r*\vworkflow_id\"\x88\x03\n" + + "\x1dExecuteMultiOperationResponse\x12k\n" + + "\tresponses\x18\x01 \x03(\v2M.temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse.ResponseR\tresponses\x1a\xf9\x01\n" + + "\bResponse\x12n\n" + + "\x0estart_workflow\x18\x01 \x01(\v2E.temporal.server.api.historyservice.v1.StartWorkflowExecutionResponseH\x00R\rstartWorkflow\x12q\n" + + "\x0fupdate_workflow\x18\x02 \x01(\v2F.temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponseH\x00R\x0eupdateWorkflowB\n" + + "\n" + + "\bresponse\"\xb1\a\n" + + " RecordWorkflowTaskStartedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12,\n" + + "\x12scheduled_event_id\x18\x03 \x01(\x03R\x10scheduledEventId\x12\x1d\n" + + "\n" + + "request_id\x18\x05 \x01(\tR\trequestId\x12`\n" + + "\fpoll_request\x18\x06 \x01(\v2=.temporal.api.workflowservice.v1.PollWorkflowTaskQueueRequestR\vpollRequest\x12?\n" + + "\x05clock\x18\a \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12j\n" + + "\x16build_id_redirect_info\x18\b \x01(\v25.temporal.server.api.taskqueue.v1.BuildIdRedirectInfoR\x13buildIdRedirectInfo\x12Y\n" + + "\x14scheduled_deployment\x18\t \x01(\v2&.temporal.api.deployment.v1.DeploymentR\x13scheduledDeployment\x12c\n" + + "\x11version_directive\x18\n" + + " \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12\x14\n" + + "\x05stamp\x18\v 
\x01(\x05R\x05stamp\x12A\n" + + "\x1dtask_dispatch_revision_number\x18\f \x01(\x03R\x1ataskDispatchRevisionNumber\x12o\n" + + "\x19target_deployment_version\x18\r \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\x17targetDeploymentVersion:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_idJ\x04\b\x04\x10\x05\"\xc4\n" + + "\n" + + "!RecordWorkflowTaskStartedResponse\x12I\n" + + "\rworkflow_type\x18\x01 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x129\n" + + "\x19previous_started_event_id\x18\x02 \x01(\x03R\x16previousStartedEventId\x12,\n" + + "\x12scheduled_event_id\x18\x03 \x01(\x03R\x10scheduledEventId\x12(\n" + + "\x10started_event_id\x18\x04 \x01(\x03R\x0estartedEventId\x12\"\n" + + "\rnext_event_id\x18\x05 \x01(\x03R\vnextEventId\x12\x18\n" + + "\aattempt\x18\x06 \x01(\x05R\aattempt\x128\n" + + "\x18sticky_execution_enabled\x18\a \x01(\bR\x16stickyExecutionEnabled\x12q\n" + + "\x17transient_workflow_task\x18\b \x01(\v29.temporal.server.api.history.v1.TransientWorkflowTaskInfoR\x15transientWorkflowTask\x12g\n" + + "\x1dworkflow_execution_task_queue\x18\t \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\x1aworkflowExecutionTaskQueue\x12!\n" + + "\fbranch_token\x18\v \x01(\fR\vbranchToken\x12A\n" + + "\x0escheduled_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12=\n" + + "\fstarted_time\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12o\n" + + "\aqueries\x18\x0e \x03(\v2U.temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.QueriesEntryR\aqueries\x12?\n" + + "\x05clock\x18\x0f \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12=\n" + + "\bmessages\x18\x10 \x03(\v2!.temporal.api.protocol.v1.MessageR\bmessages\x12\x18\n" + + "\aversion\x18\x11 \x01(\x03R\aversion\x12:\n" + + "\ahistory\x18\x12 \x01(\v2 .temporal.api.history.v1.HistoryR\ahistory\x12&\n" + + "\x0fnext_page_token\x18\x13 \x01(\fR\rnextPageToken\x12E\n" + + "\vraw_history\x18\x14 \x01(\v2 
.temporal.api.history.v1.HistoryB\x02\x18\x01R\n" + + "rawHistory\x12*\n" + + "\x11raw_history_bytes\x18\x15 \x03(\fR\x0frawHistoryBytes\x1a`\n" + + "\fQueriesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12:\n" + + "\x05value\x18\x02 \x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05value:\x028\x01J\x04\b\n" + + "\x10\v\"\xbe\n" + + "\n" + + "/RecordWorkflowTaskStartedResponseWithRawHistory\x12I\n" + + "\rworkflow_type\x18\x01 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x129\n" + + "\x19previous_started_event_id\x18\x02 \x01(\x03R\x16previousStartedEventId\x12,\n" + + "\x12scheduled_event_id\x18\x03 \x01(\x03R\x10scheduledEventId\x12(\n" + + "\x10started_event_id\x18\x04 \x01(\x03R\x0estartedEventId\x12\"\n" + + "\rnext_event_id\x18\x05 \x01(\x03R\vnextEventId\x12\x18\n" + + "\aattempt\x18\x06 \x01(\x05R\aattempt\x128\n" + + "\x18sticky_execution_enabled\x18\a \x01(\bR\x16stickyExecutionEnabled\x12q\n" + + "\x17transient_workflow_task\x18\b \x01(\v29.temporal.server.api.history.v1.TransientWorkflowTaskInfoR\x15transientWorkflowTask\x12g\n" + + "\x1dworkflow_execution_task_queue\x18\t \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\x1aworkflowExecutionTaskQueue\x12!\n" + + "\fbranch_token\x18\v \x01(\fR\vbranchToken\x12A\n" + + "\x0escheduled_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12=\n" + + "\fstarted_time\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12}\n" + + "\aqueries\x18\x0e \x03(\v2c.temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.QueriesEntryR\aqueries\x12?\n" + + "\x05clock\x18\x0f \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12=\n" + + "\bmessages\x18\x10 \x03(\v2!.temporal.api.protocol.v1.MessageR\bmessages\x12\x18\n" + + "\aversion\x18\x11 \x01(\x03R\aversion\x12:\n" + + "\ahistory\x18\x12 \x01(\v2 .temporal.api.history.v1.HistoryR\ahistory\x12&\n" + + "\x0fnext_page_token\x18\x13 \x01(\fR\rnextPageToken\x12#\n" + + 
"\vraw_history\x18\x14 \x03(\fB\x02\x18\x01R\n" + + "rawHistory\x12*\n" + + "\x11raw_history_bytes\x18\x15 \x03(\fR\x0frawHistoryBytes\x1a`\n" + + "\fQueriesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12:\n" + + "\x05value\x18\x02 \x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05value:\x028\x01J\x04\b\n" + + "\x10\v\"\xcd\x06\n" + + " RecordActivityTaskStartedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12,\n" + + "\x12scheduled_event_id\x18\x03 \x01(\x03R\x10scheduledEventId\x12\x1d\n" + + "\n" + + "request_id\x18\x05 \x01(\tR\trequestId\x12`\n" + + "\fpoll_request\x18\x06 \x01(\v2=.temporal.api.workflowservice.v1.PollActivityTaskQueueRequestR\vpollRequest\x12?\n" + + "\x05clock\x18\a \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12j\n" + + "\x16build_id_redirect_info\x18\b \x01(\v25.temporal.server.api.taskqueue.v1.BuildIdRedirectInfoR\x13buildIdRedirectInfo\x12\x14\n" + + "\x05stamp\x18\t \x01(\x05R\x05stamp\x12Y\n" + + "\x14scheduled_deployment\x18\n" + + " \x01(\v2&.temporal.api.deployment.v1.DeploymentR\x13scheduledDeployment\x12c\n" + + "\x11version_directive\x18\f \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12A\n" + + "\x1dtask_dispatch_revision_number\x18\r \x01(\x03R\x1ataskDispatchRevisionNumber\x12#\n" + + "\rcomponent_ref\x18\x0e \x01(\fR\fcomponentRef:\x06\x92\xc4\x03\x02\b\x01J\x04\b\x04\x10\x05J\x04\b\v\x10\f\"\xa4\x06\n" + + "!RecordActivityTaskStartedResponse\x12N\n" + + "\x0fscheduled_event\x18\x01 \x01(\v2%.temporal.api.history.v1.HistoryEventR\x0escheduledEvent\x12=\n" + + "\fstarted_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12\x18\n" + + "\aattempt\x18\x03 \x01(\x05R\aattempt\x12_\n" + + "\x1ecurrent_attempt_scheduled_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x1bcurrentAttemptScheduledTime\x12M\n" + + 
"\x11heartbeat_details\x18\x05 \x01(\v2 .temporal.api.common.v1.PayloadsR\x10heartbeatDetails\x12I\n" + + "\rworkflow_type\x18\x06 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x12-\n" + + "\x12workflow_namespace\x18\a \x01(\tR\x11workflowNamespace\x12?\n" + + "\x05clock\x18\b \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12\x18\n" + + "\aversion\x18\t \x01(\x03R\aversion\x12<\n" + + "\bpriority\x18\n" + + " \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12F\n" + + "\fretry_policy\x18\v \x01(\v2#.temporal.api.common.v1.RetryPolicyR\vretryPolicy\x12#\n" + + "\rstart_version\x18\f \x01(\x03R\fstartVersion\x12&\n" + + "\x0factivity_run_id\x18\r \x01(\tR\ractivityRunId\"\xdc\x01\n" + + "#RespondWorkflowTaskCompletedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12o\n" + + "\x10complete_request\x18\x02 \x01(\v2D.temporal.api.workflowservice.v1.RespondWorkflowTaskCompletedRequestR\x0fcompleteRequest:!\x92\xc4\x03\x1d2\x1bcomplete_request.task_token\"\xa7\x03\n" + + "$RespondWorkflowTaskCompletedResponse\x12w\n" + + "\x10started_response\x18\x01 \x01(\v2H.temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseB\x02\x18\x01R\x0fstartedResponse\x12e\n" + + "\x0eactivity_tasks\x18\x02 \x03(\v2>.temporal.api.workflowservice.v1.PollActivityTaskQueueResponseR\ractivityTasks\x123\n" + + "\x16reset_history_event_id\x18\x03 \x01(\x03R\x13resetHistoryEventId\x12j\n" + + "\x11new_workflow_task\x18\x04 \x01(\v2>.temporal.api.workflowservice.v1.PollWorkflowTaskQueueResponseR\x0fnewWorkflowTask\"\xd0\x01\n" + + " RespondWorkflowTaskFailedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12h\n" + + "\x0efailed_request\x18\x02 \x01(\v2A.temporal.api.workflowservice.v1.RespondWorkflowTaskFailedRequestR\rfailedRequest:\x1f\x92\xc4\x03\x1b2\x19failed_request.task_token\"#\n" + + "!RespondWorkflowTaskFailedResponse\"\xaa\x02\n" + + "\x1aIsWorkflowTaskValidRequest\x12!\n" + + "\fnamespace_id\x18\x01 
\x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12?\n" + + "\x05clock\x18\x03 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12,\n" + + "\x12scheduled_event_id\x18\x04 \x01(\x03R\x10scheduledEventId\x12\x14\n" + + "\x05stamp\x18\x05 \x01(\x05R\x05stamp:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"8\n" + + "\x1bIsWorkflowTaskValidResponse\x12\x19\n" + + "\bis_valid\x18\x01 \x01(\bR\aisValid\"\xdd\x01\n" + + "\"RecordActivityTaskHeartbeatRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12p\n" + + "\x11heartbeat_request\x18\x02 \x01(\v2C.temporal.api.workflowservice.v1.RecordActivityTaskHeartbeatRequestR\x10heartbeatRequest:\"\x92\xc4\x03\x1e2\x1cheartbeat_request.task_token\"\xa0\x01\n" + + "#RecordActivityTaskHeartbeatResponse\x12)\n" + + "\x10cancel_requested\x18\x01 \x01(\bR\x0fcancelRequested\x12'\n" + + "\x0factivity_paused\x18\x02 \x01(\bR\x0eactivityPaused\x12%\n" + + "\x0eactivity_reset\x18\x03 \x01(\bR\ractivityReset\"\xdc\x01\n" + + "#RespondActivityTaskCompletedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12o\n" + + "\x10complete_request\x18\x02 \x01(\v2D.temporal.api.workflowservice.v1.RespondActivityTaskCompletedRequestR\x0fcompleteRequest:!\x92\xc4\x03\x1d2\x1bcomplete_request.task_token\"&\n" + + "$RespondActivityTaskCompletedResponse\"\xd0\x01\n" + + " RespondActivityTaskFailedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12h\n" + + "\x0efailed_request\x18\x02 \x01(\v2A.temporal.api.workflowservice.v1.RespondActivityTaskFailedRequestR\rfailedRequest:\x1f\x92\xc4\x03\x1b2\x19failed_request.task_token\"#\n" + + "!RespondActivityTaskFailedResponse\"\xd4\x01\n" + + "\"RespondActivityTaskCanceledRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12j\n" + + "\x0ecancel_request\x18\x02 
\x01(\v2C.temporal.api.workflowservice.v1.RespondActivityTaskCanceledRequestR\rcancelRequest:\x1f\x92\xc4\x03\x1b2\x19cancel_request.task_token\"%\n" + + "#RespondActivityTaskCanceledResponse\"\xaa\x02\n" + + "\x1aIsActivityTaskValidRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12?\n" + + "\x05clock\x18\x03 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12,\n" + + "\x12scheduled_event_id\x18\x04 \x01(\x03R\x10scheduledEventId\x12\x14\n" + + "\x05stamp\x18\x05 \x01(\x05R\x05stamp:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"8\n" + + "\x1bIsActivityTaskValidResponse\x12\x19\n" + + "\bis_valid\x18\x01 \x01(\bR\aisValid\"\xfb\x02\n" + + "\x1eSignalWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12f\n" + + "\x0esignal_request\x18\x02 \x01(\v2?.temporal.api.workflowservice.v1.SignalWorkflowExecutionRequestR\rsignalRequest\x12i\n" + + "\x1bexternal_workflow_execution\x18\x03 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x19externalWorkflowExecution\x12.\n" + + "\x13child_workflow_only\x18\x04 \x01(\bR\x11childWorkflowOnly:3\x92\xc4\x03/*-signal_request.workflow_execution.workflow_id\"S\n" + + "\x1fSignalWorkflowExecutionResponse\x120\n" + + "\x04link\x18\x01 \x01(\v2\x1c.temporal.api.common.v1.LinkR\x04link\"\xff\x01\n" + + "'SignalWithStartWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x83\x01\n" + + "\x19signal_with_start_request\x18\x02 \x01(\v2H.temporal.api.workflowservice.v1.SignalWithStartWorkflowExecutionRequestR\x16signalWithStartRequest:+\x92\xc4\x03'*%signal_with_start_request.workflow_id\"\x9a\x01\n" + + "(SignalWithStartWorkflowExecutionResponse\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\x12\x18\n" + + "\astarted\x18\x02 \x01(\bR\astarted\x12=\n" + + "\vsignal_link\x18\x03 \x01(\v2\x1c.temporal.api.common.v1.LinkR\n" + + 
"signalLink\"\xe3\x01\n" + + "\x1fRemoveSignalMutableStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12\x1d\n" + + "\n" + + "request_id\x18\x03 \x01(\tR\trequestId:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_id\"\"\n" + + " RemoveSignalMutableStateResponse\"\x8a\x03\n" + + "!TerminateWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12o\n" + + "\x11terminate_request\x18\x02 \x01(\v2B.temporal.api.workflowservice.v1.TerminateWorkflowExecutionRequestR\x10terminateRequest\x12i\n" + + "\x1bexternal_workflow_execution\x18\x03 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x19externalWorkflowExecution\x12.\n" + + "\x13child_workflow_only\x18\x04 \x01(\bR\x11childWorkflowOnly:6\x92\xc4\x032*0terminate_request.workflow_execution.workflow_id\"$\n" + + "\"TerminateWorkflowExecutionResponse\"\xfb\x01\n" + + "\x1eDeleteWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x120\n" + + "\x14closed_workflow_only\x18\x04 \x01(\bR\x12closedWorkflowOnly:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_idJ\x04\b\x03\x10\x04\"!\n" + + "\x1fDeleteWorkflowExecutionResponse\"\xdb\x01\n" + + "\x1dResetWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12c\n" + + "\rreset_request\x18\x02 \x01(\v2>.temporal.api.workflowservice.v1.ResetWorkflowExecutionRequestR\fresetRequest:2\x92\xc4\x03.*,reset_request.workflow_execution.workflow_id\"7\n" + + "\x1eResetWorkflowExecutionResponse\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\"\xc8\x03\n" + + "%RequestCancelWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12m\n" + + "\x0ecancel_request\x18\x02 
\x01(\v2F.temporal.api.workflowservice.v1.RequestCancelWorkflowExecutionRequestR\rcancelRequest\x12=\n" + + "\x1bexternal_initiated_event_id\x18\x03 \x01(\x03R\x18externalInitiatedEventId\x12i\n" + + "\x1bexternal_workflow_execution\x18\x04 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x19externalWorkflowExecution\x12.\n" + + "\x13child_workflow_only\x18\x05 \x01(\bR\x11childWorkflowOnly:3\x92\xc4\x03/*-cancel_request.workflow_execution.workflow_id\"(\n" + + "&RequestCancelWorkflowExecutionResponse\"\x8f\x03\n" + + "\x1bScheduleWorkflowTaskRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x123\n" + + "\x16is_first_workflow_task\x18\x03 \x01(\bR\x13isFirstWorkflowTask\x12J\n" + + "\vchild_clock\x18\x04 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\n" + + "childClock\x12L\n" + + "\fparent_clock\x18\x05 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\vparentClock:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_id\"\x1e\n" + + "\x1cScheduleWorkflowTaskResponse\"\x8d\x02\n" + + "'VerifyFirstWorkflowTaskScheduledRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12?\n" + + "\x05clock\x18\x03 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_id\"*\n" + + "(VerifyFirstWorkflowTaskScheduledResponse\"\xd4\x04\n" + + "$RecordChildExecutionCompletedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12T\n" + + "\x10parent_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x0fparentExecution\x12.\n" + + "\x13parent_initiated_id\x18\x03 \x01(\x03R\x11parentInitiatedId\x12R\n" + + "\x0fchild_execution\x18\x04 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x0echildExecution\x12P\n" + + 
"\x10completion_event\x18\x05 \x01(\v2%.temporal.api.history.v1.HistoryEventR\x0fcompletionEvent\x12?\n" + + "\x05clock\x18\x06 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x128\n" + + "\x18parent_initiated_version\x18\a \x01(\x03R\x16parentInitiatedVersion\x12>\n" + + "\x1cchild_first_execution_run_id\x18\b \x01(\tR\x18childFirstExecutionRunId:\"\x92\xc4\x03\x1e*\x1cparent_execution.workflow_id\"'\n" + + "%RecordChildExecutionCompletedResponse\"\xf0\x03\n" + + "-VerifyChildExecutionCompletionRecordedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12T\n" + + "\x10parent_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x0fparentExecution\x12R\n" + + "\x0fchild_execution\x18\x03 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x0echildExecution\x12.\n" + + "\x13parent_initiated_id\x18\x04 \x01(\x03R\x11parentInitiatedId\x128\n" + + "\x18parent_initiated_version\x18\x05 \x01(\x03R\x16parentInitiatedVersion\x12?\n" + + "\x05clock\x18\x06 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12#\n" + + "\rresend_parent\x18\a \x01(\bR\fresendParent:\"\x92\xc4\x03\x1e*\x1cparent_execution.workflow_id\"0\n" + + ".VerifyChildExecutionCompletionRecordedResponse\"\xc7\x01\n" + + " DescribeWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12[\n" + + "\arequest\x18\x02 \x01(\v2A.temporal.api.workflowservice.v1.DescribeWorkflowExecutionRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\xb3\x06\n" + + "!DescribeWorkflowExecutionResponse\x12\\\n" + + "\x10execution_config\x18\x01 \x01(\v21.temporal.api.workflow.v1.WorkflowExecutionConfigR\x0fexecutionConfig\x12g\n" + + "\x17workflow_execution_info\x18\x02 \x01(\v2/.temporal.api.workflow.v1.WorkflowExecutionInfoR\x15workflowExecutionInfo\x12\\\n" + + "\x12pending_activities\x18\x03 \x03(\v2-.temporal.api.workflow.v1.PendingActivityInfoR\x11pendingActivities\x12^\n" + + "\x10pending_children\x18\x04 
\x03(\v23.temporal.api.workflow.v1.PendingChildExecutionInfoR\x0fpendingChildren\x12e\n" + + "\x15pending_workflow_task\x18\x05 \x01(\v21.temporal.api.workflow.v1.PendingWorkflowTaskInfoR\x13pendingWorkflowTask\x12D\n" + + "\tcallbacks\x18\x06 \x03(\v2&.temporal.api.workflow.v1.CallbackInfoR\tcallbacks\x12m\n" + + "\x18pending_nexus_operations\x18\a \x03(\v23.temporal.api.workflow.v1.PendingNexusOperationInfoR\x16pendingNexusOperations\x12m\n" + + "\x16workflow_extended_info\x18\b \x01(\v27.temporal.api.workflow.v1.WorkflowExecutionExtendedInfoR\x14workflowExtendedInfo\"\xa9\x04\n" + + "\x18ReplicateEventsV2Request\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12f\n" + + "\x15version_history_items\x18\x03 \x03(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x13versionHistoryItems\x128\n" + + "\x06events\x18\x04 \x01(\v2 .temporal.api.common.v1.DataBlobR\x06events\x12F\n" + + "\x0enew_run_events\x18\x05 \x01(\v2 .temporal.api.common.v1.DataBlobR\fnewRunEvents\x12b\n" + + "\x13base_execution_info\x18\x06 \x01(\v22.temporal.server.api.workflow.v1.BaseExecutionInfoR\x11baseExecutionInfo\x12\x1c\n" + + "\n" + + "new_run_id\x18\a \x01(\tR\bnewRunId:$\x92\xc4\x03 *\x1eworkflow_execution.workflow_id\"\x1b\n" + + "\x19ReplicateEventsV2Response\"\xed\x02\n" + + "\x1dReplicateWorkflowStateRequest\x12_\n" + + "\x0eworkflow_state\x18\x01 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\rworkflowState\x12%\n" + + "\x0eremote_cluster\x18\x02 \x01(\tR\rremoteCluster\x12!\n" + + "\fnamespace_id\x18\x03 \x01(\tR\vnamespaceId\x120\n" + + "\x14is_force_replication\x18\x04 \x01(\bR\x12isForceReplication\x12>\n" + + "\x1cis_close_transfer_task_acked\x18\x05 \x01(\bR\x18isCloseTransferTaskAcked:/\x92\xc4\x03+*)workflow_state.execution_info.workflow_id\" \n" + + "\x1eReplicateWorkflowStateResponse\"\xa7\x01\n" + + 
"\x16SyncShardStatusRequest\x12%\n" + + "\x0esource_cluster\x18\x01 \x01(\tR\rsourceCluster\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12;\n" + + "\vstatus_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "statusTime:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"\x19\n" + + "\x17SyncShardStatusResponse\"\xd1\v\n" + + "\x13SyncActivityRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12\x18\n" + + "\aversion\x18\x04 \x01(\x03R\aversion\x12,\n" + + "\x12scheduled_event_id\x18\x05 \x01(\x03R\x10scheduledEventId\x12A\n" + + "\x0escheduled_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12(\n" + + "\x10started_event_id\x18\a \x01(\x03R\x0estartedEventId\x12=\n" + + "\fstarted_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12J\n" + + "\x13last_heartbeat_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\x11lastHeartbeatTime\x12:\n" + + "\adetails\x18\n" + + " \x01(\v2 .temporal.api.common.v1.PayloadsR\adetails\x12\x18\n" + + "\aattempt\x18\v \x01(\x05R\aattempt\x12C\n" + + "\flast_failure\x18\f \x01(\v2 .temporal.api.failure.v1.FailureR\vlastFailure\x120\n" + + "\x14last_worker_identity\x18\r \x01(\tR\x12lastWorkerIdentity\x12W\n" + + "\x0fversion_history\x18\x0e \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12b\n" + + "\x13base_execution_info\x18\x0f \x01(\v22.temporal.server.api.workflow.v1.BaseExecutionInfoR\x11baseExecutionInfo\x121\n" + + "\x15last_started_build_id\x18\x10 \x01(\tR\x12lastStartedBuildId\x12A\n" + + "\x1dlast_started_redirect_counter\x18\x11 \x01(\x03R\x1alastStartedRedirectCounter\x12L\n" + + "\x14first_scheduled_time\x18\x12 \x01(\v2\x1a.google.protobuf.TimestampR\x12firstScheduledTime\x12W\n" + + "\x1alast_attempt_complete_time\x18\x13 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12\x14\n" + + 
"\x05stamp\x18\x14 \x01(\x05R\x05stamp\x12\x16\n" + + "\x06paused\x18\x15 \x01(\bR\x06paused\x12O\n" + + "\x16retry_initial_interval\x18\x16 \x01(\v2\x19.google.protobuf.DurationR\x14retryInitialInterval\x12O\n" + + "\x16retry_maximum_interval\x18\x17 \x01(\v2\x19.google.protobuf.DurationR\x14retryMaximumInterval\x124\n" + + "\x16retry_maximum_attempts\x18\x18 \x01(\x05R\x14retryMaximumAttempts\x12:\n" + + "\x19retry_backoff_coefficient\x18\x19 \x01(\x01R\x17retryBackoffCoefficient\x12#\n" + + "\rstart_version\x18\x1a \x01(\x03R\fstartVersion:\x11\x92\xc4\x03\r*\vworkflow_id\"\xe7\x01\n" + + "\x15SyncActivitiesRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12`\n" + + "\x0factivities_info\x18\x04 \x03(\v27.temporal.server.api.historyservice.v1.ActivitySyncInfoR\x0eactivitiesInfo:\x11\x92\xc4\x03\r*\vworkflow_id\"\xfc\t\n" + + "\x10ActivitySyncInfo\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x12,\n" + + "\x12scheduled_event_id\x18\x02 \x01(\x03R\x10scheduledEventId\x12A\n" + + "\x0escheduled_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12(\n" + + "\x10started_event_id\x18\x04 \x01(\x03R\x0estartedEventId\x12=\n" + + "\fstarted_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12J\n" + + "\x13last_heartbeat_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x11lastHeartbeatTime\x12:\n" + + "\adetails\x18\a \x01(\v2 .temporal.api.common.v1.PayloadsR\adetails\x12\x18\n" + + "\aattempt\x18\b \x01(\x05R\aattempt\x12C\n" + + "\flast_failure\x18\t \x01(\v2 .temporal.api.failure.v1.FailureR\vlastFailure\x120\n" + + "\x14last_worker_identity\x18\n" + + " \x01(\tR\x12lastWorkerIdentity\x12W\n" + + "\x0fversion_history\x18\v \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x121\n" + + "\x15last_started_build_id\x18\f \x01(\tR\x12lastStartedBuildId\x12A\n" + + 
"\x1dlast_started_redirect_counter\x18\r \x01(\x03R\x1alastStartedRedirectCounter\x12L\n" + + "\x14first_scheduled_time\x18\x12 \x01(\v2\x1a.google.protobuf.TimestampR\x12firstScheduledTime\x12W\n" + + "\x1alast_attempt_complete_time\x18\x13 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12\x14\n" + + "\x05stamp\x18\x14 \x01(\x05R\x05stamp\x12\x16\n" + + "\x06paused\x18\x15 \x01(\bR\x06paused\x12O\n" + + "\x16retry_initial_interval\x18\x16 \x01(\v2\x19.google.protobuf.DurationR\x14retryInitialInterval\x12O\n" + + "\x16retry_maximum_interval\x18\x17 \x01(\v2\x19.google.protobuf.DurationR\x14retryMaximumInterval\x124\n" + + "\x16retry_maximum_attempts\x18\x18 \x01(\x05R\x14retryMaximumAttempts\x12:\n" + + "\x19retry_backoff_coefficient\x18\x19 \x01(\x01R\x17retryBackoffCoefficient\x12#\n" + + "\rstart_version\x18\x1a \x01(\x03R\fstartVersion\"\x16\n" + + "\x14SyncActivityResponse\"\xf5\x01\n" + + "\x1bDescribeMutableStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12*\n" + + "\x11skip_force_reload\x18\x03 \x01(\bR\x0fskipForceReload\x12!\n" + + "\farchetype_id\x18\x04 \x01(\rR\varchetypeId:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\xf8\x01\n" + + "\x1cDescribeMutableStateResponse\x12h\n" + + "\x13cache_mutable_state\x18\x01 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\x11cacheMutableState\x12n\n" + + "\x16database_mutable_state\x18\x02 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\x14databaseMutableState\"\xdf\x01\n" + + "\x1aDescribeHistoryHostRequest\x12!\n" + + "\fhost_address\x18\x01 \x01(\tR\vhostAddress\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12!\n" + + "\fnamespace_id\x18\x03 \x01(\tR\vnamespaceId\x12X\n" + + "\x12workflow_execution\x18\x04 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution:\x06\x92\xc4\x03\x02\b\x01\"\xde\x01\n" + + 
"\x1bDescribeHistoryHostResponse\x12#\n" + + "\rshards_number\x18\x01 \x01(\x05R\fshardsNumber\x12\x1b\n" + + "\tshard_ids\x18\x02 \x03(\x05R\bshardIds\x12]\n" + + "\x0fnamespace_cache\x18\x03 \x01(\v24.temporal.server.api.namespace.v1.NamespaceCacheInfoR\x0enamespaceCache\x12\x18\n" + + "\aaddress\x18\x05 \x01(\tR\aaddressJ\x04\b\x04\x10\x05\">\n" + + "\x11CloseShardRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"\x14\n" + + "\x12CloseShardResponse\"<\n" + + "\x0fGetShardRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"`\n" + + "\x10GetShardResponse\x12L\n" + + "\n" + + "shard_info\x18\x01 \x01(\v2-.temporal.server.api.persistence.v1.ShardInfoR\tshardInfo\"\xb8\x01\n" + + "\x11RemoveTaskRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x12\x1a\n" + + "\bcategory\x18\x02 \x01(\x05R\bcategory\x12\x17\n" + + "\atask_id\x18\x03 \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"\x14\n" + + "\x12RemoveTaskResponse\"\x98\x01\n" + + "\x1dGetReplicationMessagesRequest\x12L\n" + + "\x06tokens\x18\x01 \x03(\v24.temporal.server.api.replication.v1.ReplicationTokenR\x06tokens\x12!\n" + + "\fcluster_name\x18\x02 \x01(\tR\vclusterName:\x06\x92\xc4\x03\x02\b\x01\"\x9c\x02\n" + + "\x1eGetReplicationMessagesResponse\x12\x7f\n" + + "\x0eshard_messages\x18\x01 \x03(\v2X.temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.ShardMessagesEntryR\rshardMessages\x1ay\n" + + "\x12ShardMessagesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12M\n" + + "\x05value\x18\x02 \x01(\v27.temporal.server.api.replication.v1.ReplicationMessagesR\x05value:\x028\x01\"\x8c\x01\n" + + " GetDLQReplicationMessagesRequest\x12V\n" + + "\n" + + "task_infos\x18\x01 
\x03(\v27.temporal.server.api.replication.v1.ReplicationTaskInfoR\ttaskInfos:\x10\x92\xc4\x03\f:\n" + + "task_infos\"\x85\x01\n" + + "!GetDLQReplicationMessagesResponse\x12`\n" + + "\x11replication_tasks\x18\x01 \x03(\v23.temporal.server.api.replication.v1.ReplicationTaskR\x10replicationTasks\"\xaf\x01\n" + + "\x14QueryWorkflowRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12O\n" + + "\arequest\x18\x02 \x01(\v25.temporal.api.workflowservice.v1.QueryWorkflowRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"k\n" + + "\x15QueryWorkflowResponse\x12R\n" + + "\bresponse\x18\x01 \x01(\v26.temporal.api.workflowservice.v1.QueryWorkflowResponseR\bresponse\"\xbc\x01\n" + + "\x14ReapplyEventsRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12S\n" + + "\arequest\x18\x02 \x01(\v29.temporal.server.api.adminservice.v1.ReapplyEventsRequestR\arequest:,\x92\xc4\x03(*&request.workflow_execution.workflow_id\"\x17\n" + + "\x15ReapplyEventsResponse\"\xbd\x02\n" + + "\x15GetDLQMessagesRequest\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12%\n" + + "\x0esource_cluster\x18\x03 \x01(\tR\rsourceCluster\x127\n" + + "\x18inclusive_end_message_id\x18\x04 \x01(\x03R\x15inclusiveEndMessageId\x12*\n" + + "\x11maximum_page_size\x18\x05 \x01(\x05R\x0fmaximumPageSize\x12&\n" + + "\x0fnext_page_token\x18\x06 \x01(\fR\rnextPageToken:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"\xd8\x02\n" + + "\x16GetDLQMessagesResponse\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12`\n" + + "\x11replication_tasks\x18\x02 \x03(\v23.temporal.server.api.replication.v1.ReplicationTaskR\x10replicationTasks\x12&\n" + + "\x0fnext_page_token\x18\x03 \x01(\fR\rnextPageToken\x12m\n" + + "\x16replication_tasks_info\x18\x04 
\x03(\v27.temporal.server.api.replication.v1.ReplicationTaskInfoR\x14replicationTasksInfo\"\xeb\x01\n" + + "\x17PurgeDLQMessagesRequest\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12%\n" + + "\x0esource_cluster\x18\x03 \x01(\tR\rsourceCluster\x127\n" + + "\x18inclusive_end_message_id\x18\x04 \x01(\x03R\x15inclusiveEndMessageId:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"\x1a\n" + + "\x18PurgeDLQMessagesResponse\"\xbf\x02\n" + + "\x17MergeDLQMessagesRequest\x12E\n" + + "\x04type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.DeadLetterQueueTypeR\x04type\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12%\n" + + "\x0esource_cluster\x18\x03 \x01(\tR\rsourceCluster\x127\n" + + "\x18inclusive_end_message_id\x18\x04 \x01(\x03R\x15inclusiveEndMessageId\x12*\n" + + "\x11maximum_page_size\x18\x05 \x01(\x05R\x0fmaximumPageSize\x12&\n" + + "\x0fnext_page_token\x18\x06 \x01(\fR\rnextPageToken:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"B\n" + + "\x18MergeDLQMessagesResponse\x12&\n" + + "\x0fnext_page_token\x18\x01 \x01(\fR\rnextPageToken\"\xe4\x01\n" + + "\x1bRefreshWorkflowTasksRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12!\n" + + "\farchetype_id\x18\x03 \x01(\rR\varchetypeId\x12Z\n" + + "\arequest\x18\x02 \x01(\v2@.temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\x1e\n" + + "\x1cRefreshWorkflowTasksResponse\"\x81\x02\n" + + "*GenerateLastHistoryReplicationTasksRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12'\n" + + "\x0ftarget_clusters\x18\x03 \x03(\tR\x0etargetClusters\x12!\n" + + "\farchetype_id\x18\x04 \x01(\rR\varchetypeId:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\x8a\x01\n" + + "+GenerateLastHistoryReplicationTasksResponse\x124\n" + 
+ "\x16state_transition_count\x18\x01 \x01(\x03R\x14stateTransitionCount\x12%\n" + + "\x0ehistory_length\x18\x02 \x01(\x03R\rhistoryLength\"N\n" + + "\x1bGetReplicationStatusRequest\x12'\n" + + "\x0fremote_clusters\x18\x01 \x03(\tR\x0eremoteClusters:\x06\x92\xc4\x03\x02\b\x01\"u\n" + + "\x1cGetReplicationStatusResponse\x12U\n" + + "\x06shards\x18\x01 \x03(\v2=.temporal.server.api.historyservice.v1.ShardReplicationStatusR\x06shards\"\xb4\x06\n" + + "\x16ShardReplicationStatus\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x125\n" + + "\x17max_replication_task_id\x18\x02 \x01(\x03R\x14maxReplicationTaskId\x12D\n" + + "\x10shard_local_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x0eshardLocalTime\x12z\n" + + "\x0fremote_clusters\x18\x04 \x03(\v2Q.temporal.server.api.historyservice.v1.ShardReplicationStatus.RemoteClustersEntryR\x0eremoteClusters\x12\x86\x01\n" + + "\x13handover_namespaces\x18\x05 \x03(\v2U.temporal.server.api.historyservice.v1.ShardReplicationStatus.HandoverNamespacesEntryR\x12handoverNamespaces\x12j\n" + + "$max_replication_task_visibility_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR maxReplicationTaskVisibilityTime\x1a\x8a\x01\n" + + "\x13RemoteClustersEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12]\n" + + "\x05value\x18\x02 \x01(\v2G.temporal.server.api.historyservice.v1.ShardReplicationStatusPerClusterR\x05value:\x028\x01\x1a\x83\x01\n" + + "\x17HandoverNamespacesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12R\n" + + "\x05value\x18\x02 \x01(\v2<.temporal.server.api.historyservice.v1.HandoverNamespaceInfoR\x05value:\x028\x01\"X\n" + + "\x15HandoverNamespaceInfo\x12?\n" + + "\x1chandover_replication_task_id\x18\x01 \x01(\x03R\x19handoverReplicationTaskId\"\x9f\x01\n" + + " ShardReplicationStatusPerCluster\x12\"\n" + + "\racked_task_id\x18\x01 \x01(\x03R\vackedTaskId\x12W\n" + + "\x1aacked_task_visibility_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x17ackedTaskVisibilityTime\"\xa5\x01\n" + + 
"\x1aRebuildMutableStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\x1d\n" + + "\x1bRebuildMutableStateResponse\"\xe3\x02\n" + + "\x1eImportWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12I\n" + + "\x0fhistory_batches\x18\x03 \x03(\v2 .temporal.api.common.v1.DataBlobR\x0ehistoryBatches\x12W\n" + + "\x0fversion_history\x18\x04 \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12\x14\n" + + "\x05token\x18\x05 \x01(\fR\x05token:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"^\n" + + "\x1fImportWorkflowExecutionResponse\x12\x14\n" + + "\x05token\x18\x01 \x01(\fR\x05token\x12%\n" + + "\x0eevents_applied\x18\x02 \x01(\bR\reventsApplied\"\xc8\x02\n" + + "%DeleteWorkflowVisibilityRecordRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12J\n" + + "\x13workflow_start_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x11workflowStartTime\x12J\n" + + "\x13workflow_close_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x11workflowCloseTime:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"(\n" + + "&DeleteWorkflowVisibilityRecordResponse\"\xcc\x01\n" + + "\x1eUpdateWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12Y\n" + + "\arequest\x18\x02 \x01(\v2?.temporal.api.workflowservice.v1.UpdateWorkflowExecutionRequestR\arequest:,\x92\xc4\x03(*&request.workflow_execution.workflow_id\"\x7f\n" + + "\x1fUpdateWorkflowExecutionResponse\x12\\\n" + + "\bresponse\x18\x01 \x01(\v2@.temporal.api.workflowservice.v1.UpdateWorkflowExecutionResponseR\bresponse\"\xb2\x01\n" + + 
"(StreamWorkflowReplicationMessagesRequest\x12p\n" + + "\x16sync_replication_state\x18\x01 \x01(\v28.temporal.server.api.replication.v1.SyncReplicationStateH\x00R\x14syncReplicationState:\x06\x92\xc4\x03\x02\b\x01B\f\n" + + "\n" + + "attributes\"\x98\x01\n" + + ")StreamWorkflowReplicationMessagesResponse\x12]\n" + + "\bmessages\x18\x01 \x01(\v2?.temporal.server.api.replication.v1.WorkflowReplicationMessagesH\x00R\bmessagesB\f\n" + + "\n" + + "attributes\"\xdf\x01\n" + + "\"PollWorkflowExecutionUpdateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12]\n" + + "\arequest\x18\x02 \x01(\v2C.temporal.api.workflowservice.v1.PollWorkflowExecutionUpdateRequestR\arequest:7\x92\xc4\x033*1request.update_ref.workflow_execution.workflow_id\"\x87\x01\n" + + "#PollWorkflowExecutionUpdateResponse\x12`\n" + + "\bresponse\x18\x01 \x01(\v2D.temporal.api.workflowservice.v1.PollWorkflowExecutionUpdateResponseR\bresponse\"\xcb\x01\n" + + "\"GetWorkflowExecutionHistoryRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12]\n" + + "\arequest\x18\x02 \x01(\v2C.temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\xc3\x01\n" + + "#GetWorkflowExecutionHistoryResponse\x12`\n" + + "\bresponse\x18\x01 \x01(\v2D.temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryResponseR\bresponse\x12:\n" + + "\ahistory\x18\x02 \x01(\v2 .temporal.api.history.v1.HistoryR\ahistory\"\xa8\x01\n" + + "*GetWorkflowExecutionHistoryResponseWithRaw\x12`\n" + + "\bresponse\x18\x01 \x01(\v2D.temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryResponseR\bresponse\x12\x18\n" + + "\ahistory\x18\x02 \x03(\fR\ahistory\"\xd9\x01\n" + + ")GetWorkflowExecutionHistoryReverseRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12d\n" + + "\arequest\x18\x02 
\x01(\v2J.temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryReverseRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\x95\x01\n" + + "*GetWorkflowExecutionHistoryReverseResponse\x12g\n" + + "\bresponse\x18\x01 \x01(\v2K.temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryReverseResponseR\bresponse\"\xd9\x01\n" + + "'GetWorkflowExecutionRawHistoryV2Request\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12f\n" + + "\arequest\x18\x02 \x01(\v2L.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2RequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\x95\x01\n" + + "(GetWorkflowExecutionRawHistoryV2Response\x12i\n" + + "\bresponse\x18\x01 \x01(\v2M.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2ResponseR\bresponse\"\xd5\x01\n" + + "%GetWorkflowExecutionRawHistoryRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12d\n" + + "\arequest\x18\x02 \x01(\v2J.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\x91\x01\n" + + "&GetWorkflowExecutionRawHistoryResponse\x12g\n" + + "\bresponse\x18\x01 \x01(\v2K.temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponseR\bresponse\"\xef\x01\n" + + "#ForceDeleteWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12!\n" + + "\farchetype_id\x18\x03 \x01(\rR\varchetypeId\x12]\n" + + "\arequest\x18\x02 \x01(\v2C.temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequestR\arequest:#\x92\xc4\x03\x1f*\x1drequest.execution.workflow_id\"\x88\x01\n" + + "$ForceDeleteWorkflowExecutionResponse\x12`\n" + + "\bresponse\x18\x01 \x01(\v2D.temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponseR\bresponse\"\xa8\x01\n" + + "\x12GetDLQTasksRequest\x12E\n" + + "\adlq_key\x18\x01 \x01(\v2,.temporal.server.api.common.v1.HistoryDLQKeyR\x06dlqKey\x12\x1b\n" + + "\tpage_size\x18\x02 
\x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\x03 \x01(\fR\rnextPageToken:\x06\x92\xc4\x03\x02\x10\x01\"\x89\x01\n" + + "\x13GetDLQTasksResponse\x12J\n" + + "\tdlq_tasks\x18\x01 \x03(\v2-.temporal.server.api.common.v1.HistoryDLQTaskR\bdlqTasks\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\xdc\x01\n" + + "\x15DeleteDLQTasksRequest\x12E\n" + + "\adlq_key\x18\x01 \x01(\v2,.temporal.server.api.common.v1.HistoryDLQKeyR\x06dlqKey\x12t\n" + + "\x1binclusive_max_task_metadata\x18\x02 \x01(\v25.temporal.server.api.common.v1.HistoryDLQTaskMetadataR\x18inclusiveMaxTaskMetadata:\x06\x92\xc4\x03\x02\x10\x01\"C\n" + + "\x16DeleteDLQTasksResponse\x12)\n" + + "\x10messages_deleted\x18\x01 \x01(\x03R\x0fmessagesDeleted\"\x7f\n" + + "\x11ListQueuesRequest\x12\x1d\n" + + "\n" + + "queue_type\x18\x01 \x01(\x05R\tqueueType\x12\x1b\n" + + "\tpage_size\x18\x02 \x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\x03 \x01(\fR\rnextPageToken:\x06\x92\xc4\x03\x02\x10\x01\"\x92\x02\n" + + "\x12ListQueuesResponse\x12[\n" + + "\x06queues\x18\x01 \x03(\v2C.temporal.server.api.historyservice.v1.ListQueuesResponse.QueueInfoR\x06queues\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\x1aw\n" + + "\tQueueInfo\x12\x1d\n" + + "\n" + + "queue_name\x18\x01 \x01(\tR\tqueueName\x12#\n" + + "\rmessage_count\x18\x02 \x01(\x03R\fmessageCount\x12&\n" + + "\x0flast_message_id\x18\x03 \x01(\x03R\rlastMessageId\"\xee\x01\n" + + "\x0fAddTasksRequest\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x12Q\n" + + "\x05tasks\x18\x02 \x03(\v2;.temporal.server.api.historyservice.v1.AddTasksRequest.TaskR\x05tasks\x1a]\n" + + "\x04Task\x12\x1f\n" + + "\vcategory_id\x18\x01 \x01(\x05R\n" + + "categoryId\x124\n" + + "\x04blob\x18\x02 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04blob:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"\x12\n" + + "\x10AddTasksResponse\"\x82\x01\n" + + "\x10ListTasksRequest\x12V\n" + + "\arequest\x18\x01 
\x01(\v2<.temporal.server.api.adminservice.v1.ListHistoryTasksRequestR\arequest:\x16\x92\xc4\x03\x12\x1a\x10request.shard_id\"n\n" + + "\x11ListTasksResponse\x12Y\n" + + "\bresponse\x18\x01 \x01(\v2=.temporal.server.api.adminservice.v1.ListHistoryTasksResponseR\bresponse\"\xf5\x03\n" + + "\"CompleteNexusOperationChasmRequest\x12V\n" + + "\n" + + "completion\x18\x01 \x01(\v26.temporal.server.api.token.v1.NexusOperationCompletionR\n" + + "completion\x12;\n" + + "\asuccess\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadH\x00R\asuccess\x12<\n" + + "\afailure\x18\x03 \x01(\v2 .temporal.api.failure.v1.FailureH\x00R\afailure\x129\n" + + "\n" + + "close_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x122\n" + + "\x05links\x18\x05 \x03(\v2\x1c.temporal.api.common.v1.LinkR\x05links\x12'\n" + + "\x0foperation_token\x18\x06 \x01(\tR\x0eoperationToken\x129\n" + + "\n" + + "start_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime:\x1e\x92\xc4\x03\x1aB\x18completion.component_refB\t\n" + + "\aoutcome\"%\n" + + "#CompleteNexusOperationChasmResponse\"\xe0\x03\n" + + "\x1dCompleteNexusOperationRequest\x12V\n" + + "\n" + + "completion\x18\x01 \x01(\v26.temporal.server.api.token.v1.NexusOperationCompletionR\n" + + "completion\x12\x14\n" + + "\x05state\x18\x02 \x01(\tR\x05state\x12;\n" + + "\asuccess\x18\x03 \x01(\v2\x1f.temporal.api.common.v1.PayloadH\x00R\asuccess\x12:\n" + + "\afailure\x18\x04 \x01(\v2\x1e.temporal.api.nexus.v1.FailureH\x00R\afailure\x12'\n" + + "\x0foperation_token\x18\x05 \x01(\tR\x0eoperationToken\x129\n" + + "\n" + + "start_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x122\n" + + "\x05links\x18\a \x03(\v2\x1c.temporal.api.common.v1.LinkR\x05links:5\x92\xc4\x031\"\x17completion.namespace_id*\x16completion.workflow_idB\t\n" + + "\aoutcome\" \n" + + "\x1eCompleteNexusOperationResponse\"\x8d\x02\n" + + "\x1fInvokeStateMachineMethodRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + 
"\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12E\n" + + "\x03ref\x18\x04 \x01(\v23.temporal.server.api.persistence.v1.StateMachineRefR\x03ref\x12\x1f\n" + + "\vmethod_name\x18\x05 \x01(\tR\n" + + "methodName\x12\x14\n" + + "\x05input\x18\x06 \x01(\fR\x05input:\x11\x92\xc4\x03\r*\vworkflow_id\":\n" + + " InvokeStateMachineMethodResponse\x12\x16\n" + + "\x06output\x18\x01 \x01(\fR\x06output\"C\n" + + "\x16DeepHealthCheckRequest\x12!\n" + + "\fhost_address\x18\x01 \x01(\tR\vhostAddress:\x06\x92\xc4\x03\x02\b\x01\"\x9e\x01\n" + + "\x17DeepHealthCheckResponse\x12?\n" + + "\x05state\x18\x01 \x01(\x0e2).temporal.server.api.enums.v1.HealthStateR\x05state\x12B\n" + + "\x06checks\x18\x02 \x03(\v2*.temporal.server.api.health.v1.HealthCheckR\x06checks\"\xbd\x03\n" + + "\x18SyncWorkflowStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12j\n" + + "\x14versioned_transition\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12]\n" + + "\x11version_histories\x18\x04 \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistories\x12*\n" + + "\x11target_cluster_id\x18\x05 \x01(\x05R\x0ftargetClusterId\x12!\n" + + "\farchetype_id\x18\x06 \x01(\rR\varchetypeId:\x1b\x92\xc4\x03\x17*\x15execution.workflow_id\"\xb9\x01\n" + + "\x19SyncWorkflowStateResponse\x12\x83\x01\n" + + "\x1dversioned_transition_artifact\x18\x05 \x01(\v2?.temporal.server.api.replication.v1.VersionedTransitionArtifactR\x1bversionedTransitionArtifactJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03J\x04\b\x03\x10\x04J\x04\b\x04\x10\x05\"\xd3\x01\n" + + "\x1cUpdateActivityOptionsRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12d\n" + + "\x0eupdate_request\x18\x02 
\x01(\v2=.temporal.api.workflowservice.v1.UpdateActivityOptionsRequestR\rupdateRequest:*\x92\xc4\x03&*$update_request.execution.workflow_id\"u\n" + + "\x1dUpdateActivityOptionsResponse\x12T\n" + + "\x10activity_options\x18\x01 \x01(\v2).temporal.api.activity.v1.ActivityOptionsR\x0factivityOptions\"\xc9\x01\n" + + "\x14PauseActivityRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12`\n" + + "\x10frontend_request\x18\x02 \x01(\v25.temporal.api.workflowservice.v1.PauseActivityRequestR\x0ffrontendRequest:,\x92\xc4\x03(*&frontend_request.execution.workflow_id\"\x17\n" + + "\x15PauseActivityResponse\"\xcd\x01\n" + + "\x16UnpauseActivityRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12b\n" + + "\x10frontend_request\x18\x02 \x01(\v27.temporal.api.workflowservice.v1.UnpauseActivityRequestR\x0ffrontendRequest:,\x92\xc4\x03(*&frontend_request.execution.workflow_id\"\x19\n" + + "\x17UnpauseActivityResponse\"\xc9\x01\n" + + "\x14ResetActivityRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12`\n" + + "\x10frontend_request\x18\x02 \x01(\v25.temporal.api.workflowservice.v1.ResetActivityRequestR\x0ffrontendRequest:,\x92\xc4\x03(*&frontend_request.execution.workflow_id\"\x17\n" + + "\x15ResetActivityResponse\"\xee\x01\n" + + "%UpdateWorkflowExecutionOptionsRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12m\n" + + "\x0eupdate_request\x18\x02 \x01(\v2F.temporal.api.workflowservice.v1.UpdateWorkflowExecutionOptionsRequestR\rupdateRequest:3\x92\xc4\x03/*-update_request.workflow_execution.workflow_id\"\x9a\x01\n" + + "&UpdateWorkflowExecutionOptionsResponse\x12p\n" + + "\x1aworkflow_execution_options\x18\x01 \x01(\v22.temporal.api.workflow.v1.WorkflowExecutionOptionsR\x18workflowExecutionOptions\"\xc8\x01\n" + + "\x1dPauseWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12c\n" + + "\rpause_request\x18\x02 
\x01(\v2>.temporal.api.workflowservice.v1.PauseWorkflowExecutionRequestR\fpauseRequest:\x1f\x92\xc4\x03\x1b*\x19pause_request.workflow_id\" \n" + + "\x1ePauseWorkflowExecutionResponse\"\xd2\x01\n" + + "\x1fUnpauseWorkflowExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12i\n" + + "\x0funpause_request\x18\x02 \x01(\v2@.temporal.api.workflowservice.v1.UnpauseWorkflowExecutionRequestR\x0eunpauseRequest:!\x92\xc4\x03\x1d*\x1bunpause_request.workflow_id\"\"\n" + + " UnpauseWorkflowExecutionResponse\"\xb2\x01\n" + + "\x1aStartNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12F\n" + + "\arequest\x18\x03 \x01(\v2,.temporal.api.nexus.v1.StartOperationRequestR\arequest:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"h\n" + + "\x1bStartNexusOperationResponse\x12I\n" + + "\bresponse\x18\x01 \x01(\v2-.temporal.api.nexus.v1.StartOperationResponseR\bresponse\"\xb4\x01\n" + + "\x1bCancelNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12G\n" + + "\arequest\x18\x03 \x01(\v2-.temporal.api.nexus.v1.CancelOperationRequestR\arequest:\x0e\x92\xc4\x03\n" + + "\x1a\bshard_id\"j\n" + + "\x1cCancelNexusOperationResponse\x12J\n" + + "\bresponse\x18\x01 \x01(\v2..temporal.api.nexus.v1.CancelOperationResponseR\bresponse:t\n" + + "\arouting\x12\x1f.google.protobuf.MessageOptions\x18\xc28 \x01(\v25.temporal.server.api.historyservice.v1.RoutingOptionsR\arouting\x88\x01\x01B temporal.api.workflowservice.v1.StartWorkflowExecutionRequest - 134, // 1: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.parent_execution_info:type_name -> temporal.server.api.workflow.v1.ParentExecutionInfo - 135, // 2: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.workflow_execution_expiration_time:type_name -> google.protobuf.Timestamp - 136, // 3: 
temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.continue_as_new_initiator:type_name -> temporal.api.enums.v1.ContinueAsNewInitiator - 137, // 4: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.continued_failure:type_name -> temporal.api.failure.v1.Failure - 138, // 5: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.last_completion_result:type_name -> temporal.api.common.v1.Payloads - 139, // 6: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.first_workflow_task_backoff:type_name -> google.protobuf.Duration - 140, // 7: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.source_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp - 141, // 8: temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 142, // 9: temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse.eager_workflow_task:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueResponse - 143, // 10: temporal.server.api.historyservice.v1.GetMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 144, // 11: temporal.server.api.historyservice.v1.GetMutableStateRequest.version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem - 143, // 12: temporal.server.api.historyservice.v1.GetMutableStateResponse.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 145, // 13: temporal.server.api.historyservice.v1.GetMutableStateResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType - 146, // 14: temporal.server.api.historyservice.v1.GetMutableStateResponse.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 146, // 15: temporal.server.api.historyservice.v1.GetMutableStateResponse.sticky_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 139, // 16: 
temporal.server.api.historyservice.v1.GetMutableStateResponse.sticky_task_queue_schedule_to_start_timeout:type_name -> google.protobuf.Duration - 147, // 17: temporal.server.api.historyservice.v1.GetMutableStateResponse.workflow_state:type_name -> temporal.server.api.enums.v1.WorkflowExecutionState - 148, // 18: temporal.server.api.historyservice.v1.GetMutableStateResponse.workflow_status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus - 149, // 19: temporal.server.api.historyservice.v1.GetMutableStateResponse.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories - 140, // 20: temporal.server.api.historyservice.v1.GetMutableStateResponse.worker_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp - 143, // 21: temporal.server.api.historyservice.v1.PollMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 144, // 22: temporal.server.api.historyservice.v1.PollMutableStateRequest.version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem - 143, // 23: temporal.server.api.historyservice.v1.PollMutableStateResponse.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 145, // 24: temporal.server.api.historyservice.v1.PollMutableStateResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType - 146, // 25: temporal.server.api.historyservice.v1.PollMutableStateResponse.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 146, // 26: temporal.server.api.historyservice.v1.PollMutableStateResponse.sticky_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 139, // 27: temporal.server.api.historyservice.v1.PollMutableStateResponse.sticky_task_queue_schedule_to_start_timeout:type_name -> google.protobuf.Duration - 149, // 28: temporal.server.api.historyservice.v1.PollMutableStateResponse.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories - 147, // 29: 
temporal.server.api.historyservice.v1.PollMutableStateResponse.workflow_state:type_name -> temporal.server.api.enums.v1.WorkflowExecutionState - 148, // 30: temporal.server.api.historyservice.v1.PollMutableStateResponse.workflow_status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus - 143, // 31: temporal.server.api.historyservice.v1.ResetStickyTaskQueueRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 143, // 32: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 150, // 33: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueRequest - 141, // 34: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 145, // 35: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType - 151, // 36: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo - 146, // 37: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.workflow_execution_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 135, // 38: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.scheduled_time:type_name -> google.protobuf.Timestamp - 135, // 39: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.started_time:type_name -> google.protobuf.Timestamp - 127, // 40: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.queries:type_name -> temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.QueriesEntry - 141, // 41: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.clock:type_name -> 
temporal.server.api.clock.v1.VectorClock - 152, // 42: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.messages:type_name -> temporal.api.protocol.v1.Message - 153, // 43: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.history:type_name -> temporal.api.history.v1.History - 143, // 44: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 154, // 45: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollActivityTaskQueueRequest - 141, // 46: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 155, // 47: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.scheduled_event:type_name -> temporal.api.history.v1.HistoryEvent - 135, // 48: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.started_time:type_name -> google.protobuf.Timestamp - 135, // 49: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.current_attempt_scheduled_time:type_name -> google.protobuf.Timestamp - 138, // 50: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.heartbeat_details:type_name -> temporal.api.common.v1.Payloads - 145, // 51: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType - 141, // 52: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 156, // 53: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedRequest.complete_request:type_name -> temporal.api.workflowservice.v1.RespondWorkflowTaskCompletedRequest - 9, // 54: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse.started_response:type_name -> 
temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse - 157, // 55: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse.activity_tasks:type_name -> temporal.api.workflowservice.v1.PollActivityTaskQueueResponse - 142, // 56: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse.new_workflow_task:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueResponse - 158, // 57: temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedRequest.failed_request:type_name -> temporal.api.workflowservice.v1.RespondWorkflowTaskFailedRequest - 143, // 58: temporal.server.api.historyservice.v1.IsWorkflowTaskValidRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 141, // 59: temporal.server.api.historyservice.v1.IsWorkflowTaskValidRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 159, // 60: temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatRequest.heartbeat_request:type_name -> temporal.api.workflowservice.v1.RecordActivityTaskHeartbeatRequest - 160, // 61: temporal.server.api.historyservice.v1.RespondActivityTaskCompletedRequest.complete_request:type_name -> temporal.api.workflowservice.v1.RespondActivityTaskCompletedRequest - 161, // 62: temporal.server.api.historyservice.v1.RespondActivityTaskFailedRequest.failed_request:type_name -> temporal.api.workflowservice.v1.RespondActivityTaskFailedRequest - 162, // 63: temporal.server.api.historyservice.v1.RespondActivityTaskCanceledRequest.cancel_request:type_name -> temporal.api.workflowservice.v1.RespondActivityTaskCanceledRequest - 143, // 64: temporal.server.api.historyservice.v1.IsActivityTaskValidRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 141, // 65: temporal.server.api.historyservice.v1.IsActivityTaskValidRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 163, // 66: 
temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest.signal_request:type_name -> temporal.api.workflowservice.v1.SignalWorkflowExecutionRequest - 143, // 67: temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest.external_workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 164, // 68: temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionRequest.signal_with_start_request:type_name -> temporal.api.workflowservice.v1.SignalWithStartWorkflowExecutionRequest - 143, // 69: temporal.server.api.historyservice.v1.RemoveSignalMutableStateRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 165, // 70: temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest.terminate_request:type_name -> temporal.api.workflowservice.v1.TerminateWorkflowExecutionRequest - 143, // 71: temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest.external_workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 143, // 72: temporal.server.api.historyservice.v1.DeleteWorkflowExecutionRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 166, // 73: temporal.server.api.historyservice.v1.ResetWorkflowExecutionRequest.reset_request:type_name -> temporal.api.workflowservice.v1.ResetWorkflowExecutionRequest - 167, // 74: temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest.cancel_request:type_name -> temporal.api.workflowservice.v1.RequestCancelWorkflowExecutionRequest - 143, // 75: temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest.external_workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 143, // 76: temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 141, // 77: temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest.child_clock:type_name -> 
temporal.server.api.clock.v1.VectorClock - 141, // 78: temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest.parent_clock:type_name -> temporal.server.api.clock.v1.VectorClock - 143, // 79: temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 141, // 80: temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 143, // 81: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.parent_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 143, // 82: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.child_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 155, // 83: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.completion_event:type_name -> temporal.api.history.v1.HistoryEvent - 141, // 84: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 143, // 85: temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest.parent_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 143, // 86: temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest.child_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 141, // 87: temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 168, // 88: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionRequest.request:type_name -> temporal.api.workflowservice.v1.DescribeWorkflowExecutionRequest - 169, // 89: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.execution_config:type_name -> temporal.api.workflow.v1.WorkflowExecutionConfig - 170, // 90: 
temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.workflow_execution_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionInfo - 171, // 91: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_activities:type_name -> temporal.api.workflow.v1.PendingActivityInfo - 172, // 92: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_children:type_name -> temporal.api.workflow.v1.PendingChildExecutionInfo - 173, // 93: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_workflow_task:type_name -> temporal.api.workflow.v1.PendingWorkflowTaskInfo - 174, // 94: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.callbacks:type_name -> temporal.api.workflow.v1.CallbackInfo - 143, // 95: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 144, // 96: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.version_history_items:type_name -> temporal.server.api.history.v1.VersionHistoryItem - 175, // 97: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.events:type_name -> temporal.api.common.v1.DataBlob - 175, // 98: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.new_run_events:type_name -> temporal.api.common.v1.DataBlob - 176, // 99: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo - 177, // 100: temporal.server.api.historyservice.v1.ReplicateWorkflowStateRequest.workflow_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState - 135, // 101: temporal.server.api.historyservice.v1.SyncShardStatusRequest.status_time:type_name -> google.protobuf.Timestamp - 135, // 102: temporal.server.api.historyservice.v1.SyncActivityRequest.scheduled_time:type_name -> google.protobuf.Timestamp - 135, // 103: 
temporal.server.api.historyservice.v1.SyncActivityRequest.started_time:type_name -> google.protobuf.Timestamp - 135, // 104: temporal.server.api.historyservice.v1.SyncActivityRequest.last_heartbeat_time:type_name -> google.protobuf.Timestamp - 138, // 105: temporal.server.api.historyservice.v1.SyncActivityRequest.details:type_name -> temporal.api.common.v1.Payloads - 137, // 106: temporal.server.api.historyservice.v1.SyncActivityRequest.last_failure:type_name -> temporal.api.failure.v1.Failure - 178, // 107: temporal.server.api.historyservice.v1.SyncActivityRequest.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 176, // 108: temporal.server.api.historyservice.v1.SyncActivityRequest.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo - 60, // 109: temporal.server.api.historyservice.v1.SyncActivitiesRequest.activities_info:type_name -> temporal.server.api.historyservice.v1.ActivitySyncInfo - 135, // 110: temporal.server.api.historyservice.v1.ActivitySyncInfo.scheduled_time:type_name -> google.protobuf.Timestamp - 135, // 111: temporal.server.api.historyservice.v1.ActivitySyncInfo.started_time:type_name -> google.protobuf.Timestamp - 135, // 112: temporal.server.api.historyservice.v1.ActivitySyncInfo.last_heartbeat_time:type_name -> google.protobuf.Timestamp - 138, // 113: temporal.server.api.historyservice.v1.ActivitySyncInfo.details:type_name -> temporal.api.common.v1.Payloads - 137, // 114: temporal.server.api.historyservice.v1.ActivitySyncInfo.last_failure:type_name -> temporal.api.failure.v1.Failure - 178, // 115: temporal.server.api.historyservice.v1.ActivitySyncInfo.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 143, // 116: temporal.server.api.historyservice.v1.DescribeMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 177, // 117: temporal.server.api.historyservice.v1.DescribeMutableStateResponse.cache_mutable_state:type_name -> 
temporal.server.api.persistence.v1.WorkflowMutableState - 177, // 118: temporal.server.api.historyservice.v1.DescribeMutableStateResponse.database_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState - 143, // 119: temporal.server.api.historyservice.v1.DescribeHistoryHostRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 179, // 120: temporal.server.api.historyservice.v1.DescribeHistoryHostResponse.namespace_cache:type_name -> temporal.server.api.namespace.v1.NamespaceCacheInfo - 180, // 121: temporal.server.api.historyservice.v1.GetShardResponse.shard_info:type_name -> temporal.server.api.persistence.v1.ShardInfo - 135, // 122: temporal.server.api.historyservice.v1.RemoveTaskRequest.visibility_time:type_name -> google.protobuf.Timestamp - 181, // 123: temporal.server.api.historyservice.v1.GetReplicationMessagesRequest.tokens:type_name -> temporal.server.api.replication.v1.ReplicationToken - 128, // 124: temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.shard_messages:type_name -> temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry - 182, // 125: temporal.server.api.historyservice.v1.GetDLQReplicationMessagesRequest.task_infos:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo - 183, // 126: temporal.server.api.historyservice.v1.GetDLQReplicationMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask - 184, // 127: temporal.server.api.historyservice.v1.QueryWorkflowRequest.request:type_name -> temporal.api.workflowservice.v1.QueryWorkflowRequest - 185, // 128: temporal.server.api.historyservice.v1.QueryWorkflowResponse.response:type_name -> temporal.api.workflowservice.v1.QueryWorkflowResponse - 186, // 129: temporal.server.api.historyservice.v1.ReapplyEventsRequest.request:type_name -> temporal.server.api.adminservice.v1.ReapplyEventsRequest - 187, // 130: 
temporal.server.api.historyservice.v1.GetDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 187, // 131: temporal.server.api.historyservice.v1.GetDLQMessagesResponse.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 183, // 132: temporal.server.api.historyservice.v1.GetDLQMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask - 182, // 133: temporal.server.api.historyservice.v1.GetDLQMessagesResponse.replication_tasks_info:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo - 187, // 134: temporal.server.api.historyservice.v1.PurgeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 187, // 135: temporal.server.api.historyservice.v1.MergeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType - 188, // 136: temporal.server.api.historyservice.v1.RefreshWorkflowTasksRequest.request:type_name -> temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest - 143, // 137: temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 92, // 138: temporal.server.api.historyservice.v1.GetReplicationStatusResponse.shards:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatus - 135, // 139: temporal.server.api.historyservice.v1.ShardReplicationStatus.shard_local_time:type_name -> google.protobuf.Timestamp - 129, // 140: temporal.server.api.historyservice.v1.ShardReplicationStatus.remote_clusters:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatus.RemoteClustersEntry - 130, // 141: temporal.server.api.historyservice.v1.ShardReplicationStatus.handover_namespaces:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatus.HandoverNamespacesEntry - 135, // 142: 
temporal.server.api.historyservice.v1.ShardReplicationStatus.max_replication_task_visibility_time:type_name -> google.protobuf.Timestamp - 135, // 143: temporal.server.api.historyservice.v1.ShardReplicationStatusPerCluster.acked_task_visibility_time:type_name -> google.protobuf.Timestamp - 143, // 144: temporal.server.api.historyservice.v1.RebuildMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 143, // 145: temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 175, // 146: temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest.history_batches:type_name -> temporal.api.common.v1.DataBlob - 178, // 147: temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 143, // 148: temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 135, // 149: temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest.workflow_start_time:type_name -> google.protobuf.Timestamp - 135, // 150: temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest.workflow_close_time:type_name -> google.protobuf.Timestamp - 189, // 151: temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequest.request:type_name -> temporal.api.workflowservice.v1.UpdateWorkflowExecutionRequest - 190, // 152: temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponse.response:type_name -> temporal.api.workflowservice.v1.UpdateWorkflowExecutionResponse - 191, // 153: temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesRequest.sync_replication_state:type_name -> temporal.server.api.replication.v1.SyncReplicationState - 192, // 154: temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesResponse.messages:type_name -> 
temporal.server.api.replication.v1.WorkflowReplicationMessages - 193, // 155: temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateRequest.request:type_name -> temporal.api.workflowservice.v1.PollWorkflowExecutionUpdateRequest - 194, // 156: temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateResponse.response:type_name -> temporal.api.workflowservice.v1.PollWorkflowExecutionUpdateResponse - 195, // 157: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryRequest - 196, // 158: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryResponse - 197, // 159: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryReverseRequest - 198, // 160: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryReverseResponse - 199, // 161: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Request.request:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request - 200, // 162: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Response.response:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response - 201, // 163: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryRequest.request:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest - 202, // 164: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryResponse.response:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse - 203, // 165: 
temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionRequest.request:type_name -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest - 204, // 166: temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionResponse.response:type_name -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse - 205, // 167: temporal.server.api.historyservice.v1.GetDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey - 206, // 168: temporal.server.api.historyservice.v1.GetDLQTasksResponse.dlq_tasks:type_name -> temporal.server.api.common.v1.HistoryDLQTask - 205, // 169: temporal.server.api.historyservice.v1.DeleteDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey - 207, // 170: temporal.server.api.historyservice.v1.DeleteDLQTasksRequest.inclusive_max_task_metadata:type_name -> temporal.server.api.common.v1.HistoryDLQTaskMetadata - 131, // 171: temporal.server.api.historyservice.v1.ListQueuesResponse.queues:type_name -> temporal.server.api.historyservice.v1.ListQueuesResponse.QueueInfo - 132, // 172: temporal.server.api.historyservice.v1.AddTasksRequest.tasks:type_name -> temporal.server.api.historyservice.v1.AddTasksRequest.Task - 208, // 173: temporal.server.api.historyservice.v1.ListTasksRequest.request:type_name -> temporal.server.api.adminservice.v1.ListHistoryTasksRequest - 209, // 174: temporal.server.api.historyservice.v1.ListTasksResponse.response:type_name -> temporal.server.api.adminservice.v1.ListHistoryTasksResponse - 210, // 175: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.QueriesEntry.value:type_name -> temporal.api.query.v1.WorkflowQuery - 211, // 176: temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry.value:type_name -> temporal.server.api.replication.v1.ReplicationMessages - 94, // 177: temporal.server.api.historyservice.v1.ShardReplicationStatus.RemoteClustersEntry.value:type_name -> 
temporal.server.api.historyservice.v1.ShardReplicationStatusPerCluster - 93, // 178: temporal.server.api.historyservice.v1.ShardReplicationStatus.HandoverNamespacesEntry.value:type_name -> temporal.server.api.historyservice.v1.HandoverNamespaceInfo - 175, // 179: temporal.server.api.historyservice.v1.AddTasksRequest.Task.blob:type_name -> temporal.api.common.v1.DataBlob - 180, // [180:180] is the sub-list for method output_type - 180, // [180:180] is the sub-list for method input_type - 180, // [180:180] is the sub-list for extension type_name - 180, // [180:180] is the sub-list for extension extendee - 0, // [0:180] is the sub-list for field type_name + 169, // 0: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.start_request:type_name -> temporal.api.workflowservice.v1.StartWorkflowExecutionRequest + 170, // 1: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.parent_execution_info:type_name -> temporal.server.api.workflow.v1.ParentExecutionInfo + 171, // 2: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.workflow_execution_expiration_time:type_name -> google.protobuf.Timestamp + 172, // 3: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.continue_as_new_initiator:type_name -> temporal.api.enums.v1.ContinueAsNewInitiator + 173, // 4: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.continued_failure:type_name -> temporal.api.failure.v1.Failure + 174, // 5: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.last_completion_result:type_name -> temporal.api.common.v1.Payloads + 175, // 6: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.first_workflow_task_backoff:type_name -> google.protobuf.Duration + 176, // 7: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.source_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp + 177, // 8: 
temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.root_execution_info:type_name -> temporal.server.api.workflow.v1.RootExecutionInfo + 178, // 9: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.versioning_override:type_name -> temporal.api.workflow.v1.VersioningOverride + 179, // 10: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.inherited_pinned_version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 180, // 11: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.inherited_auto_upgrade_info:type_name -> temporal.api.deployment.v1.InheritedAutoUpgradeInfo + 181, // 12: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.declined_target_version_upgrade:type_name -> temporal.api.history.v1.DeclinedTargetVersionUpgrade + 175, // 13: temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest.initial_skipped_duration:type_name -> google.protobuf.Duration + 182, // 14: temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 183, // 15: temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse.eager_workflow_task:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueResponse + 184, // 16: temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus + 185, // 17: temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse.link:type_name -> temporal.api.common.v1.Link + 186, // 18: temporal.server.api.historyservice.v1.GetMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 187, // 19: temporal.server.api.historyservice.v1.GetMutableStateRequest.version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 188, // 20: temporal.server.api.historyservice.v1.GetMutableStateRequest.versioned_transition:type_name -> 
temporal.server.api.persistence.v1.VersionedTransition + 186, // 21: temporal.server.api.historyservice.v1.GetMutableStateResponse.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 189, // 22: temporal.server.api.historyservice.v1.GetMutableStateResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 190, // 23: temporal.server.api.historyservice.v1.GetMutableStateResponse.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 190, // 24: temporal.server.api.historyservice.v1.GetMutableStateResponse.sticky_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 175, // 25: temporal.server.api.historyservice.v1.GetMutableStateResponse.sticky_task_queue_schedule_to_start_timeout:type_name -> google.protobuf.Duration + 191, // 26: temporal.server.api.historyservice.v1.GetMutableStateResponse.workflow_state:type_name -> temporal.server.api.enums.v1.WorkflowExecutionState + 184, // 27: temporal.server.api.historyservice.v1.GetMutableStateResponse.workflow_status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus + 192, // 28: temporal.server.api.historyservice.v1.GetMutableStateResponse.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 176, // 29: temporal.server.api.historyservice.v1.GetMutableStateResponse.most_recent_worker_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp + 188, // 30: temporal.server.api.historyservice.v1.GetMutableStateResponse.transition_history:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 193, // 31: temporal.server.api.historyservice.v1.GetMutableStateResponse.versioning_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionVersioningInfo + 194, // 32: temporal.server.api.historyservice.v1.GetMutableStateResponse.transient_or_speculative_tasks:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo + 186, // 33: temporal.server.api.historyservice.v1.PollMutableStateRequest.execution:type_name -> 
temporal.api.common.v1.WorkflowExecution + 187, // 34: temporal.server.api.historyservice.v1.PollMutableStateRequest.version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 186, // 35: temporal.server.api.historyservice.v1.PollMutableStateResponse.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 189, // 36: temporal.server.api.historyservice.v1.PollMutableStateResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 190, // 37: temporal.server.api.historyservice.v1.PollMutableStateResponse.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 190, // 38: temporal.server.api.historyservice.v1.PollMutableStateResponse.sticky_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 175, // 39: temporal.server.api.historyservice.v1.PollMutableStateResponse.sticky_task_queue_schedule_to_start_timeout:type_name -> google.protobuf.Duration + 192, // 40: temporal.server.api.historyservice.v1.PollMutableStateResponse.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 191, // 41: temporal.server.api.historyservice.v1.PollMutableStateResponse.workflow_state:type_name -> temporal.server.api.enums.v1.WorkflowExecutionState + 184, // 42: temporal.server.api.historyservice.v1.PollMutableStateResponse.workflow_status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus + 186, // 43: temporal.server.api.historyservice.v1.ResetStickyTaskQueueRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 160, // 44: temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest.operations:type_name -> temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest.Operation + 161, // 45: temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse.responses:type_name -> temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse.Response + 186, // 46: 
temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 195, // 47: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueRequest + 182, // 48: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 196, // 49: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.build_id_redirect_info:type_name -> temporal.server.api.taskqueue.v1.BuildIdRedirectInfo + 197, // 50: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.scheduled_deployment:type_name -> temporal.api.deployment.v1.Deployment + 198, // 51: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective + 179, // 52: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest.target_deployment_version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 189, // 53: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 194, // 54: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo + 190, // 55: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.workflow_execution_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 171, // 56: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.scheduled_time:type_name -> google.protobuf.Timestamp + 171, // 57: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.started_time:type_name -> google.protobuf.Timestamp + 162, // 58: 
temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.queries:type_name -> temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.QueriesEntry + 182, // 59: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 199, // 60: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.messages:type_name -> temporal.api.protocol.v1.Message + 200, // 61: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.history:type_name -> temporal.api.history.v1.History + 200, // 62: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.raw_history:type_name -> temporal.api.history.v1.History + 189, // 63: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 194, // 64: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo + 190, // 65: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.workflow_execution_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 171, // 66: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.scheduled_time:type_name -> google.protobuf.Timestamp + 171, // 67: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.started_time:type_name -> google.protobuf.Timestamp + 163, // 68: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.queries:type_name -> temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.QueriesEntry + 182, // 69: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 199, // 70: 
temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.messages:type_name -> temporal.api.protocol.v1.Message + 200, // 71: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.history:type_name -> temporal.api.history.v1.History + 186, // 72: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 201, // 73: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollActivityTaskQueueRequest + 182, // 74: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 196, // 75: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.build_id_redirect_info:type_name -> temporal.server.api.taskqueue.v1.BuildIdRedirectInfo + 197, // 76: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.scheduled_deployment:type_name -> temporal.api.deployment.v1.Deployment + 198, // 77: temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective + 202, // 78: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.scheduled_event:type_name -> temporal.api.history.v1.HistoryEvent + 171, // 79: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.started_time:type_name -> google.protobuf.Timestamp + 171, // 80: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.current_attempt_scheduled_time:type_name -> google.protobuf.Timestamp + 174, // 81: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.heartbeat_details:type_name -> temporal.api.common.v1.Payloads + 189, // 82: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.workflow_type:type_name -> 
temporal.api.common.v1.WorkflowType + 182, // 83: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 203, // 84: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.priority:type_name -> temporal.api.common.v1.Priority + 204, // 85: temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse.retry_policy:type_name -> temporal.api.common.v1.RetryPolicy + 205, // 86: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedRequest.complete_request:type_name -> temporal.api.workflowservice.v1.RespondWorkflowTaskCompletedRequest + 12, // 87: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse.started_response:type_name -> temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse + 206, // 88: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse.activity_tasks:type_name -> temporal.api.workflowservice.v1.PollActivityTaskQueueResponse + 183, // 89: temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse.new_workflow_task:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueResponse + 207, // 90: temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedRequest.failed_request:type_name -> temporal.api.workflowservice.v1.RespondWorkflowTaskFailedRequest + 186, // 91: temporal.server.api.historyservice.v1.IsWorkflowTaskValidRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 182, // 92: temporal.server.api.historyservice.v1.IsWorkflowTaskValidRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 208, // 93: temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatRequest.heartbeat_request:type_name -> temporal.api.workflowservice.v1.RecordActivityTaskHeartbeatRequest + 209, // 94: temporal.server.api.historyservice.v1.RespondActivityTaskCompletedRequest.complete_request:type_name -> 
temporal.api.workflowservice.v1.RespondActivityTaskCompletedRequest + 210, // 95: temporal.server.api.historyservice.v1.RespondActivityTaskFailedRequest.failed_request:type_name -> temporal.api.workflowservice.v1.RespondActivityTaskFailedRequest + 211, // 96: temporal.server.api.historyservice.v1.RespondActivityTaskCanceledRequest.cancel_request:type_name -> temporal.api.workflowservice.v1.RespondActivityTaskCanceledRequest + 186, // 97: temporal.server.api.historyservice.v1.IsActivityTaskValidRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 182, // 98: temporal.server.api.historyservice.v1.IsActivityTaskValidRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 212, // 99: temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest.signal_request:type_name -> temporal.api.workflowservice.v1.SignalWorkflowExecutionRequest + 186, // 100: temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest.external_workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 185, // 101: temporal.server.api.historyservice.v1.SignalWorkflowExecutionResponse.link:type_name -> temporal.api.common.v1.Link + 213, // 102: temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionRequest.signal_with_start_request:type_name -> temporal.api.workflowservice.v1.SignalWithStartWorkflowExecutionRequest + 185, // 103: temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionResponse.signal_link:type_name -> temporal.api.common.v1.Link + 186, // 104: temporal.server.api.historyservice.v1.RemoveSignalMutableStateRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 214, // 105: temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest.terminate_request:type_name -> temporal.api.workflowservice.v1.TerminateWorkflowExecutionRequest + 186, // 106: temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest.external_workflow_execution:type_name 
-> temporal.api.common.v1.WorkflowExecution + 186, // 107: temporal.server.api.historyservice.v1.DeleteWorkflowExecutionRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 215, // 108: temporal.server.api.historyservice.v1.ResetWorkflowExecutionRequest.reset_request:type_name -> temporal.api.workflowservice.v1.ResetWorkflowExecutionRequest + 216, // 109: temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest.cancel_request:type_name -> temporal.api.workflowservice.v1.RequestCancelWorkflowExecutionRequest + 186, // 110: temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest.external_workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 186, // 111: temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 182, // 112: temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest.child_clock:type_name -> temporal.server.api.clock.v1.VectorClock + 182, // 113: temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest.parent_clock:type_name -> temporal.server.api.clock.v1.VectorClock + 186, // 114: temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 182, // 115: temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 186, // 116: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.parent_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 186, // 117: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.child_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 202, // 118: temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.completion_event:type_name -> temporal.api.history.v1.HistoryEvent + 182, // 119: 
temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 186, // 120: temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest.parent_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 186, // 121: temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest.child_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 182, // 122: temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 217, // 123: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionRequest.request:type_name -> temporal.api.workflowservice.v1.DescribeWorkflowExecutionRequest + 218, // 124: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.execution_config:type_name -> temporal.api.workflow.v1.WorkflowExecutionConfig + 219, // 125: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.workflow_execution_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionInfo + 220, // 126: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_activities:type_name -> temporal.api.workflow.v1.PendingActivityInfo + 221, // 127: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_children:type_name -> temporal.api.workflow.v1.PendingChildExecutionInfo + 222, // 128: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_workflow_task:type_name -> temporal.api.workflow.v1.PendingWorkflowTaskInfo + 223, // 129: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.callbacks:type_name -> temporal.api.workflow.v1.CallbackInfo + 224, // 130: temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.pending_nexus_operations:type_name -> temporal.api.workflow.v1.PendingNexusOperationInfo + 225, // 131: 
temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse.workflow_extended_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionExtendedInfo + 186, // 132: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 187, // 133: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.version_history_items:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 226, // 134: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.events:type_name -> temporal.api.common.v1.DataBlob + 226, // 135: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.new_run_events:type_name -> temporal.api.common.v1.DataBlob + 227, // 136: temporal.server.api.historyservice.v1.ReplicateEventsV2Request.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo + 228, // 137: temporal.server.api.historyservice.v1.ReplicateWorkflowStateRequest.workflow_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 171, // 138: temporal.server.api.historyservice.v1.SyncShardStatusRequest.status_time:type_name -> google.protobuf.Timestamp + 171, // 139: temporal.server.api.historyservice.v1.SyncActivityRequest.scheduled_time:type_name -> google.protobuf.Timestamp + 171, // 140: temporal.server.api.historyservice.v1.SyncActivityRequest.started_time:type_name -> google.protobuf.Timestamp + 171, // 141: temporal.server.api.historyservice.v1.SyncActivityRequest.last_heartbeat_time:type_name -> google.protobuf.Timestamp + 174, // 142: temporal.server.api.historyservice.v1.SyncActivityRequest.details:type_name -> temporal.api.common.v1.Payloads + 173, // 143: temporal.server.api.historyservice.v1.SyncActivityRequest.last_failure:type_name -> temporal.api.failure.v1.Failure + 229, // 144: temporal.server.api.historyservice.v1.SyncActivityRequest.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 227, // 
145: temporal.server.api.historyservice.v1.SyncActivityRequest.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo + 171, // 146: temporal.server.api.historyservice.v1.SyncActivityRequest.first_scheduled_time:type_name -> google.protobuf.Timestamp + 171, // 147: temporal.server.api.historyservice.v1.SyncActivityRequest.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 175, // 148: temporal.server.api.historyservice.v1.SyncActivityRequest.retry_initial_interval:type_name -> google.protobuf.Duration + 175, // 149: temporal.server.api.historyservice.v1.SyncActivityRequest.retry_maximum_interval:type_name -> google.protobuf.Duration + 64, // 150: temporal.server.api.historyservice.v1.SyncActivitiesRequest.activities_info:type_name -> temporal.server.api.historyservice.v1.ActivitySyncInfo + 171, // 151: temporal.server.api.historyservice.v1.ActivitySyncInfo.scheduled_time:type_name -> google.protobuf.Timestamp + 171, // 152: temporal.server.api.historyservice.v1.ActivitySyncInfo.started_time:type_name -> google.protobuf.Timestamp + 171, // 153: temporal.server.api.historyservice.v1.ActivitySyncInfo.last_heartbeat_time:type_name -> google.protobuf.Timestamp + 174, // 154: temporal.server.api.historyservice.v1.ActivitySyncInfo.details:type_name -> temporal.api.common.v1.Payloads + 173, // 155: temporal.server.api.historyservice.v1.ActivitySyncInfo.last_failure:type_name -> temporal.api.failure.v1.Failure + 229, // 156: temporal.server.api.historyservice.v1.ActivitySyncInfo.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 171, // 157: temporal.server.api.historyservice.v1.ActivitySyncInfo.first_scheduled_time:type_name -> google.protobuf.Timestamp + 171, // 158: temporal.server.api.historyservice.v1.ActivitySyncInfo.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 175, // 159: temporal.server.api.historyservice.v1.ActivitySyncInfo.retry_initial_interval:type_name -> 
google.protobuf.Duration + 175, // 160: temporal.server.api.historyservice.v1.ActivitySyncInfo.retry_maximum_interval:type_name -> google.protobuf.Duration + 186, // 161: temporal.server.api.historyservice.v1.DescribeMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 228, // 162: temporal.server.api.historyservice.v1.DescribeMutableStateResponse.cache_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 228, // 163: temporal.server.api.historyservice.v1.DescribeMutableStateResponse.database_mutable_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 186, // 164: temporal.server.api.historyservice.v1.DescribeHistoryHostRequest.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 230, // 165: temporal.server.api.historyservice.v1.DescribeHistoryHostResponse.namespace_cache:type_name -> temporal.server.api.namespace.v1.NamespaceCacheInfo + 231, // 166: temporal.server.api.historyservice.v1.GetShardResponse.shard_info:type_name -> temporal.server.api.persistence.v1.ShardInfo + 171, // 167: temporal.server.api.historyservice.v1.RemoveTaskRequest.visibility_time:type_name -> google.protobuf.Timestamp + 232, // 168: temporal.server.api.historyservice.v1.GetReplicationMessagesRequest.tokens:type_name -> temporal.server.api.replication.v1.ReplicationToken + 164, // 169: temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.shard_messages:type_name -> temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry + 233, // 170: temporal.server.api.historyservice.v1.GetDLQReplicationMessagesRequest.task_infos:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo + 234, // 171: temporal.server.api.historyservice.v1.GetDLQReplicationMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask + 235, // 172: 
temporal.server.api.historyservice.v1.QueryWorkflowRequest.request:type_name -> temporal.api.workflowservice.v1.QueryWorkflowRequest + 236, // 173: temporal.server.api.historyservice.v1.QueryWorkflowResponse.response:type_name -> temporal.api.workflowservice.v1.QueryWorkflowResponse + 237, // 174: temporal.server.api.historyservice.v1.ReapplyEventsRequest.request:type_name -> temporal.server.api.adminservice.v1.ReapplyEventsRequest + 238, // 175: temporal.server.api.historyservice.v1.GetDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 238, // 176: temporal.server.api.historyservice.v1.GetDLQMessagesResponse.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 234, // 177: temporal.server.api.historyservice.v1.GetDLQMessagesResponse.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask + 233, // 178: temporal.server.api.historyservice.v1.GetDLQMessagesResponse.replication_tasks_info:type_name -> temporal.server.api.replication.v1.ReplicationTaskInfo + 238, // 179: temporal.server.api.historyservice.v1.PurgeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 238, // 180: temporal.server.api.historyservice.v1.MergeDLQMessagesRequest.type:type_name -> temporal.server.api.enums.v1.DeadLetterQueueType + 239, // 181: temporal.server.api.historyservice.v1.RefreshWorkflowTasksRequest.request:type_name -> temporal.server.api.adminservice.v1.RefreshWorkflowTasksRequest + 186, // 182: temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 96, // 183: temporal.server.api.historyservice.v1.GetReplicationStatusResponse.shards:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatus + 171, // 184: temporal.server.api.historyservice.v1.ShardReplicationStatus.shard_local_time:type_name -> google.protobuf.Timestamp + 165, // 185: 
temporal.server.api.historyservice.v1.ShardReplicationStatus.remote_clusters:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatus.RemoteClustersEntry + 166, // 186: temporal.server.api.historyservice.v1.ShardReplicationStatus.handover_namespaces:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatus.HandoverNamespacesEntry + 171, // 187: temporal.server.api.historyservice.v1.ShardReplicationStatus.max_replication_task_visibility_time:type_name -> google.protobuf.Timestamp + 171, // 188: temporal.server.api.historyservice.v1.ShardReplicationStatusPerCluster.acked_task_visibility_time:type_name -> google.protobuf.Timestamp + 186, // 189: temporal.server.api.historyservice.v1.RebuildMutableStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 186, // 190: temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 226, // 191: temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest.history_batches:type_name -> temporal.api.common.v1.DataBlob + 229, // 192: temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 186, // 193: temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 171, // 194: temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest.workflow_start_time:type_name -> google.protobuf.Timestamp + 171, // 195: temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest.workflow_close_time:type_name -> google.protobuf.Timestamp + 240, // 196: temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequest.request:type_name -> temporal.api.workflowservice.v1.UpdateWorkflowExecutionRequest + 241, // 197: temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponse.response:type_name -> 
temporal.api.workflowservice.v1.UpdateWorkflowExecutionResponse + 242, // 198: temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesRequest.sync_replication_state:type_name -> temporal.server.api.replication.v1.SyncReplicationState + 243, // 199: temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesResponse.messages:type_name -> temporal.server.api.replication.v1.WorkflowReplicationMessages + 244, // 200: temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateRequest.request:type_name -> temporal.api.workflowservice.v1.PollWorkflowExecutionUpdateRequest + 245, // 201: temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateResponse.response:type_name -> temporal.api.workflowservice.v1.PollWorkflowExecutionUpdateResponse + 246, // 202: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryRequest + 247, // 203: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryResponse + 200, // 204: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponse.history:type_name -> temporal.api.history.v1.History + 247, // 205: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponseWithRaw.response:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryResponse + 248, // 206: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryReverseRequest + 249, // 207: temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkflowExecutionHistoryReverseResponse + 250, // 208: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Request.request:type_name -> 
temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Request + 251, // 209: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Response.response:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryV2Response + 252, // 210: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryRequest.request:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryRequest + 253, // 211: temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryResponse.response:type_name -> temporal.server.api.adminservice.v1.GetWorkflowExecutionRawHistoryResponse + 254, // 212: temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionRequest.request:type_name -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionRequest + 255, // 213: temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionResponse.response:type_name -> temporal.server.api.adminservice.v1.DeleteWorkflowExecutionResponse + 256, // 214: temporal.server.api.historyservice.v1.GetDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey + 257, // 215: temporal.server.api.historyservice.v1.GetDLQTasksResponse.dlq_tasks:type_name -> temporal.server.api.common.v1.HistoryDLQTask + 256, // 216: temporal.server.api.historyservice.v1.DeleteDLQTasksRequest.dlq_key:type_name -> temporal.server.api.common.v1.HistoryDLQKey + 258, // 217: temporal.server.api.historyservice.v1.DeleteDLQTasksRequest.inclusive_max_task_metadata:type_name -> temporal.server.api.common.v1.HistoryDLQTaskMetadata + 167, // 218: temporal.server.api.historyservice.v1.ListQueuesResponse.queues:type_name -> temporal.server.api.historyservice.v1.ListQueuesResponse.QueueInfo + 168, // 219: temporal.server.api.historyservice.v1.AddTasksRequest.tasks:type_name -> temporal.server.api.historyservice.v1.AddTasksRequest.Task + 259, // 220: temporal.server.api.historyservice.v1.ListTasksRequest.request:type_name -> 
temporal.server.api.adminservice.v1.ListHistoryTasksRequest + 260, // 221: temporal.server.api.historyservice.v1.ListTasksResponse.response:type_name -> temporal.server.api.adminservice.v1.ListHistoryTasksResponse + 261, // 222: temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest.completion:type_name -> temporal.server.api.token.v1.NexusOperationCompletion + 262, // 223: temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest.success:type_name -> temporal.api.common.v1.Payload + 173, // 224: temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest.failure:type_name -> temporal.api.failure.v1.Failure + 171, // 225: temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest.close_time:type_name -> google.protobuf.Timestamp + 185, // 226: temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest.links:type_name -> temporal.api.common.v1.Link + 171, // 227: temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest.start_time:type_name -> google.protobuf.Timestamp + 261, // 228: temporal.server.api.historyservice.v1.CompleteNexusOperationRequest.completion:type_name -> temporal.server.api.token.v1.NexusOperationCompletion + 262, // 229: temporal.server.api.historyservice.v1.CompleteNexusOperationRequest.success:type_name -> temporal.api.common.v1.Payload + 263, // 230: temporal.server.api.historyservice.v1.CompleteNexusOperationRequest.failure:type_name -> temporal.api.nexus.v1.Failure + 171, // 231: temporal.server.api.historyservice.v1.CompleteNexusOperationRequest.start_time:type_name -> google.protobuf.Timestamp + 185, // 232: temporal.server.api.historyservice.v1.CompleteNexusOperationRequest.links:type_name -> temporal.api.common.v1.Link + 264, // 233: temporal.server.api.historyservice.v1.InvokeStateMachineMethodRequest.ref:type_name -> temporal.server.api.persistence.v1.StateMachineRef + 265, // 234: 
temporal.server.api.historyservice.v1.DeepHealthCheckResponse.state:type_name -> temporal.server.api.enums.v1.HealthState + 266, // 235: temporal.server.api.historyservice.v1.DeepHealthCheckResponse.checks:type_name -> temporal.server.api.health.v1.HealthCheck + 186, // 236: temporal.server.api.historyservice.v1.SyncWorkflowStateRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 188, // 237: temporal.server.api.historyservice.v1.SyncWorkflowStateRequest.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 192, // 238: temporal.server.api.historyservice.v1.SyncWorkflowStateRequest.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 267, // 239: temporal.server.api.historyservice.v1.SyncWorkflowStateResponse.versioned_transition_artifact:type_name -> temporal.server.api.replication.v1.VersionedTransitionArtifact + 268, // 240: temporal.server.api.historyservice.v1.UpdateActivityOptionsRequest.update_request:type_name -> temporal.api.workflowservice.v1.UpdateActivityOptionsRequest + 269, // 241: temporal.server.api.historyservice.v1.UpdateActivityOptionsResponse.activity_options:type_name -> temporal.api.activity.v1.ActivityOptions + 270, // 242: temporal.server.api.historyservice.v1.PauseActivityRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.PauseActivityRequest + 271, // 243: temporal.server.api.historyservice.v1.UnpauseActivityRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.UnpauseActivityRequest + 272, // 244: temporal.server.api.historyservice.v1.ResetActivityRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.ResetActivityRequest + 273, // 245: temporal.server.api.historyservice.v1.UpdateWorkflowExecutionOptionsRequest.update_request:type_name -> temporal.api.workflowservice.v1.UpdateWorkflowExecutionOptionsRequest + 274, // 246: 
temporal.server.api.historyservice.v1.UpdateWorkflowExecutionOptionsResponse.workflow_execution_options:type_name -> temporal.api.workflow.v1.WorkflowExecutionOptions + 275, // 247: temporal.server.api.historyservice.v1.PauseWorkflowExecutionRequest.pause_request:type_name -> temporal.api.workflowservice.v1.PauseWorkflowExecutionRequest + 276, // 248: temporal.server.api.historyservice.v1.UnpauseWorkflowExecutionRequest.unpause_request:type_name -> temporal.api.workflowservice.v1.UnpauseWorkflowExecutionRequest + 277, // 249: temporal.server.api.historyservice.v1.StartNexusOperationRequest.request:type_name -> temporal.api.nexus.v1.StartOperationRequest + 278, // 250: temporal.server.api.historyservice.v1.StartNexusOperationResponse.response:type_name -> temporal.api.nexus.v1.StartOperationResponse + 279, // 251: temporal.server.api.historyservice.v1.CancelNexusOperationRequest.request:type_name -> temporal.api.nexus.v1.CancelOperationRequest + 280, // 252: temporal.server.api.historyservice.v1.CancelNexusOperationResponse.response:type_name -> temporal.api.nexus.v1.CancelOperationResponse + 1, // 253: temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest.Operation.start_workflow:type_name -> temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest + 105, // 254: temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest.Operation.update_workflow:type_name -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequest + 2, // 255: temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse.Response.start_workflow:type_name -> temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse + 106, // 256: temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse.Response.update_workflow:type_name -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponse + 281, // 257: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse.QueriesEntry.value:type_name -> 
temporal.api.query.v1.WorkflowQuery + 281, // 258: temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponseWithRawHistory.QueriesEntry.value:type_name -> temporal.api.query.v1.WorkflowQuery + 282, // 259: temporal.server.api.historyservice.v1.GetReplicationMessagesResponse.ShardMessagesEntry.value:type_name -> temporal.server.api.replication.v1.ReplicationMessages + 98, // 260: temporal.server.api.historyservice.v1.ShardReplicationStatus.RemoteClustersEntry.value:type_name -> temporal.server.api.historyservice.v1.ShardReplicationStatusPerCluster + 97, // 261: temporal.server.api.historyservice.v1.ShardReplicationStatus.HandoverNamespacesEntry.value:type_name -> temporal.server.api.historyservice.v1.HandoverNamespaceInfo + 226, // 262: temporal.server.api.historyservice.v1.AddTasksRequest.Task.blob:type_name -> temporal.api.common.v1.DataBlob + 283, // 263: temporal.server.api.historyservice.v1.routing:extendee -> google.protobuf.MessageOptions + 0, // 264: temporal.server.api.historyservice.v1.routing:type_name -> temporal.server.api.historyservice.v1.RoutingOptions + 265, // [265:265] is the sub-list for method output_type + 265, // [265:265] is the sub-list for method input_type + 264, // [264:265] is the sub-list for extension type_name + 263, // [263:264] is the sub-list for extension extendee + 0, // [0:263] is the sub-list for field type_name } func init() { file_temporal_server_api_historyservice_v1_request_response_proto_init() } @@ -10105,1578 +11802,44 @@ func file_temporal_server_api_historyservice_v1_request_response_proto_init() { if File_temporal_server_api_historyservice_v1_request_response_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMutableStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMutableStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollMutableStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollMutableStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetStickyTaskQueueRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[7].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*ResetStickyTaskQueueResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordWorkflowTaskStartedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordWorkflowTaskStartedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordActivityTaskStartedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordActivityTaskStartedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondWorkflowTaskCompletedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondWorkflowTaskCompletedResponse); i { - case 0: - 
return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondWorkflowTaskFailedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondWorkflowTaskFailedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IsWorkflowTaskValidRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IsWorkflowTaskValidResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordActivityTaskHeartbeatRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordActivityTaskHeartbeatResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondActivityTaskCompletedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondActivityTaskCompletedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondActivityTaskFailedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondActivityTaskFailedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondActivityTaskCanceledRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondActivityTaskCanceledResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IsActivityTaskValidRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IsActivityTaskValidResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignalWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignalWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignalWithStartWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignalWithStartWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[32].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*RemoveSignalMutableStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveSignalMutableStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TerminateWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TerminateWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetWorkflowExecutionRequest); i 
{ - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResetWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestCancelWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestCancelWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScheduleWorkflowTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ScheduleWorkflowTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyFirstWorkflowTaskScheduledRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyFirstWorkflowTaskScheduledResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordChildExecutionCompletedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RecordChildExecutionCompletedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyChildExecutionCompletionRecordedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyChildExecutionCompletionRecordedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - 
} - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicateEventsV2Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicateEventsV2Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicateWorkflowStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicateWorkflowStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncShardStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[57].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*SyncShardStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncActivityRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncActivitiesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ActivitySyncInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncActivityResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeMutableStateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeMutableStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeHistoryHostRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeHistoryHostResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloseShardRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CloseShardResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetShardResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[70].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*RemoveTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache 
- case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWorkflowResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReapplyEventsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReapplyEventsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeDLQMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PurgeDLQMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MergeDLQMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MergeDLQMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshWorkflowTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RefreshWorkflowTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateLastHistoryReplicationTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[89].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*GenerateLastHistoryReplicationTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicationStatusRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetReplicationStatusResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HandoverNamespaceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardReplicationStatusPerCluster); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildMutableStateRequest); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RebuildMutableStateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteWorkflowVisibilityRecordRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteWorkflowVisibilityRecordResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } 
- file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamWorkflowReplicationMessagesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StreamWorkflowReplicationMessagesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollWorkflowExecutionUpdateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollWorkflowExecutionUpdateResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[107].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionHistoryRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[108].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionHistoryResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionHistoryReverseRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionHistoryReverseResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryV2Request); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryV2Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkflowExecutionRawHistoryResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ForceDeleteWorkflowExecutionRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ForceDeleteWorkflowExecutionResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[117].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[118].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetDLQTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[119].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteDLQTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[120].Exporter = func(v interface{}, 
i int) interface{} { - switch v := v.(*DeleteDLQTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[121].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[122].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[123].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[124].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[125].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTasksRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[126].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTasksResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - 
return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[131].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesResponse_QueueInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[132].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddTasksRequest_Task); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[103].OneofWrappers = []interface{}{ + file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[107].OneofWrappers = []any{ (*StreamWorkflowReplicationMessagesRequest_SyncReplicationState)(nil), } - file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[104].OneofWrappers = []interface{}{ + file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[108].OneofWrappers = []any{ (*StreamWorkflowReplicationMessagesResponse_Messages)(nil), } + file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[132].OneofWrappers = []any{ + (*CompleteNexusOperationChasmRequest_Success)(nil), + (*CompleteNexusOperationChasmRequest_Failure)(nil), + } + file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[134].OneofWrappers = []any{ + (*CompleteNexusOperationRequest_Success)(nil), + (*CompleteNexusOperationRequest_Failure)(nil), + } + file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[160].OneofWrappers = []any{ + (*ExecuteMultiOperationRequest_Operation_StartWorkflow)(nil), + (*ExecuteMultiOperationRequest_Operation_UpdateWorkflow)(nil), + } + file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes[161].OneofWrappers = []any{ + 
(*ExecuteMultiOperationResponse_Response_StartWorkflow)(nil), + (*ExecuteMultiOperationResponse_Response_UpdateWorkflow)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc)), NumEnums: 0, - NumMessages: 133, - NumExtensions: 0, + NumMessages: 169, + NumExtensions: 1, NumServices: 0, }, GoTypes: file_temporal_server_api_historyservice_v1_request_response_proto_goTypes, DependencyIndexes: file_temporal_server_api_historyservice_v1_request_response_proto_depIdxs, MessageInfos: file_temporal_server_api_historyservice_v1_request_response_proto_msgTypes, + ExtensionInfos: file_temporal_server_api_historyservice_v1_request_response_proto_extTypes, }.Build() File_temporal_server_api_historyservice_v1_request_response_proto = out.File - file_temporal_server_api_historyservice_v1_request_response_proto_rawDesc = nil file_temporal_server_api_historyservice_v1_request_response_proto_goTypes = nil file_temporal_server_api_historyservice_v1_request_response_proto_depIdxs = nil } diff --git a/api/historyservice/v1/service.pb.go b/api/historyservice/v1/service.pb.go index 25c29a8dddc..634ddb51a15 100644 --- a/api/historyservice/v1/service.pb.go +++ b/api/historyservice/v1/service.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -30,7 +8,9 @@ package historyservice import ( reflect "reflect" + unsafe "unsafe" + _ "go.temporal.io/server/api/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -44,672 +24,91 @@ const ( var File_temporal_server_api_historyservice_v1_service_proto protoreflect.FileDescriptor -var file_temporal_server_api_historyservice_v1_service_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x3c, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0xf0, 0x50, 0x0a, 0x0e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa7, 0x01, - 0x0a, 0x16, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x45, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x92, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4d, - 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3d, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x95, 0x01, 0x0a, - 0x10, 0x50, 0x6f, 0x6c, 0x6c, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x4d, 0x75, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, - 0x74, 0x1a, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x4d, 0x75, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xa1, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x74, - 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x42, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x74, 0x69, 0x63, 0x6b, - 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, - 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb0, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x63, 0x6f, 0x72, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x53, 0x74, 0x61, 
0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb0, 0x01, 0x0a, 0x19, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb9, - 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, - 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 
0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4b, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb0, 0x01, 0x0a, 0x19, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9e, 0x01, - 0x0a, 0x13, 0x49, 0x73, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 
0x56, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x73, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x49, 0x73, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb6, - 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x79, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x12, 0x49, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, - 0x61, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 
0x00, 0x12, 0xb9, 0x01, 0x0a, 0x1c, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0xb0, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, - 0x64, 0x12, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x48, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb6, 0x01, 0x0a, 0x1b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x65, 0x64, 0x12, 0x49, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x6e, - 0x63, 0x65, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x9e, 0x01, 0x0a, 0x13, 0x49, 0x73, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x73, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 
0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x73, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0xaa, 0x01, 0x0a, 0x17, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xc5, 0x01, - 0x0a, 0x20, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x6c, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 
0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x4f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, - 0x6c, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xad, 0x01, 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x47, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4d, - 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb3, 0x01, 0x0a, 0x1a, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 
0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x72, - 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x49, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xaa, 0x01, 0x0a, 0x17, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa7, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x73, - 0x65, 0x74, 0x57, 0x6f, 
0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0xbf, 0x01, 0x0a, 0x1e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x4d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 
0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xa1, 0x01, 0x0a, 0x14, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x42, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xc5, 0x01, 0x0a, 0x20, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x46, 0x69, 0x72, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x12, 0x4e, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x46, 0x69, 0x72, 0x73, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4f, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 
0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x46, 0x69, 0x72, 0x73, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x12, 0xbc, 0x01, 0x0a, 0x1d, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x43, 0x68, 0x69, 0x6c, 0x64, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x12, 0x4b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, - 0x64, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x43, 0x68, - 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0xd7, 0x01, 0x0a, 0x26, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x12, 0x54, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 
0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x55, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x43, - 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb0, 0x01, 0x0a, 0x19, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, - 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 
0x12, 0x98, 0x01, 0x0a, - 0x11, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x56, 0x32, 0x12, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x56, 0x32, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa7, 0x01, 0x0a, 0x16, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 
0x92, 0x01, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, - 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x89, 0x01, 0x0a, 0x0c, 0x53, 0x79, 0x6e, 0x63, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, - 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0xa1, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, - 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x42, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 
0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x4d, 0x75, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, - 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9e, 0x01, 0x0a, 0x13, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x41, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x62, 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x48, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x0a, 0x43, 0x6c, 0x6f, 0x73, - 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 
0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, - 0x08, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x12, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, - 0x0a, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x38, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0xa7, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x44, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb0, 0x01, 0x0a, - 0x19, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x47, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, - 0x4c, 0x51, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x8c, 0x01, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x12, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8c, - 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3c, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x76, 0x65, - 
0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x8f, 0x01, - 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, - 0x12, 0x3c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0x95, 0x01, 0x0a, 0x10, 0x50, 0x75, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x12, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x72, - 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x72, - 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x95, 0x01, 0x0a, 0x10, 0x4d, 0x65, 0x72, 0x67, - 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 
0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3e, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3f, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x44, 0x4c, 0x51, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0xa1, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0xce, 0x01, 0x0a, 0x23, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x4c, 0x61, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 
0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x51, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x61, 0x73, 0x74, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x52, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4c, - 0x61, 0x73, 0x74, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xa1, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x42, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 
0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9e, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xaa, 0x01, 0x0a, 0x17, 0x49, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x70, 
0x6f, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xbf, 0x01, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x56, 0x69, - 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xaa, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46, 0x2e, 
0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb6, 0x01, 0x0a, 0x1b, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x49, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xcc, - 0x01, 0x0a, 0x21, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x12, 0x4f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x50, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0xb6, 0x01, - 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x49, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xcb, 0x01, 0x0a, 
0x22, 0x47, 0x65, 0x74, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x12, 0x50, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x51, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x76, 0x65, 0x72, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xc5, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x56, 0x32, 0x12, 0x4e, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x56, 0x32, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4f, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 
0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x56, 0x32, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xbf, 0x01, 0x0a, - 0x1e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, - 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4d, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x77, 0x48, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb9, - 0x01, 0x0a, 0x1c, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4b, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x86, 0x01, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x39, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x8f, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x4c, - 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x3c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 
0x76, 0x31, 0x2e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x44, 0x4c, 0x51, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x7d, 0x0a, 0x08, 0x41, - 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, - 0x76, 0x69, 
0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x09, 0x4c, - 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, - 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3c, 0x5a, - 0x3a, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_historyservice_v1_service_proto_rawDesc = "" + + "\n" + + "3temporal/server/api/historyservice/v1/service.proto\x12%temporal.server.api.historyservice.v1\x1a0temporal/server/api/common/v1/api_category.proto\x1a.temporal.server.api.historyservice.v1.GetMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x9b\x01\n" + + "\x10PollMutableState\x12>.temporal.server.api.historyservice.v1.PollMutableStateRequest\x1a?.temporal.server.api.historyservice.v1.PollMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xa7\x01\n" + + 
"\x14ResetStickyTaskQueue\x12B.temporal.server.api.historyservice.v1.ResetStickyTaskQueueRequest\x1aC.temporal.server.api.historyservice.v1.ResetStickyTaskQueueResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb6\x01\n" + + "\x19RecordWorkflowTaskStarted\x12G.temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedRequest\x1aH.temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb6\x01\n" + + "\x19RecordActivityTaskStarted\x12G.temporal.server.api.historyservice.v1.RecordActivityTaskStartedRequest\x1aH.temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbf\x01\n" + + "\x1cRespondWorkflowTaskCompleted\x12J.temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedRequest\x1aK.temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb6\x01\n" + + "\x19RespondWorkflowTaskFailed\x12G.temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedRequest\x1aH.temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa4\x01\n" + + "\x13IsWorkflowTaskValid\x12A.temporal.server.api.historyservice.v1.IsWorkflowTaskValidRequest\x1aB.temporal.server.api.historyservice.v1.IsWorkflowTaskValidResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbc\x01\n" + + "\x1bRecordActivityTaskHeartbeat\x12I.temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatRequest\x1aJ.temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbf\x01\n" + + "\x1cRespondActivityTaskCompleted\x12J.temporal.server.api.historyservice.v1.RespondActivityTaskCompletedRequest\x1aK.temporal.server.api.historyservice.v1.RespondActivityTaskCompletedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb6\x01\n" + + 
"\x19RespondActivityTaskFailed\x12G.temporal.server.api.historyservice.v1.RespondActivityTaskFailedRequest\x1aH.temporal.server.api.historyservice.v1.RespondActivityTaskFailedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbc\x01\n" + + "\x1bRespondActivityTaskCanceled\x12I.temporal.server.api.historyservice.v1.RespondActivityTaskCanceledRequest\x1aJ.temporal.server.api.historyservice.v1.RespondActivityTaskCanceledResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa4\x01\n" + + "\x13IsActivityTaskValid\x12A.temporal.server.api.historyservice.v1.IsActivityTaskValidRequest\x1aB.temporal.server.api.historyservice.v1.IsActivityTaskValidResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb0\x01\n" + + "\x17SignalWorkflowExecution\x12E.temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest\x1aF.temporal.server.api.historyservice.v1.SignalWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xcb\x01\n" + + " SignalWithStartWorkflowExecution\x12N.temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionRequest\x1aO.temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xaa\x01\n" + + "\x15ExecuteMultiOperation\x12C.temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest\x1aD.temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xb3\x01\n" + + "\x18RemoveSignalMutableState\x12F.temporal.server.api.historyservice.v1.RemoveSignalMutableStateRequest\x1aG.temporal.server.api.historyservice.v1.RemoveSignalMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb9\x01\n" + + "\x1aTerminateWorkflowExecution\x12H.temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest\x1aI.temporal.server.api.historyservice.v1.TerminateWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb0\x01\n" + + 
"\x17DeleteWorkflowExecution\x12E.temporal.server.api.historyservice.v1.DeleteWorkflowExecutionRequest\x1aF.temporal.server.api.historyservice.v1.DeleteWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xad\x01\n" + + "\x16ResetWorkflowExecution\x12D.temporal.server.api.historyservice.v1.ResetWorkflowExecutionRequest\x1aE.temporal.server.api.historyservice.v1.ResetWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc5\x01\n" + + "\x1eUpdateWorkflowExecutionOptions\x12L.temporal.server.api.historyservice.v1.UpdateWorkflowExecutionOptionsRequest\x1aM.temporal.server.api.historyservice.v1.UpdateWorkflowExecutionOptionsResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc5\x01\n" + + "\x1eRequestCancelWorkflowExecution\x12L.temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest\x1aM.temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa7\x01\n" + + "\x14ScheduleWorkflowTask\x12B.temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest\x1aC.temporal.server.api.historyservice.v1.ScheduleWorkflowTaskResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xcb\x01\n" + + " VerifyFirstWorkflowTaskScheduled\x12N.temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest\x1aO.temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc2\x01\n" + + "\x1dRecordChildExecutionCompleted\x12K.temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest\x1aL.temporal.server.api.historyservice.v1.RecordChildExecutionCompletedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xdd\x01\n" + + "&VerifyChildExecutionCompletionRecorded\x12T.temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest\x1aU.temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb6\x01\n" + + 
"\x19DescribeWorkflowExecution\x12G.temporal.server.api.historyservice.v1.DescribeWorkflowExecutionRequest\x1aH.temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x9e\x01\n" + + "\x11ReplicateEventsV2\x12?.temporal.server.api.historyservice.v1.ReplicateEventsV2Request\x1a@.temporal.server.api.historyservice.v1.ReplicateEventsV2Response\"\x06\x8a\xb5\x18\x02\b\x01\x12\xad\x01\n" + + "\x16ReplicateWorkflowState\x12D.temporal.server.api.historyservice.v1.ReplicateWorkflowStateRequest\x1aE.temporal.server.api.historyservice.v1.ReplicateWorkflowStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x98\x01\n" + + "\x0fSyncShardStatus\x12=.temporal.server.api.historyservice.v1.SyncShardStatusRequest\x1a>.temporal.server.api.historyservice.v1.SyncShardStatusResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x8f\x01\n" + + "\fSyncActivity\x12:.temporal.server.api.historyservice.v1.SyncActivityRequest\x1a;.temporal.server.api.historyservice.v1.SyncActivityResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa7\x01\n" + + "\x14DescribeMutableState\x12B.temporal.server.api.historyservice.v1.DescribeMutableStateRequest\x1aC.temporal.server.api.historyservice.v1.DescribeMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa4\x01\n" + + "\x13DescribeHistoryHost\x12A.temporal.server.api.historyservice.v1.DescribeHistoryHostRequest\x1aB.temporal.server.api.historyservice.v1.DescribeHistoryHostResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x89\x01\n" + + "\n" + + "CloseShard\x128.temporal.server.api.historyservice.v1.CloseShardRequest\x1a9.temporal.server.api.historyservice.v1.CloseShardResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x83\x01\n" + + "\bGetShard\x126.temporal.server.api.historyservice.v1.GetShardRequest\x1a7.temporal.server.api.historyservice.v1.GetShardResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x89\x01\n" + + "\n" + + 
"RemoveTask\x128.temporal.server.api.historyservice.v1.RemoveTaskRequest\x1a9.temporal.server.api.historyservice.v1.RemoveTaskResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xad\x01\n" + + "\x16GetReplicationMessages\x12D.temporal.server.api.historyservice.v1.GetReplicationMessagesRequest\x1aE.temporal.server.api.historyservice.v1.GetReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb6\x01\n" + + "\x19GetDLQReplicationMessages\x12G.temporal.server.api.historyservice.v1.GetDLQReplicationMessagesRequest\x1aH.temporal.server.api.historyservice.v1.GetDLQReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x92\x01\n" + + "\rQueryWorkflow\x12;.temporal.server.api.historyservice.v1.QueryWorkflowRequest\x1a<.temporal.server.api.historyservice.v1.QueryWorkflowResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\x92\x01\n" + + "\rReapplyEvents\x12;.temporal.server.api.historyservice.v1.ReapplyEventsRequest\x1a<.temporal.server.api.historyservice.v1.ReapplyEventsResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x95\x01\n" + + "\x0eGetDLQMessages\x12<.temporal.server.api.historyservice.v1.GetDLQMessagesRequest\x1a=.temporal.server.api.historyservice.v1.GetDLQMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x9b\x01\n" + + "\x10PurgeDLQMessages\x12>.temporal.server.api.historyservice.v1.PurgeDLQMessagesRequest\x1a?.temporal.server.api.historyservice.v1.PurgeDLQMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x9b\x01\n" + + "\x10MergeDLQMessages\x12>.temporal.server.api.historyservice.v1.MergeDLQMessagesRequest\x1a?.temporal.server.api.historyservice.v1.MergeDLQMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xa7\x01\n" + + "\x14RefreshWorkflowTasks\x12B.temporal.server.api.historyservice.v1.RefreshWorkflowTasksRequest\x1aC.temporal.server.api.historyservice.v1.RefreshWorkflowTasksResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xd4\x01\n" + + 
"#GenerateLastHistoryReplicationTasks\x12Q.temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksRequest\x1aR.temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa7\x01\n" + + "\x14GetReplicationStatus\x12B.temporal.server.api.historyservice.v1.GetReplicationStatusRequest\x1aC.temporal.server.api.historyservice.v1.GetReplicationStatusResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa4\x01\n" + + "\x13RebuildMutableState\x12A.temporal.server.api.historyservice.v1.RebuildMutableStateRequest\x1aB.temporal.server.api.historyservice.v1.RebuildMutableStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb0\x01\n" + + "\x17ImportWorkflowExecution\x12E.temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest\x1aF.temporal.server.api.historyservice.v1.ImportWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc5\x01\n" + + "\x1eDeleteWorkflowVisibilityRecord\x12L.temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest\x1aM.temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\xb0\x01\n" + + "\x17UpdateWorkflowExecution\x12E.temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequest\x1aF.temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xbc\x01\n" + + "\x1bPollWorkflowExecutionUpdate\x12I.temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateRequest\x1aJ.temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xd2\x01\n" + + "!StreamWorkflowReplicationMessages\x12O.temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesRequest\x1aP.temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesResponse\"\x06\x8a\xb5\x18\x02\b\x03(\x010\x01\x12\xbc\x01\n" + + 
"\x1bGetWorkflowExecutionHistory\x12I.temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryRequest\x1aJ.temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xd1\x01\n" + + "\"GetWorkflowExecutionHistoryReverse\x12P.temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseRequest\x1aQ.temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xcb\x01\n" + + " GetWorkflowExecutionRawHistoryV2\x12N.temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Request\x1aO.temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Response\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc5\x01\n" + + "\x1eGetWorkflowExecutionRawHistory\x12L.temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryRequest\x1aM.temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbf\x01\n" + + "\x1cForceDeleteWorkflowExecution\x12J.temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionRequest\x1aK.temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x8c\x01\n" + + "\vGetDLQTasks\x129.temporal.server.api.historyservice.v1.GetDLQTasksRequest\x1a:.temporal.server.api.historyservice.v1.GetDLQTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x95\x01\n" + + "\x0eDeleteDLQTasks\x12<.temporal.server.api.historyservice.v1.DeleteDLQTasksRequest\x1a=.temporal.server.api.historyservice.v1.DeleteDLQTasksResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x89\x01\n" + + "\n" + + "ListQueues\x128.temporal.server.api.historyservice.v1.ListQueuesRequest\x1a9.temporal.server.api.historyservice.v1.ListQueuesResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x83\x01\n" + + "\bAddTasks\x126.temporal.server.api.historyservice.v1.AddTasksRequest\x1a7.temporal.server.api.historyservice.v1.AddTasksResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x86\x01\n" + + 
"\tListTasks\x127.temporal.server.api.historyservice.v1.ListTasksRequest\x1a8.temporal.server.api.historyservice.v1.ListTasksResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xad\x01\n" + + "\x16CompleteNexusOperation\x12D.temporal.server.api.historyservice.v1.CompleteNexusOperationRequest\x1aE.temporal.server.api.historyservice.v1.CompleteNexusOperationResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbc\x01\n" + + "\x1bCompleteNexusOperationChasm\x12I.temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest\x1aJ.temporal.server.api.historyservice.v1.CompleteNexusOperationChasmResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb3\x01\n" + + "\x18InvokeStateMachineMethod\x12F.temporal.server.api.historyservice.v1.InvokeStateMachineMethodRequest\x1aG.temporal.server.api.historyservice.v1.InvokeStateMachineMethodResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x98\x01\n" + + "\x0fDeepHealthCheck\x12=.temporal.server.api.historyservice.v1.DeepHealthCheckRequest\x1a>.temporal.server.api.historyservice.v1.DeepHealthCheckResponse\"\x06\x8a\xb5\x18\x02\b\x03\x12\x9e\x01\n" + + "\x11SyncWorkflowState\x12?.temporal.server.api.historyservice.v1.SyncWorkflowStateRequest\x1a@.temporal.server.api.historyservice.v1.SyncWorkflowStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xaa\x01\n" + + "\x15UpdateActivityOptions\x12C.temporal.server.api.historyservice.v1.UpdateActivityOptionsRequest\x1aD.temporal.server.api.historyservice.v1.UpdateActivityOptionsResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x92\x01\n" + + "\rPauseActivity\x12;.temporal.server.api.historyservice.v1.PauseActivityRequest\x1a<.temporal.server.api.historyservice.v1.PauseActivityResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x98\x01\n" + + "\x0fUnpauseActivity\x12=.temporal.server.api.historyservice.v1.UnpauseActivityRequest\x1a>.temporal.server.api.historyservice.v1.UnpauseActivityResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x92\x01\n" + + 
"\rResetActivity\x12;.temporal.server.api.historyservice.v1.ResetActivityRequest\x1a<.temporal.server.api.historyservice.v1.ResetActivityResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xad\x01\n" + + "\x16PauseWorkflowExecution\x12D.temporal.server.api.historyservice.v1.PauseWorkflowExecutionRequest\x1aE.temporal.server.api.historyservice.v1.PauseWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb3\x01\n" + + "\x18UnpauseWorkflowExecution\x12F.temporal.server.api.historyservice.v1.UnpauseWorkflowExecutionRequest\x1aG.temporal.server.api.historyservice.v1.UnpauseWorkflowExecutionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa4\x01\n" + + "\x13StartNexusOperation\x12A.temporal.server.api.historyservice.v1.StartNexusOperationRequest\x1aB.temporal.server.api.historyservice.v1.StartNexusOperationResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa7\x01\n" + + "\x14CancelNexusOperation\x12B.temporal.server.api.historyservice.v1.CancelNexusOperationRequest\x1aC.temporal.server.api.historyservice.v1.CancelNexusOperationResponse\"\x06\x8a\xb5\x18\x02\b\x01B temporal.server.api.historyservice.v1.StartWorkflowExecutionRequest @@ -850,114 +279,144 @@ var file_temporal_server_api_historyservice_v1_service_proto_depIdxs = []int32{ 13, // 13: temporal.server.api.historyservice.v1.HistoryService.IsActivityTaskValid:input_type -> temporal.server.api.historyservice.v1.IsActivityTaskValidRequest 14, // 14: temporal.server.api.historyservice.v1.HistoryService.SignalWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.SignalWorkflowExecutionRequest 15, // 15: temporal.server.api.historyservice.v1.HistoryService.SignalWithStartWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionRequest - 16, // 16: temporal.server.api.historyservice.v1.HistoryService.RemoveSignalMutableState:input_type -> temporal.server.api.historyservice.v1.RemoveSignalMutableStateRequest - 17, // 17: 
temporal.server.api.historyservice.v1.HistoryService.TerminateWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest - 18, // 18: temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.DeleteWorkflowExecutionRequest - 19, // 19: temporal.server.api.historyservice.v1.HistoryService.ResetWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.ResetWorkflowExecutionRequest - 20, // 20: temporal.server.api.historyservice.v1.HistoryService.RequestCancelWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest - 21, // 21: temporal.server.api.historyservice.v1.HistoryService.ScheduleWorkflowTask:input_type -> temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest - 22, // 22: temporal.server.api.historyservice.v1.HistoryService.VerifyFirstWorkflowTaskScheduled:input_type -> temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest - 23, // 23: temporal.server.api.historyservice.v1.HistoryService.RecordChildExecutionCompleted:input_type -> temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest - 24, // 24: temporal.server.api.historyservice.v1.HistoryService.VerifyChildExecutionCompletionRecorded:input_type -> temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest - 25, // 25: temporal.server.api.historyservice.v1.HistoryService.DescribeWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.DescribeWorkflowExecutionRequest - 26, // 26: temporal.server.api.historyservice.v1.HistoryService.ReplicateEventsV2:input_type -> temporal.server.api.historyservice.v1.ReplicateEventsV2Request - 27, // 27: temporal.server.api.historyservice.v1.HistoryService.ReplicateWorkflowState:input_type -> temporal.server.api.historyservice.v1.ReplicateWorkflowStateRequest - 28, // 28: 
temporal.server.api.historyservice.v1.HistoryService.SyncShardStatus:input_type -> temporal.server.api.historyservice.v1.SyncShardStatusRequest - 29, // 29: temporal.server.api.historyservice.v1.HistoryService.SyncActivity:input_type -> temporal.server.api.historyservice.v1.SyncActivityRequest - 30, // 30: temporal.server.api.historyservice.v1.HistoryService.DescribeMutableState:input_type -> temporal.server.api.historyservice.v1.DescribeMutableStateRequest - 31, // 31: temporal.server.api.historyservice.v1.HistoryService.DescribeHistoryHost:input_type -> temporal.server.api.historyservice.v1.DescribeHistoryHostRequest - 32, // 32: temporal.server.api.historyservice.v1.HistoryService.CloseShard:input_type -> temporal.server.api.historyservice.v1.CloseShardRequest - 33, // 33: temporal.server.api.historyservice.v1.HistoryService.GetShard:input_type -> temporal.server.api.historyservice.v1.GetShardRequest - 34, // 34: temporal.server.api.historyservice.v1.HistoryService.RemoveTask:input_type -> temporal.server.api.historyservice.v1.RemoveTaskRequest - 35, // 35: temporal.server.api.historyservice.v1.HistoryService.GetReplicationMessages:input_type -> temporal.server.api.historyservice.v1.GetReplicationMessagesRequest - 36, // 36: temporal.server.api.historyservice.v1.HistoryService.GetDLQReplicationMessages:input_type -> temporal.server.api.historyservice.v1.GetDLQReplicationMessagesRequest - 37, // 37: temporal.server.api.historyservice.v1.HistoryService.QueryWorkflow:input_type -> temporal.server.api.historyservice.v1.QueryWorkflowRequest - 38, // 38: temporal.server.api.historyservice.v1.HistoryService.ReapplyEvents:input_type -> temporal.server.api.historyservice.v1.ReapplyEventsRequest - 39, // 39: temporal.server.api.historyservice.v1.HistoryService.GetDLQMessages:input_type -> temporal.server.api.historyservice.v1.GetDLQMessagesRequest - 40, // 40: temporal.server.api.historyservice.v1.HistoryService.PurgeDLQMessages:input_type -> 
temporal.server.api.historyservice.v1.PurgeDLQMessagesRequest - 41, // 41: temporal.server.api.historyservice.v1.HistoryService.MergeDLQMessages:input_type -> temporal.server.api.historyservice.v1.MergeDLQMessagesRequest - 42, // 42: temporal.server.api.historyservice.v1.HistoryService.RefreshWorkflowTasks:input_type -> temporal.server.api.historyservice.v1.RefreshWorkflowTasksRequest - 43, // 43: temporal.server.api.historyservice.v1.HistoryService.GenerateLastHistoryReplicationTasks:input_type -> temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksRequest - 44, // 44: temporal.server.api.historyservice.v1.HistoryService.GetReplicationStatus:input_type -> temporal.server.api.historyservice.v1.GetReplicationStatusRequest - 45, // 45: temporal.server.api.historyservice.v1.HistoryService.RebuildMutableState:input_type -> temporal.server.api.historyservice.v1.RebuildMutableStateRequest - 46, // 46: temporal.server.api.historyservice.v1.HistoryService.ImportWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest - 47, // 47: temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowVisibilityRecord:input_type -> temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest - 48, // 48: temporal.server.api.historyservice.v1.HistoryService.UpdateWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequest - 49, // 49: temporal.server.api.historyservice.v1.HistoryService.PollWorkflowExecutionUpdate:input_type -> temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateRequest - 50, // 50: temporal.server.api.historyservice.v1.HistoryService.StreamWorkflowReplicationMessages:input_type -> temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesRequest - 51, // 51: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistory:input_type -> 
temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryRequest - 52, // 52: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistoryReverse:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseRequest - 53, // 53: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistoryV2:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Request - 54, // 54: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistory:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryRequest - 55, // 55: temporal.server.api.historyservice.v1.HistoryService.ForceDeleteWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionRequest - 56, // 56: temporal.server.api.historyservice.v1.HistoryService.GetDLQTasks:input_type -> temporal.server.api.historyservice.v1.GetDLQTasksRequest - 57, // 57: temporal.server.api.historyservice.v1.HistoryService.DeleteDLQTasks:input_type -> temporal.server.api.historyservice.v1.DeleteDLQTasksRequest - 58, // 58: temporal.server.api.historyservice.v1.HistoryService.ListQueues:input_type -> temporal.server.api.historyservice.v1.ListQueuesRequest - 59, // 59: temporal.server.api.historyservice.v1.HistoryService.AddTasks:input_type -> temporal.server.api.historyservice.v1.AddTasksRequest - 60, // 60: temporal.server.api.historyservice.v1.HistoryService.ListTasks:input_type -> temporal.server.api.historyservice.v1.ListTasksRequest - 61, // 61: temporal.server.api.historyservice.v1.HistoryService.StartWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse - 62, // 62: temporal.server.api.historyservice.v1.HistoryService.GetMutableState:output_type -> temporal.server.api.historyservice.v1.GetMutableStateResponse - 63, // 63: temporal.server.api.historyservice.v1.HistoryService.PollMutableState:output_type -> 
temporal.server.api.historyservice.v1.PollMutableStateResponse - 64, // 64: temporal.server.api.historyservice.v1.HistoryService.ResetStickyTaskQueue:output_type -> temporal.server.api.historyservice.v1.ResetStickyTaskQueueResponse - 65, // 65: temporal.server.api.historyservice.v1.HistoryService.RecordWorkflowTaskStarted:output_type -> temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse - 66, // 66: temporal.server.api.historyservice.v1.HistoryService.RecordActivityTaskStarted:output_type -> temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse - 67, // 67: temporal.server.api.historyservice.v1.HistoryService.RespondWorkflowTaskCompleted:output_type -> temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse - 68, // 68: temporal.server.api.historyservice.v1.HistoryService.RespondWorkflowTaskFailed:output_type -> temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedResponse - 69, // 69: temporal.server.api.historyservice.v1.HistoryService.IsWorkflowTaskValid:output_type -> temporal.server.api.historyservice.v1.IsWorkflowTaskValidResponse - 70, // 70: temporal.server.api.historyservice.v1.HistoryService.RecordActivityTaskHeartbeat:output_type -> temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatResponse - 71, // 71: temporal.server.api.historyservice.v1.HistoryService.RespondActivityTaskCompleted:output_type -> temporal.server.api.historyservice.v1.RespondActivityTaskCompletedResponse - 72, // 72: temporal.server.api.historyservice.v1.HistoryService.RespondActivityTaskFailed:output_type -> temporal.server.api.historyservice.v1.RespondActivityTaskFailedResponse - 73, // 73: temporal.server.api.historyservice.v1.HistoryService.RespondActivityTaskCanceled:output_type -> temporal.server.api.historyservice.v1.RespondActivityTaskCanceledResponse - 74, // 74: temporal.server.api.historyservice.v1.HistoryService.IsActivityTaskValid:output_type -> 
temporal.server.api.historyservice.v1.IsActivityTaskValidResponse - 75, // 75: temporal.server.api.historyservice.v1.HistoryService.SignalWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.SignalWorkflowExecutionResponse - 76, // 76: temporal.server.api.historyservice.v1.HistoryService.SignalWithStartWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionResponse - 77, // 77: temporal.server.api.historyservice.v1.HistoryService.RemoveSignalMutableState:output_type -> temporal.server.api.historyservice.v1.RemoveSignalMutableStateResponse - 78, // 78: temporal.server.api.historyservice.v1.HistoryService.TerminateWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.TerminateWorkflowExecutionResponse - 79, // 79: temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.DeleteWorkflowExecutionResponse - 80, // 80: temporal.server.api.historyservice.v1.HistoryService.ResetWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.ResetWorkflowExecutionResponse - 81, // 81: temporal.server.api.historyservice.v1.HistoryService.RequestCancelWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionResponse - 82, // 82: temporal.server.api.historyservice.v1.HistoryService.ScheduleWorkflowTask:output_type -> temporal.server.api.historyservice.v1.ScheduleWorkflowTaskResponse - 83, // 83: temporal.server.api.historyservice.v1.HistoryService.VerifyFirstWorkflowTaskScheduled:output_type -> temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledResponse - 84, // 84: temporal.server.api.historyservice.v1.HistoryService.RecordChildExecutionCompleted:output_type -> temporal.server.api.historyservice.v1.RecordChildExecutionCompletedResponse - 85, // 85: temporal.server.api.historyservice.v1.HistoryService.VerifyChildExecutionCompletionRecorded:output_type -> 
temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedResponse - 86, // 86: temporal.server.api.historyservice.v1.HistoryService.DescribeWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse - 87, // 87: temporal.server.api.historyservice.v1.HistoryService.ReplicateEventsV2:output_type -> temporal.server.api.historyservice.v1.ReplicateEventsV2Response - 88, // 88: temporal.server.api.historyservice.v1.HistoryService.ReplicateWorkflowState:output_type -> temporal.server.api.historyservice.v1.ReplicateWorkflowStateResponse - 89, // 89: temporal.server.api.historyservice.v1.HistoryService.SyncShardStatus:output_type -> temporal.server.api.historyservice.v1.SyncShardStatusResponse - 90, // 90: temporal.server.api.historyservice.v1.HistoryService.SyncActivity:output_type -> temporal.server.api.historyservice.v1.SyncActivityResponse - 91, // 91: temporal.server.api.historyservice.v1.HistoryService.DescribeMutableState:output_type -> temporal.server.api.historyservice.v1.DescribeMutableStateResponse - 92, // 92: temporal.server.api.historyservice.v1.HistoryService.DescribeHistoryHost:output_type -> temporal.server.api.historyservice.v1.DescribeHistoryHostResponse - 93, // 93: temporal.server.api.historyservice.v1.HistoryService.CloseShard:output_type -> temporal.server.api.historyservice.v1.CloseShardResponse - 94, // 94: temporal.server.api.historyservice.v1.HistoryService.GetShard:output_type -> temporal.server.api.historyservice.v1.GetShardResponse - 95, // 95: temporal.server.api.historyservice.v1.HistoryService.RemoveTask:output_type -> temporal.server.api.historyservice.v1.RemoveTaskResponse - 96, // 96: temporal.server.api.historyservice.v1.HistoryService.GetReplicationMessages:output_type -> temporal.server.api.historyservice.v1.GetReplicationMessagesResponse - 97, // 97: temporal.server.api.historyservice.v1.HistoryService.GetDLQReplicationMessages:output_type -> 
temporal.server.api.historyservice.v1.GetDLQReplicationMessagesResponse - 98, // 98: temporal.server.api.historyservice.v1.HistoryService.QueryWorkflow:output_type -> temporal.server.api.historyservice.v1.QueryWorkflowResponse - 99, // 99: temporal.server.api.historyservice.v1.HistoryService.ReapplyEvents:output_type -> temporal.server.api.historyservice.v1.ReapplyEventsResponse - 100, // 100: temporal.server.api.historyservice.v1.HistoryService.GetDLQMessages:output_type -> temporal.server.api.historyservice.v1.GetDLQMessagesResponse - 101, // 101: temporal.server.api.historyservice.v1.HistoryService.PurgeDLQMessages:output_type -> temporal.server.api.historyservice.v1.PurgeDLQMessagesResponse - 102, // 102: temporal.server.api.historyservice.v1.HistoryService.MergeDLQMessages:output_type -> temporal.server.api.historyservice.v1.MergeDLQMessagesResponse - 103, // 103: temporal.server.api.historyservice.v1.HistoryService.RefreshWorkflowTasks:output_type -> temporal.server.api.historyservice.v1.RefreshWorkflowTasksResponse - 104, // 104: temporal.server.api.historyservice.v1.HistoryService.GenerateLastHistoryReplicationTasks:output_type -> temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksResponse - 105, // 105: temporal.server.api.historyservice.v1.HistoryService.GetReplicationStatus:output_type -> temporal.server.api.historyservice.v1.GetReplicationStatusResponse - 106, // 106: temporal.server.api.historyservice.v1.HistoryService.RebuildMutableState:output_type -> temporal.server.api.historyservice.v1.RebuildMutableStateResponse - 107, // 107: temporal.server.api.historyservice.v1.HistoryService.ImportWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.ImportWorkflowExecutionResponse - 108, // 108: temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowVisibilityRecord:output_type -> temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordResponse - 109, // 109: 
temporal.server.api.historyservice.v1.HistoryService.UpdateWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponse - 110, // 110: temporal.server.api.historyservice.v1.HistoryService.PollWorkflowExecutionUpdate:output_type -> temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateResponse - 111, // 111: temporal.server.api.historyservice.v1.HistoryService.StreamWorkflowReplicationMessages:output_type -> temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesResponse - 112, // 112: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistory:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponse - 113, // 113: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistoryReverse:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseResponse - 114, // 114: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistoryV2:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Response - 115, // 115: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistory:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryResponse - 116, // 116: temporal.server.api.historyservice.v1.HistoryService.ForceDeleteWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionResponse - 117, // 117: temporal.server.api.historyservice.v1.HistoryService.GetDLQTasks:output_type -> temporal.server.api.historyservice.v1.GetDLQTasksResponse - 118, // 118: temporal.server.api.historyservice.v1.HistoryService.DeleteDLQTasks:output_type -> temporal.server.api.historyservice.v1.DeleteDLQTasksResponse - 119, // 119: temporal.server.api.historyservice.v1.HistoryService.ListQueues:output_type -> temporal.server.api.historyservice.v1.ListQueuesResponse - 120, // 120: 
temporal.server.api.historyservice.v1.HistoryService.AddTasks:output_type -> temporal.server.api.historyservice.v1.AddTasksResponse - 121, // 121: temporal.server.api.historyservice.v1.HistoryService.ListTasks:output_type -> temporal.server.api.historyservice.v1.ListTasksResponse - 61, // [61:122] is the sub-list for method output_type - 0, // [0:61] is the sub-list for method input_type + 16, // 16: temporal.server.api.historyservice.v1.HistoryService.ExecuteMultiOperation:input_type -> temporal.server.api.historyservice.v1.ExecuteMultiOperationRequest + 17, // 17: temporal.server.api.historyservice.v1.HistoryService.RemoveSignalMutableState:input_type -> temporal.server.api.historyservice.v1.RemoveSignalMutableStateRequest + 18, // 18: temporal.server.api.historyservice.v1.HistoryService.TerminateWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.TerminateWorkflowExecutionRequest + 19, // 19: temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.DeleteWorkflowExecutionRequest + 20, // 20: temporal.server.api.historyservice.v1.HistoryService.ResetWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.ResetWorkflowExecutionRequest + 21, // 21: temporal.server.api.historyservice.v1.HistoryService.UpdateWorkflowExecutionOptions:input_type -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionOptionsRequest + 22, // 22: temporal.server.api.historyservice.v1.HistoryService.RequestCancelWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionRequest + 23, // 23: temporal.server.api.historyservice.v1.HistoryService.ScheduleWorkflowTask:input_type -> temporal.server.api.historyservice.v1.ScheduleWorkflowTaskRequest + 24, // 24: temporal.server.api.historyservice.v1.HistoryService.VerifyFirstWorkflowTaskScheduled:input_type -> temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledRequest + 25, // 
25: temporal.server.api.historyservice.v1.HistoryService.RecordChildExecutionCompleted:input_type -> temporal.server.api.historyservice.v1.RecordChildExecutionCompletedRequest + 26, // 26: temporal.server.api.historyservice.v1.HistoryService.VerifyChildExecutionCompletionRecorded:input_type -> temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedRequest + 27, // 27: temporal.server.api.historyservice.v1.HistoryService.DescribeWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.DescribeWorkflowExecutionRequest + 28, // 28: temporal.server.api.historyservice.v1.HistoryService.ReplicateEventsV2:input_type -> temporal.server.api.historyservice.v1.ReplicateEventsV2Request + 29, // 29: temporal.server.api.historyservice.v1.HistoryService.ReplicateWorkflowState:input_type -> temporal.server.api.historyservice.v1.ReplicateWorkflowStateRequest + 30, // 30: temporal.server.api.historyservice.v1.HistoryService.SyncShardStatus:input_type -> temporal.server.api.historyservice.v1.SyncShardStatusRequest + 31, // 31: temporal.server.api.historyservice.v1.HistoryService.SyncActivity:input_type -> temporal.server.api.historyservice.v1.SyncActivityRequest + 32, // 32: temporal.server.api.historyservice.v1.HistoryService.DescribeMutableState:input_type -> temporal.server.api.historyservice.v1.DescribeMutableStateRequest + 33, // 33: temporal.server.api.historyservice.v1.HistoryService.DescribeHistoryHost:input_type -> temporal.server.api.historyservice.v1.DescribeHistoryHostRequest + 34, // 34: temporal.server.api.historyservice.v1.HistoryService.CloseShard:input_type -> temporal.server.api.historyservice.v1.CloseShardRequest + 35, // 35: temporal.server.api.historyservice.v1.HistoryService.GetShard:input_type -> temporal.server.api.historyservice.v1.GetShardRequest + 36, // 36: temporal.server.api.historyservice.v1.HistoryService.RemoveTask:input_type -> temporal.server.api.historyservice.v1.RemoveTaskRequest + 37, // 37: 
temporal.server.api.historyservice.v1.HistoryService.GetReplicationMessages:input_type -> temporal.server.api.historyservice.v1.GetReplicationMessagesRequest + 38, // 38: temporal.server.api.historyservice.v1.HistoryService.GetDLQReplicationMessages:input_type -> temporal.server.api.historyservice.v1.GetDLQReplicationMessagesRequest + 39, // 39: temporal.server.api.historyservice.v1.HistoryService.QueryWorkflow:input_type -> temporal.server.api.historyservice.v1.QueryWorkflowRequest + 40, // 40: temporal.server.api.historyservice.v1.HistoryService.ReapplyEvents:input_type -> temporal.server.api.historyservice.v1.ReapplyEventsRequest + 41, // 41: temporal.server.api.historyservice.v1.HistoryService.GetDLQMessages:input_type -> temporal.server.api.historyservice.v1.GetDLQMessagesRequest + 42, // 42: temporal.server.api.historyservice.v1.HistoryService.PurgeDLQMessages:input_type -> temporal.server.api.historyservice.v1.PurgeDLQMessagesRequest + 43, // 43: temporal.server.api.historyservice.v1.HistoryService.MergeDLQMessages:input_type -> temporal.server.api.historyservice.v1.MergeDLQMessagesRequest + 44, // 44: temporal.server.api.historyservice.v1.HistoryService.RefreshWorkflowTasks:input_type -> temporal.server.api.historyservice.v1.RefreshWorkflowTasksRequest + 45, // 45: temporal.server.api.historyservice.v1.HistoryService.GenerateLastHistoryReplicationTasks:input_type -> temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksRequest + 46, // 46: temporal.server.api.historyservice.v1.HistoryService.GetReplicationStatus:input_type -> temporal.server.api.historyservice.v1.GetReplicationStatusRequest + 47, // 47: temporal.server.api.historyservice.v1.HistoryService.RebuildMutableState:input_type -> temporal.server.api.historyservice.v1.RebuildMutableStateRequest + 48, // 48: temporal.server.api.historyservice.v1.HistoryService.ImportWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.ImportWorkflowExecutionRequest + 49, // 49: 
temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowVisibilityRecord:input_type -> temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordRequest + 50, // 50: temporal.server.api.historyservice.v1.HistoryService.UpdateWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionRequest + 51, // 51: temporal.server.api.historyservice.v1.HistoryService.PollWorkflowExecutionUpdate:input_type -> temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateRequest + 52, // 52: temporal.server.api.historyservice.v1.HistoryService.StreamWorkflowReplicationMessages:input_type -> temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesRequest + 53, // 53: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistory:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryRequest + 54, // 54: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistoryReverse:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseRequest + 55, // 55: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistoryV2:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Request + 56, // 56: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistory:input_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryRequest + 57, // 57: temporal.server.api.historyservice.v1.HistoryService.ForceDeleteWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionRequest + 58, // 58: temporal.server.api.historyservice.v1.HistoryService.GetDLQTasks:input_type -> temporal.server.api.historyservice.v1.GetDLQTasksRequest + 59, // 59: temporal.server.api.historyservice.v1.HistoryService.DeleteDLQTasks:input_type -> temporal.server.api.historyservice.v1.DeleteDLQTasksRequest + 60, // 60: 
temporal.server.api.historyservice.v1.HistoryService.ListQueues:input_type -> temporal.server.api.historyservice.v1.ListQueuesRequest + 61, // 61: temporal.server.api.historyservice.v1.HistoryService.AddTasks:input_type -> temporal.server.api.historyservice.v1.AddTasksRequest + 62, // 62: temporal.server.api.historyservice.v1.HistoryService.ListTasks:input_type -> temporal.server.api.historyservice.v1.ListTasksRequest + 63, // 63: temporal.server.api.historyservice.v1.HistoryService.CompleteNexusOperation:input_type -> temporal.server.api.historyservice.v1.CompleteNexusOperationRequest + 64, // 64: temporal.server.api.historyservice.v1.HistoryService.CompleteNexusOperationChasm:input_type -> temporal.server.api.historyservice.v1.CompleteNexusOperationChasmRequest + 65, // 65: temporal.server.api.historyservice.v1.HistoryService.InvokeStateMachineMethod:input_type -> temporal.server.api.historyservice.v1.InvokeStateMachineMethodRequest + 66, // 66: temporal.server.api.historyservice.v1.HistoryService.DeepHealthCheck:input_type -> temporal.server.api.historyservice.v1.DeepHealthCheckRequest + 67, // 67: temporal.server.api.historyservice.v1.HistoryService.SyncWorkflowState:input_type -> temporal.server.api.historyservice.v1.SyncWorkflowStateRequest + 68, // 68: temporal.server.api.historyservice.v1.HistoryService.UpdateActivityOptions:input_type -> temporal.server.api.historyservice.v1.UpdateActivityOptionsRequest + 69, // 69: temporal.server.api.historyservice.v1.HistoryService.PauseActivity:input_type -> temporal.server.api.historyservice.v1.PauseActivityRequest + 70, // 70: temporal.server.api.historyservice.v1.HistoryService.UnpauseActivity:input_type -> temporal.server.api.historyservice.v1.UnpauseActivityRequest + 71, // 71: temporal.server.api.historyservice.v1.HistoryService.ResetActivity:input_type -> temporal.server.api.historyservice.v1.ResetActivityRequest + 72, // 72: temporal.server.api.historyservice.v1.HistoryService.PauseWorkflowExecution:input_type 
-> temporal.server.api.historyservice.v1.PauseWorkflowExecutionRequest + 73, // 73: temporal.server.api.historyservice.v1.HistoryService.UnpauseWorkflowExecution:input_type -> temporal.server.api.historyservice.v1.UnpauseWorkflowExecutionRequest + 74, // 74: temporal.server.api.historyservice.v1.HistoryService.StartNexusOperation:input_type -> temporal.server.api.historyservice.v1.StartNexusOperationRequest + 75, // 75: temporal.server.api.historyservice.v1.HistoryService.CancelNexusOperation:input_type -> temporal.server.api.historyservice.v1.CancelNexusOperationRequest + 76, // 76: temporal.server.api.historyservice.v1.HistoryService.StartWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.StartWorkflowExecutionResponse + 77, // 77: temporal.server.api.historyservice.v1.HistoryService.GetMutableState:output_type -> temporal.server.api.historyservice.v1.GetMutableStateResponse + 78, // 78: temporal.server.api.historyservice.v1.HistoryService.PollMutableState:output_type -> temporal.server.api.historyservice.v1.PollMutableStateResponse + 79, // 79: temporal.server.api.historyservice.v1.HistoryService.ResetStickyTaskQueue:output_type -> temporal.server.api.historyservice.v1.ResetStickyTaskQueueResponse + 80, // 80: temporal.server.api.historyservice.v1.HistoryService.RecordWorkflowTaskStarted:output_type -> temporal.server.api.historyservice.v1.RecordWorkflowTaskStartedResponse + 81, // 81: temporal.server.api.historyservice.v1.HistoryService.RecordActivityTaskStarted:output_type -> temporal.server.api.historyservice.v1.RecordActivityTaskStartedResponse + 82, // 82: temporal.server.api.historyservice.v1.HistoryService.RespondWorkflowTaskCompleted:output_type -> temporal.server.api.historyservice.v1.RespondWorkflowTaskCompletedResponse + 83, // 83: temporal.server.api.historyservice.v1.HistoryService.RespondWorkflowTaskFailed:output_type -> temporal.server.api.historyservice.v1.RespondWorkflowTaskFailedResponse + 84, // 84: 
temporal.server.api.historyservice.v1.HistoryService.IsWorkflowTaskValid:output_type -> temporal.server.api.historyservice.v1.IsWorkflowTaskValidResponse + 85, // 85: temporal.server.api.historyservice.v1.HistoryService.RecordActivityTaskHeartbeat:output_type -> temporal.server.api.historyservice.v1.RecordActivityTaskHeartbeatResponse + 86, // 86: temporal.server.api.historyservice.v1.HistoryService.RespondActivityTaskCompleted:output_type -> temporal.server.api.historyservice.v1.RespondActivityTaskCompletedResponse + 87, // 87: temporal.server.api.historyservice.v1.HistoryService.RespondActivityTaskFailed:output_type -> temporal.server.api.historyservice.v1.RespondActivityTaskFailedResponse + 88, // 88: temporal.server.api.historyservice.v1.HistoryService.RespondActivityTaskCanceled:output_type -> temporal.server.api.historyservice.v1.RespondActivityTaskCanceledResponse + 89, // 89: temporal.server.api.historyservice.v1.HistoryService.IsActivityTaskValid:output_type -> temporal.server.api.historyservice.v1.IsActivityTaskValidResponse + 90, // 90: temporal.server.api.historyservice.v1.HistoryService.SignalWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.SignalWorkflowExecutionResponse + 91, // 91: temporal.server.api.historyservice.v1.HistoryService.SignalWithStartWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.SignalWithStartWorkflowExecutionResponse + 92, // 92: temporal.server.api.historyservice.v1.HistoryService.ExecuteMultiOperation:output_type -> temporal.server.api.historyservice.v1.ExecuteMultiOperationResponse + 93, // 93: temporal.server.api.historyservice.v1.HistoryService.RemoveSignalMutableState:output_type -> temporal.server.api.historyservice.v1.RemoveSignalMutableStateResponse + 94, // 94: temporal.server.api.historyservice.v1.HistoryService.TerminateWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.TerminateWorkflowExecutionResponse + 95, // 95: 
temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.DeleteWorkflowExecutionResponse + 96, // 96: temporal.server.api.historyservice.v1.HistoryService.ResetWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.ResetWorkflowExecutionResponse + 97, // 97: temporal.server.api.historyservice.v1.HistoryService.UpdateWorkflowExecutionOptions:output_type -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionOptionsResponse + 98, // 98: temporal.server.api.historyservice.v1.HistoryService.RequestCancelWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.RequestCancelWorkflowExecutionResponse + 99, // 99: temporal.server.api.historyservice.v1.HistoryService.ScheduleWorkflowTask:output_type -> temporal.server.api.historyservice.v1.ScheduleWorkflowTaskResponse + 100, // 100: temporal.server.api.historyservice.v1.HistoryService.VerifyFirstWorkflowTaskScheduled:output_type -> temporal.server.api.historyservice.v1.VerifyFirstWorkflowTaskScheduledResponse + 101, // 101: temporal.server.api.historyservice.v1.HistoryService.RecordChildExecutionCompleted:output_type -> temporal.server.api.historyservice.v1.RecordChildExecutionCompletedResponse + 102, // 102: temporal.server.api.historyservice.v1.HistoryService.VerifyChildExecutionCompletionRecorded:output_type -> temporal.server.api.historyservice.v1.VerifyChildExecutionCompletionRecordedResponse + 103, // 103: temporal.server.api.historyservice.v1.HistoryService.DescribeWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.DescribeWorkflowExecutionResponse + 104, // 104: temporal.server.api.historyservice.v1.HistoryService.ReplicateEventsV2:output_type -> temporal.server.api.historyservice.v1.ReplicateEventsV2Response + 105, // 105: temporal.server.api.historyservice.v1.HistoryService.ReplicateWorkflowState:output_type -> temporal.server.api.historyservice.v1.ReplicateWorkflowStateResponse + 
106, // 106: temporal.server.api.historyservice.v1.HistoryService.SyncShardStatus:output_type -> temporal.server.api.historyservice.v1.SyncShardStatusResponse + 107, // 107: temporal.server.api.historyservice.v1.HistoryService.SyncActivity:output_type -> temporal.server.api.historyservice.v1.SyncActivityResponse + 108, // 108: temporal.server.api.historyservice.v1.HistoryService.DescribeMutableState:output_type -> temporal.server.api.historyservice.v1.DescribeMutableStateResponse + 109, // 109: temporal.server.api.historyservice.v1.HistoryService.DescribeHistoryHost:output_type -> temporal.server.api.historyservice.v1.DescribeHistoryHostResponse + 110, // 110: temporal.server.api.historyservice.v1.HistoryService.CloseShard:output_type -> temporal.server.api.historyservice.v1.CloseShardResponse + 111, // 111: temporal.server.api.historyservice.v1.HistoryService.GetShard:output_type -> temporal.server.api.historyservice.v1.GetShardResponse + 112, // 112: temporal.server.api.historyservice.v1.HistoryService.RemoveTask:output_type -> temporal.server.api.historyservice.v1.RemoveTaskResponse + 113, // 113: temporal.server.api.historyservice.v1.HistoryService.GetReplicationMessages:output_type -> temporal.server.api.historyservice.v1.GetReplicationMessagesResponse + 114, // 114: temporal.server.api.historyservice.v1.HistoryService.GetDLQReplicationMessages:output_type -> temporal.server.api.historyservice.v1.GetDLQReplicationMessagesResponse + 115, // 115: temporal.server.api.historyservice.v1.HistoryService.QueryWorkflow:output_type -> temporal.server.api.historyservice.v1.QueryWorkflowResponse + 116, // 116: temporal.server.api.historyservice.v1.HistoryService.ReapplyEvents:output_type -> temporal.server.api.historyservice.v1.ReapplyEventsResponse + 117, // 117: temporal.server.api.historyservice.v1.HistoryService.GetDLQMessages:output_type -> temporal.server.api.historyservice.v1.GetDLQMessagesResponse + 118, // 118: 
temporal.server.api.historyservice.v1.HistoryService.PurgeDLQMessages:output_type -> temporal.server.api.historyservice.v1.PurgeDLQMessagesResponse + 119, // 119: temporal.server.api.historyservice.v1.HistoryService.MergeDLQMessages:output_type -> temporal.server.api.historyservice.v1.MergeDLQMessagesResponse + 120, // 120: temporal.server.api.historyservice.v1.HistoryService.RefreshWorkflowTasks:output_type -> temporal.server.api.historyservice.v1.RefreshWorkflowTasksResponse + 121, // 121: temporal.server.api.historyservice.v1.HistoryService.GenerateLastHistoryReplicationTasks:output_type -> temporal.server.api.historyservice.v1.GenerateLastHistoryReplicationTasksResponse + 122, // 122: temporal.server.api.historyservice.v1.HistoryService.GetReplicationStatus:output_type -> temporal.server.api.historyservice.v1.GetReplicationStatusResponse + 123, // 123: temporal.server.api.historyservice.v1.HistoryService.RebuildMutableState:output_type -> temporal.server.api.historyservice.v1.RebuildMutableStateResponse + 124, // 124: temporal.server.api.historyservice.v1.HistoryService.ImportWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.ImportWorkflowExecutionResponse + 125, // 125: temporal.server.api.historyservice.v1.HistoryService.DeleteWorkflowVisibilityRecord:output_type -> temporal.server.api.historyservice.v1.DeleteWorkflowVisibilityRecordResponse + 126, // 126: temporal.server.api.historyservice.v1.HistoryService.UpdateWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.UpdateWorkflowExecutionResponse + 127, // 127: temporal.server.api.historyservice.v1.HistoryService.PollWorkflowExecutionUpdate:output_type -> temporal.server.api.historyservice.v1.PollWorkflowExecutionUpdateResponse + 128, // 128: temporal.server.api.historyservice.v1.HistoryService.StreamWorkflowReplicationMessages:output_type -> temporal.server.api.historyservice.v1.StreamWorkflowReplicationMessagesResponse + 129, // 129: 
temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistory:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryResponse + 130, // 130: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionHistoryReverse:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionHistoryReverseResponse + 131, // 131: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistoryV2:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryV2Response + 132, // 132: temporal.server.api.historyservice.v1.HistoryService.GetWorkflowExecutionRawHistory:output_type -> temporal.server.api.historyservice.v1.GetWorkflowExecutionRawHistoryResponse + 133, // 133: temporal.server.api.historyservice.v1.HistoryService.ForceDeleteWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.ForceDeleteWorkflowExecutionResponse + 134, // 134: temporal.server.api.historyservice.v1.HistoryService.GetDLQTasks:output_type -> temporal.server.api.historyservice.v1.GetDLQTasksResponse + 135, // 135: temporal.server.api.historyservice.v1.HistoryService.DeleteDLQTasks:output_type -> temporal.server.api.historyservice.v1.DeleteDLQTasksResponse + 136, // 136: temporal.server.api.historyservice.v1.HistoryService.ListQueues:output_type -> temporal.server.api.historyservice.v1.ListQueuesResponse + 137, // 137: temporal.server.api.historyservice.v1.HistoryService.AddTasks:output_type -> temporal.server.api.historyservice.v1.AddTasksResponse + 138, // 138: temporal.server.api.historyservice.v1.HistoryService.ListTasks:output_type -> temporal.server.api.historyservice.v1.ListTasksResponse + 139, // 139: temporal.server.api.historyservice.v1.HistoryService.CompleteNexusOperation:output_type -> temporal.server.api.historyservice.v1.CompleteNexusOperationResponse + 140, // 140: temporal.server.api.historyservice.v1.HistoryService.CompleteNexusOperationChasm:output_type -> 
temporal.server.api.historyservice.v1.CompleteNexusOperationChasmResponse + 141, // 141: temporal.server.api.historyservice.v1.HistoryService.InvokeStateMachineMethod:output_type -> temporal.server.api.historyservice.v1.InvokeStateMachineMethodResponse + 142, // 142: temporal.server.api.historyservice.v1.HistoryService.DeepHealthCheck:output_type -> temporal.server.api.historyservice.v1.DeepHealthCheckResponse + 143, // 143: temporal.server.api.historyservice.v1.HistoryService.SyncWorkflowState:output_type -> temporal.server.api.historyservice.v1.SyncWorkflowStateResponse + 144, // 144: temporal.server.api.historyservice.v1.HistoryService.UpdateActivityOptions:output_type -> temporal.server.api.historyservice.v1.UpdateActivityOptionsResponse + 145, // 145: temporal.server.api.historyservice.v1.HistoryService.PauseActivity:output_type -> temporal.server.api.historyservice.v1.PauseActivityResponse + 146, // 146: temporal.server.api.historyservice.v1.HistoryService.UnpauseActivity:output_type -> temporal.server.api.historyservice.v1.UnpauseActivityResponse + 147, // 147: temporal.server.api.historyservice.v1.HistoryService.ResetActivity:output_type -> temporal.server.api.historyservice.v1.ResetActivityResponse + 148, // 148: temporal.server.api.historyservice.v1.HistoryService.PauseWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.PauseWorkflowExecutionResponse + 149, // 149: temporal.server.api.historyservice.v1.HistoryService.UnpauseWorkflowExecution:output_type -> temporal.server.api.historyservice.v1.UnpauseWorkflowExecutionResponse + 150, // 150: temporal.server.api.historyservice.v1.HistoryService.StartNexusOperation:output_type -> temporal.server.api.historyservice.v1.StartNexusOperationResponse + 151, // 151: temporal.server.api.historyservice.v1.HistoryService.CancelNexusOperation:output_type -> temporal.server.api.historyservice.v1.CancelNexusOperationResponse + 76, // [76:152] is the sub-list for method output_type + 0, // [0:76] is the 
sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -973,7 +432,7 @@ func file_temporal_server_api_historyservice_v1_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_historyservice_v1_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_historyservice_v1_service_proto_rawDesc), len(file_temporal_server_api_historyservice_v1_service_proto_rawDesc)), NumEnums: 0, NumMessages: 0, NumExtensions: 0, @@ -983,7 +442,6 @@ func file_temporal_server_api_historyservice_v1_service_proto_init() { DependencyIndexes: file_temporal_server_api_historyservice_v1_service_proto_depIdxs, }.Build() File_temporal_server_api_historyservice_v1_service_proto = out.File - file_temporal_server_api_historyservice_v1_service_proto_rawDesc = nil file_temporal_server_api_historyservice_v1_service_proto_goTypes = nil file_temporal_server_api_historyservice_v1_service_proto_depIdxs = nil } diff --git a/api/historyservice/v1/service_grpc.pb.go b/api/historyservice/v1/service_grpc.pb.go index b76e07dd7ea..ddc94fcfafa 100644 --- a/api/historyservice/v1/service_grpc.pb.go +++ b/api/historyservice/v1/service_grpc.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// plugins: // - protoc-gen-go-grpc @@ -58,10 +36,12 @@ const ( HistoryService_IsActivityTaskValid_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/IsActivityTaskValid" HistoryService_SignalWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/SignalWorkflowExecution" HistoryService_SignalWithStartWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/SignalWithStartWorkflowExecution" + HistoryService_ExecuteMultiOperation_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/ExecuteMultiOperation" HistoryService_RemoveSignalMutableState_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/RemoveSignalMutableState" HistoryService_TerminateWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/TerminateWorkflowExecution" HistoryService_DeleteWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/DeleteWorkflowExecution" HistoryService_ResetWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/ResetWorkflowExecution" + HistoryService_UpdateWorkflowExecutionOptions_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/UpdateWorkflowExecutionOptions" HistoryService_RequestCancelWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/RequestCancelWorkflowExecution" HistoryService_ScheduleWorkflowTask_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/ScheduleWorkflowTask" HistoryService_VerifyFirstWorkflowTaskScheduled_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/VerifyFirstWorkflowTaskScheduled" @@ -103,6 +83,19 @@ const ( HistoryService_ListQueues_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/ListQueues" HistoryService_AddTasks_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/AddTasks" 
HistoryService_ListTasks_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/ListTasks" + HistoryService_CompleteNexusOperation_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/CompleteNexusOperation" + HistoryService_CompleteNexusOperationChasm_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/CompleteNexusOperationChasm" + HistoryService_InvokeStateMachineMethod_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/InvokeStateMachineMethod" + HistoryService_DeepHealthCheck_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/DeepHealthCheck" + HistoryService_SyncWorkflowState_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/SyncWorkflowState" + HistoryService_UpdateActivityOptions_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/UpdateActivityOptions" + HistoryService_PauseActivity_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/PauseActivity" + HistoryService_UnpauseActivity_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/UnpauseActivity" + HistoryService_ResetActivity_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/ResetActivity" + HistoryService_PauseWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/PauseWorkflowExecution" + HistoryService_UnpauseWorkflowExecution_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/UnpauseWorkflowExecution" + HistoryService_StartNexusOperation_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/StartNexusOperation" + HistoryService_CancelNexusOperation_FullMethodName = "/temporal.server.api.historyservice.v1.HistoryService/CancelNexusOperation" ) // HistoryServiceClient is the client API for HistoryService service. 
@@ -187,6 +180,8 @@ type HistoryServiceClient interface { // and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success. // It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy. SignalWithStartWorkflowExecution(ctx context.Context, in *SignalWithStartWorkflowExecutionRequest, opts ...grpc.CallOption) (*SignalWithStartWorkflowExecutionResponse, error) + // ExecuteMultiOperation executes multiple operations within a single workflow. + ExecuteMultiOperation(ctx context.Context, in *ExecuteMultiOperationRequest, opts ...grpc.CallOption) (*ExecuteMultiOperationResponse, error) // RemoveSignalMutableState is used to remove a signal request Id that was previously recorded. This is currently // used to clean execution info when signal workflow task finished. RemoveSignalMutableState(ctx context.Context, in *RemoveSignalMutableStateRequest, opts ...grpc.CallOption) (*RemoveSignalMutableStateResponse, error) @@ -201,6 +196,11 @@ type HistoryServiceClient interface { // in the history and immediately terminating the current execution instance. // After reset, the history will grow from nextFirstEventId. ResetWorkflowExecution(ctx context.Context, in *ResetWorkflowExecutionRequest, opts ...grpc.CallOption) (*ResetWorkflowExecutionResponse, error) + // UpdateWorkflowExecutionOptions modifies the options of an existing workflow execution. + // Currently the option that can be updated is setting and unsetting a versioning behavior override. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + UpdateWorkflowExecutionOptions(ctx context.Context, in *UpdateWorkflowExecutionOptionsRequest, opts ...grpc.CallOption) (*UpdateWorkflowExecutionOptionsResponse, error) // RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance. 
// It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new WorkflowTask // created for the workflow instance so new commands could be made. It fails with 'EntityNotExistsError' if the workflow is not valid @@ -308,6 +308,85 @@ type HistoryServiceClient interface { // in the same shard. Calls to the persistence API will be batched by workflow run. AddTasks(ctx context.Context, in *AddTasksRequest, opts ...grpc.CallOption) (*AddTasksResponse, error) ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + // Complete an async Nexus Operation using a completion token. The completion state could be successful, failed, or + // canceled. + // + // Deprecated. Will be renamed to CompleteNexusOperationHsm in a future release. + CompleteNexusOperation(ctx context.Context, in *CompleteNexusOperationRequest, opts ...grpc.CallOption) (*CompleteNexusOperationResponse, error) + // Complete an async Nexus Operation using a CHASM reference. The completion + // state could be successful, failed, or canceled. 
+ CompleteNexusOperationChasm(ctx context.Context, in *CompleteNexusOperationChasmRequest, opts ...grpc.CallOption) (*CompleteNexusOperationChasmResponse, error) + InvokeStateMachineMethod(ctx context.Context, in *InvokeStateMachineMethodRequest, opts ...grpc.CallOption) (*InvokeStateMachineMethodResponse, error) + // Deep health check history service dependencies health status + DeepHealthCheck(ctx context.Context, in *DeepHealthCheckRequest, opts ...grpc.CallOption) (*DeepHealthCheckResponse, error) + SyncWorkflowState(ctx context.Context, in *SyncWorkflowStateRequest, opts ...grpc.CallOption) (*SyncWorkflowStateResponse, error) + // UpdateActivityOptions is called by the client to update the options of an activity + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + UpdateActivityOptions(ctx context.Context, in *UpdateActivityOptionsRequest, opts ...grpc.CallOption) (*UpdateActivityOptionsResponse, error) + // PauseActivity pauses the execution of an activity specified by its ID. + // Returns a `NotFound` error if there is no pending activity with the provided ID. + // + // Pausing an activity means: + // - If the activity is currently waiting for a retry or is running and subsequently fails, + // it will not be rescheduled until it is unpause. + // - If the activity is already paused, calling this method will have no effect. + // - If the activity is running and finishes successfully, the activity will be completed. + // - If the activity is running and finishes with failure: + // - if there is no retry left - the activity will be completed. + // - if there are more retries left - the activity will be paused. + // + // For long-running activities: + // - activities in paused state will send a cancellation with "activity_paused" set to 'true' in response to 'RecordActivityTaskHeartbeat'. + // - The activity should respond to the cancellation accordingly. 
+ // For long-running activities: + // - activity in paused state will send a cancellation with "activity_paused" set to 'true' in response to 'RecordActivityTaskHeartbeat'. + // - The activity should respond to the cancellation accordingly. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + PauseActivity(ctx context.Context, in *PauseActivityRequest, opts ...grpc.CallOption) (*PauseActivityResponse, error) + // UnpauseActivity unpauses the execution of an activity specified by its ID. + // + // If activity is not paused, this call will have no effect. + // If the activity is waiting for retry, it will be scheduled immediately (* see 'jitter' flag). + // Once the activity is unpause, all timeout timers will be regenerated. + // + // Flags: + // 'jitter': the activity will be scheduled at a random time within the jitter duration. + // 'reset_attempts': the number of attempts will be reset. + // 'reset_heartbeat': the activity heartbeat timer and heartbeats will be reset. + // + // Returns a `NotFound` error if there is no pending activity with the provided ID. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + UnpauseActivity(ctx context.Context, in *UnpauseActivityRequest, opts ...grpc.CallOption) (*UnpauseActivityResponse, error) + // ResetActivity resets the execution of an activity specified by its ID. + // + // Resetting an activity means: + // - number of attempts will be reset to 0. + // - activity timeouts will be reset. + // - if the activity is waiting for retry, and it is not paused or 'keep_paused' is not provided: + // it will be scheduled immediately (* see 'jitter' flag), + // + // Flags: + // + // 'jitter': the activity will be scheduled at a random time within the jitter duration. + // If the activity currently paused it will be unpause, unless 'keep_paused' flag is provided. 
+ // 'reset_heartbeats': the activity heartbeat timer and heartbeats will be reset. + // 'keep_paused': if the activity is paused, it will remain paused. + // + // Returns a `NotFound` error if there is no pending activity with the provided ID. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + ResetActivity(ctx context.Context, in *ResetActivityRequest, opts ...grpc.CallOption) (*ResetActivityResponse, error) + // PauseWorkflowExecution pauses the workflow execution specified in the request. + PauseWorkflowExecution(ctx context.Context, in *PauseWorkflowExecutionRequest, opts ...grpc.CallOption) (*PauseWorkflowExecutionResponse, error) + // UnpauseWorkflowExecution unpauses the workflow execution specified in the request. + UnpauseWorkflowExecution(ctx context.Context, in *UnpauseWorkflowExecutionRequest, opts ...grpc.CallOption) (*UnpauseWorkflowExecutionResponse, error) + // StartNexusOperation starts a Nexus operation on the __temporal_system endpoint. + StartNexusOperation(ctx context.Context, in *StartNexusOperationRequest, opts ...grpc.CallOption) (*StartNexusOperationResponse, error) + // CancelNexusOperation cancels a Nexus operation on the __temporal_system endpoint. + CancelNexusOperation(ctx context.Context, in *CancelNexusOperationRequest, opts ...grpc.CallOption) (*CancelNexusOperationResponse, error) } type historyServiceClient struct { @@ -462,6 +541,15 @@ func (c *historyServiceClient) SignalWithStartWorkflowExecution(ctx context.Cont return out, nil } +func (c *historyServiceClient) ExecuteMultiOperation(ctx context.Context, in *ExecuteMultiOperationRequest, opts ...grpc.CallOption) (*ExecuteMultiOperationResponse, error) { + out := new(ExecuteMultiOperationResponse) + err := c.cc.Invoke(ctx, HistoryService_ExecuteMultiOperation_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *historyServiceClient) RemoveSignalMutableState(ctx context.Context, in *RemoveSignalMutableStateRequest, opts ...grpc.CallOption) (*RemoveSignalMutableStateResponse, error) { out := new(RemoveSignalMutableStateResponse) err := c.cc.Invoke(ctx, HistoryService_RemoveSignalMutableState_FullMethodName, in, out, opts...) @@ -498,6 +586,15 @@ func (c *historyServiceClient) ResetWorkflowExecution(ctx context.Context, in *R return out, nil } +func (c *historyServiceClient) UpdateWorkflowExecutionOptions(ctx context.Context, in *UpdateWorkflowExecutionOptionsRequest, opts ...grpc.CallOption) (*UpdateWorkflowExecutionOptionsResponse, error) { + out := new(UpdateWorkflowExecutionOptionsResponse) + err := c.cc.Invoke(ctx, HistoryService_UpdateWorkflowExecutionOptions_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *historyServiceClient) RequestCancelWorkflowExecution(ctx context.Context, in *RequestCancelWorkflowExecutionRequest, opts ...grpc.CallOption) (*RequestCancelWorkflowExecutionResponse, error) { out := new(RequestCancelWorkflowExecutionResponse) err := c.cc.Invoke(ctx, HistoryService_RequestCancelWorkflowExecution_FullMethodName, in, out, opts...) @@ -889,6 +986,123 @@ func (c *historyServiceClient) ListTasks(ctx context.Context, in *ListTasksReque return out, nil } +func (c *historyServiceClient) CompleteNexusOperation(ctx context.Context, in *CompleteNexusOperationRequest, opts ...grpc.CallOption) (*CompleteNexusOperationResponse, error) { + out := new(CompleteNexusOperationResponse) + err := c.cc.Invoke(ctx, HistoryService_CompleteNexusOperation_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) CompleteNexusOperationChasm(ctx context.Context, in *CompleteNexusOperationChasmRequest, opts ...grpc.CallOption) (*CompleteNexusOperationChasmResponse, error) { + out := new(CompleteNexusOperationChasmResponse) + err := c.cc.Invoke(ctx, HistoryService_CompleteNexusOperationChasm_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) InvokeStateMachineMethod(ctx context.Context, in *InvokeStateMachineMethodRequest, opts ...grpc.CallOption) (*InvokeStateMachineMethodResponse, error) { + out := new(InvokeStateMachineMethodResponse) + err := c.cc.Invoke(ctx, HistoryService_InvokeStateMachineMethod_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) DeepHealthCheck(ctx context.Context, in *DeepHealthCheckRequest, opts ...grpc.CallOption) (*DeepHealthCheckResponse, error) { + out := new(DeepHealthCheckResponse) + err := c.cc.Invoke(ctx, HistoryService_DeepHealthCheck_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) SyncWorkflowState(ctx context.Context, in *SyncWorkflowStateRequest, opts ...grpc.CallOption) (*SyncWorkflowStateResponse, error) { + out := new(SyncWorkflowStateResponse) + err := c.cc.Invoke(ctx, HistoryService_SyncWorkflowState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) UpdateActivityOptions(ctx context.Context, in *UpdateActivityOptionsRequest, opts ...grpc.CallOption) (*UpdateActivityOptionsResponse, error) { + out := new(UpdateActivityOptionsResponse) + err := c.cc.Invoke(ctx, HistoryService_UpdateActivityOptions_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) PauseActivity(ctx context.Context, in *PauseActivityRequest, opts ...grpc.CallOption) (*PauseActivityResponse, error) { + out := new(PauseActivityResponse) + err := c.cc.Invoke(ctx, HistoryService_PauseActivity_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) UnpauseActivity(ctx context.Context, in *UnpauseActivityRequest, opts ...grpc.CallOption) (*UnpauseActivityResponse, error) { + out := new(UnpauseActivityResponse) + err := c.cc.Invoke(ctx, HistoryService_UnpauseActivity_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) ResetActivity(ctx context.Context, in *ResetActivityRequest, opts ...grpc.CallOption) (*ResetActivityResponse, error) { + out := new(ResetActivityResponse) + err := c.cc.Invoke(ctx, HistoryService_ResetActivity_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) PauseWorkflowExecution(ctx context.Context, in *PauseWorkflowExecutionRequest, opts ...grpc.CallOption) (*PauseWorkflowExecutionResponse, error) { + out := new(PauseWorkflowExecutionResponse) + err := c.cc.Invoke(ctx, HistoryService_PauseWorkflowExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) UnpauseWorkflowExecution(ctx context.Context, in *UnpauseWorkflowExecutionRequest, opts ...grpc.CallOption) (*UnpauseWorkflowExecutionResponse, error) { + out := new(UnpauseWorkflowExecutionResponse) + err := c.cc.Invoke(ctx, HistoryService_UnpauseWorkflowExecution_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) StartNexusOperation(ctx context.Context, in *StartNexusOperationRequest, opts ...grpc.CallOption) (*StartNexusOperationResponse, error) { + out := new(StartNexusOperationResponse) + err := c.cc.Invoke(ctx, HistoryService_StartNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *historyServiceClient) CancelNexusOperation(ctx context.Context, in *CancelNexusOperationRequest, opts ...grpc.CallOption) (*CancelNexusOperationResponse, error) { + out := new(CancelNexusOperationResponse) + err := c.cc.Invoke(ctx, HistoryService_CancelNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // HistoryServiceServer is the server API for HistoryService service. // All implementations must embed UnimplementedHistoryServiceServer // for forward compatibility @@ -914,7 +1128,7 @@ type HistoryServiceServer interface { // RecordWorkflowTaskStarted is called by the Matchingservice before it hands a workflow task to the application worker in response to // a PollWorkflowTaskQueue call. It records in the history the event that the workflow task has started. It will return 'TaskAlreadyStartedError', // if the workflow's execution history already includes a record of the event starting. - RecordWorkflowTaskStarted(context.Context, *RecordWorkflowTaskStartedRequest) (*RecordWorkflowTaskStartedResponse, error) + RecordWorkflowTaskStarted(context.Context, *RecordWorkflowTaskStartedRequest) (*RecordWorkflowTaskStartedResponseWithRawHistory, error) // RecordActivityTaskStarted is called by the Matchingservice before it hands a workflow task to the application worker in response to // a PollActivityTaskQueue call. It records in the history the event that the workflow task has started. 
It will return 'TaskAlreadyStartedError', // if the workflow's execution history already includes a record of the event starting. @@ -971,6 +1185,8 @@ type HistoryServiceServer interface { // and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success. // It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy. SignalWithStartWorkflowExecution(context.Context, *SignalWithStartWorkflowExecutionRequest) (*SignalWithStartWorkflowExecutionResponse, error) + // ExecuteMultiOperation executes multiple operations within a single workflow. + ExecuteMultiOperation(context.Context, *ExecuteMultiOperationRequest) (*ExecuteMultiOperationResponse, error) // RemoveSignalMutableState is used to remove a signal request Id that was previously recorded. This is currently // used to clean execution info when signal workflow task finished. RemoveSignalMutableState(context.Context, *RemoveSignalMutableStateRequest) (*RemoveSignalMutableStateResponse, error) @@ -985,6 +1201,11 @@ type HistoryServiceServer interface { // in the history and immediately terminating the current execution instance. // After reset, the history will grow from nextFirstEventId. ResetWorkflowExecution(context.Context, *ResetWorkflowExecutionRequest) (*ResetWorkflowExecutionResponse, error) + // UpdateWorkflowExecutionOptions modifies the options of an existing workflow execution. + // Currently the option that can be updated is setting and unsetting a versioning behavior override. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + UpdateWorkflowExecutionOptions(context.Context, *UpdateWorkflowExecutionOptionsRequest) (*UpdateWorkflowExecutionOptionsResponse, error) // RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance. 
// It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new WorkflowTask // created for the workflow instance so new commands could be made. It fails with 'EntityNotExistsError' if the workflow is not valid @@ -1074,7 +1295,7 @@ type HistoryServiceServer interface { // aip.dev/not-precedent: This service does not follow the update method API --) PollWorkflowExecutionUpdate(context.Context, *PollWorkflowExecutionUpdateRequest) (*PollWorkflowExecutionUpdateResponse, error) StreamWorkflowReplicationMessages(HistoryService_StreamWorkflowReplicationMessagesServer) error - GetWorkflowExecutionHistory(context.Context, *GetWorkflowExecutionHistoryRequest) (*GetWorkflowExecutionHistoryResponse, error) + GetWorkflowExecutionHistory(context.Context, *GetWorkflowExecutionHistoryRequest) (*GetWorkflowExecutionHistoryResponseWithRaw, error) GetWorkflowExecutionHistoryReverse(context.Context, *GetWorkflowExecutionHistoryReverseRequest) (*GetWorkflowExecutionHistoryReverseResponse, error) GetWorkflowExecutionRawHistoryV2(context.Context, *GetWorkflowExecutionRawHistoryV2Request) (*GetWorkflowExecutionRawHistoryV2Response, error) GetWorkflowExecutionRawHistory(context.Context, *GetWorkflowExecutionRawHistoryRequest) (*GetWorkflowExecutionRawHistoryResponse, error) @@ -1092,6 +1313,85 @@ type HistoryServiceServer interface { // in the same shard. Calls to the persistence API will be batched by workflow run. AddTasks(context.Context, *AddTasksRequest) (*AddTasksResponse, error) ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error) + // Complete an async Nexus Operation using a completion token. The completion state could be successful, failed, or + // canceled. + // + // Deprecated. Will be renamed to CompleteNexusOperationHsm in a future release. 
+ CompleteNexusOperation(context.Context, *CompleteNexusOperationRequest) (*CompleteNexusOperationResponse, error) + // Complete an async Nexus Operation using a CHASM reference. The completion + // state could be successful, failed, or canceled. + CompleteNexusOperationChasm(context.Context, *CompleteNexusOperationChasmRequest) (*CompleteNexusOperationChasmResponse, error) + InvokeStateMachineMethod(context.Context, *InvokeStateMachineMethodRequest) (*InvokeStateMachineMethodResponse, error) + // Deep health check history service dependencies health status + DeepHealthCheck(context.Context, *DeepHealthCheckRequest) (*DeepHealthCheckResponse, error) + SyncWorkflowState(context.Context, *SyncWorkflowStateRequest) (*SyncWorkflowStateResponse, error) + // UpdateActivityOptions is called by the client to update the options of an activity + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + UpdateActivityOptions(context.Context, *UpdateActivityOptionsRequest) (*UpdateActivityOptionsResponse, error) + // PauseActivity pauses the execution of an activity specified by its ID. + // Returns a `NotFound` error if there is no pending activity with the provided ID. + // + // Pausing an activity means: + // - If the activity is currently waiting for a retry or is running and subsequently fails, + // it will not be rescheduled until it is unpause. + // - If the activity is already paused, calling this method will have no effect. + // - If the activity is running and finishes successfully, the activity will be completed. + // - If the activity is running and finishes with failure: + // - if there is no retry left - the activity will be completed. + // - if there are more retries left - the activity will be paused. + // + // For long-running activities: + // - activities in paused state will send a cancellation with "activity_paused" set to 'true' in response to 'RecordActivityTaskHeartbeat'. 
+ // - The activity should respond to the cancellation accordingly. + // For long-running activities: + // - activity in paused state will send a cancellation with "activity_paused" set to 'true' in response to 'RecordActivityTaskHeartbeat'. + // - The activity should respond to the cancellation accordingly. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + PauseActivity(context.Context, *PauseActivityRequest) (*PauseActivityResponse, error) + // UnpauseActivity unpauses the execution of an activity specified by its ID. + // + // If activity is not paused, this call will have no effect. + // If the activity is waiting for retry, it will be scheduled immediately (* see 'jitter' flag). + // Once the activity is unpause, all timeout timers will be regenerated. + // + // Flags: + // 'jitter': the activity will be scheduled at a random time within the jitter duration. + // 'reset_attempts': the number of attempts will be reset. + // 'reset_heartbeat': the activity heartbeat timer and heartbeats will be reset. + // + // Returns a `NotFound` error if there is no pending activity with the provided ID. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + UnpauseActivity(context.Context, *UnpauseActivityRequest) (*UnpauseActivityResponse, error) + // ResetActivity resets the execution of an activity specified by its ID. + // + // Resetting an activity means: + // - number of attempts will be reset to 0. + // - activity timeouts will be reset. + // - if the activity is waiting for retry, and it is not paused or 'keep_paused' is not provided: + // it will be scheduled immediately (* see 'jitter' flag), + // + // Flags: + // + // 'jitter': the activity will be scheduled at a random time within the jitter duration. + // If the activity currently paused it will be unpause, unless 'keep_paused' flag is provided. 
+ // 'reset_heartbeats': the activity heartbeat timer and heartbeats will be reset. + // 'keep_paused': if the activity is paused, it will remain paused. + // + // Returns a `NotFound` error if there is no pending activity with the provided ID. + // (-- api-linter: core::0134::method-signature=disabled + // (-- api-linter: core::0134::response-message-name=disabled + ResetActivity(context.Context, *ResetActivityRequest) (*ResetActivityResponse, error) + // PauseWorkflowExecution pauses the workflow execution specified in the request. + PauseWorkflowExecution(context.Context, *PauseWorkflowExecutionRequest) (*PauseWorkflowExecutionResponse, error) + // UnpauseWorkflowExecution unpauses the workflow execution specified in the request. + UnpauseWorkflowExecution(context.Context, *UnpauseWorkflowExecutionRequest) (*UnpauseWorkflowExecutionResponse, error) + // StartNexusOperation starts a Nexus operation on the __temporal_system endpoint. + StartNexusOperation(context.Context, *StartNexusOperationRequest) (*StartNexusOperationResponse, error) + // CancelNexusOperation cancels a Nexus operation on the __temporal_system endpoint. 
+ CancelNexusOperation(context.Context, *CancelNexusOperationRequest) (*CancelNexusOperationResponse, error) mustEmbedUnimplementedHistoryServiceServer() } @@ -1111,7 +1411,7 @@ func (UnimplementedHistoryServiceServer) PollMutableState(context.Context, *Poll func (UnimplementedHistoryServiceServer) ResetStickyTaskQueue(context.Context, *ResetStickyTaskQueueRequest) (*ResetStickyTaskQueueResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ResetStickyTaskQueue not implemented") } -func (UnimplementedHistoryServiceServer) RecordWorkflowTaskStarted(context.Context, *RecordWorkflowTaskStartedRequest) (*RecordWorkflowTaskStartedResponse, error) { +func (UnimplementedHistoryServiceServer) RecordWorkflowTaskStarted(context.Context, *RecordWorkflowTaskStartedRequest) (*RecordWorkflowTaskStartedResponseWithRawHistory, error) { return nil, status.Errorf(codes.Unimplemented, "method RecordWorkflowTaskStarted not implemented") } func (UnimplementedHistoryServiceServer) RecordActivityTaskStarted(context.Context, *RecordActivityTaskStartedRequest) (*RecordActivityTaskStartedResponse, error) { @@ -1147,6 +1447,9 @@ func (UnimplementedHistoryServiceServer) SignalWorkflowExecution(context.Context func (UnimplementedHistoryServiceServer) SignalWithStartWorkflowExecution(context.Context, *SignalWithStartWorkflowExecutionRequest) (*SignalWithStartWorkflowExecutionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method SignalWithStartWorkflowExecution not implemented") } +func (UnimplementedHistoryServiceServer) ExecuteMultiOperation(context.Context, *ExecuteMultiOperationRequest) (*ExecuteMultiOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExecuteMultiOperation not implemented") +} func (UnimplementedHistoryServiceServer) RemoveSignalMutableState(context.Context, *RemoveSignalMutableStateRequest) (*RemoveSignalMutableStateResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method 
RemoveSignalMutableState not implemented") } @@ -1159,6 +1462,9 @@ func (UnimplementedHistoryServiceServer) DeleteWorkflowExecution(context.Context func (UnimplementedHistoryServiceServer) ResetWorkflowExecution(context.Context, *ResetWorkflowExecutionRequest) (*ResetWorkflowExecutionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ResetWorkflowExecution not implemented") } +func (UnimplementedHistoryServiceServer) UpdateWorkflowExecutionOptions(context.Context, *UpdateWorkflowExecutionOptionsRequest) (*UpdateWorkflowExecutionOptionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateWorkflowExecutionOptions not implemented") +} func (UnimplementedHistoryServiceServer) RequestCancelWorkflowExecution(context.Context, *RequestCancelWorkflowExecutionRequest) (*RequestCancelWorkflowExecutionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method RequestCancelWorkflowExecution not implemented") } @@ -1252,7 +1558,7 @@ func (UnimplementedHistoryServiceServer) PollWorkflowExecutionUpdate(context.Con func (UnimplementedHistoryServiceServer) StreamWorkflowReplicationMessages(HistoryService_StreamWorkflowReplicationMessagesServer) error { return status.Errorf(codes.Unimplemented, "method StreamWorkflowReplicationMessages not implemented") } -func (UnimplementedHistoryServiceServer) GetWorkflowExecutionHistory(context.Context, *GetWorkflowExecutionHistoryRequest) (*GetWorkflowExecutionHistoryResponse, error) { +func (UnimplementedHistoryServiceServer) GetWorkflowExecutionHistory(context.Context, *GetWorkflowExecutionHistoryRequest) (*GetWorkflowExecutionHistoryResponseWithRaw, error) { return nil, status.Errorf(codes.Unimplemented, "method GetWorkflowExecutionHistory not implemented") } func (UnimplementedHistoryServiceServer) GetWorkflowExecutionHistoryReverse(context.Context, *GetWorkflowExecutionHistoryReverseRequest) (*GetWorkflowExecutionHistoryReverseResponse, error) { @@ -1282,6 +1588,45 @@ func 
(UnimplementedHistoryServiceServer) AddTasks(context.Context, *AddTasksRequ func (UnimplementedHistoryServiceServer) ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListTasks not implemented") } +func (UnimplementedHistoryServiceServer) CompleteNexusOperation(context.Context, *CompleteNexusOperationRequest) (*CompleteNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteNexusOperation not implemented") +} +func (UnimplementedHistoryServiceServer) CompleteNexusOperationChasm(context.Context, *CompleteNexusOperationChasmRequest) (*CompleteNexusOperationChasmResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CompleteNexusOperationChasm not implemented") +} +func (UnimplementedHistoryServiceServer) InvokeStateMachineMethod(context.Context, *InvokeStateMachineMethodRequest) (*InvokeStateMachineMethodResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method InvokeStateMachineMethod not implemented") +} +func (UnimplementedHistoryServiceServer) DeepHealthCheck(context.Context, *DeepHealthCheckRequest) (*DeepHealthCheckResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeepHealthCheck not implemented") +} +func (UnimplementedHistoryServiceServer) SyncWorkflowState(context.Context, *SyncWorkflowStateRequest) (*SyncWorkflowStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncWorkflowState not implemented") +} +func (UnimplementedHistoryServiceServer) UpdateActivityOptions(context.Context, *UpdateActivityOptionsRequest) (*UpdateActivityOptionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateActivityOptions not implemented") +} +func (UnimplementedHistoryServiceServer) PauseActivity(context.Context, *PauseActivityRequest) (*PauseActivityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method 
PauseActivity not implemented") +} +func (UnimplementedHistoryServiceServer) UnpauseActivity(context.Context, *UnpauseActivityRequest) (*UnpauseActivityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnpauseActivity not implemented") +} +func (UnimplementedHistoryServiceServer) ResetActivity(context.Context, *ResetActivityRequest) (*ResetActivityResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetActivity not implemented") +} +func (UnimplementedHistoryServiceServer) PauseWorkflowExecution(context.Context, *PauseWorkflowExecutionRequest) (*PauseWorkflowExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PauseWorkflowExecution not implemented") +} +func (UnimplementedHistoryServiceServer) UnpauseWorkflowExecution(context.Context, *UnpauseWorkflowExecutionRequest) (*UnpauseWorkflowExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UnpauseWorkflowExecution not implemented") +} +func (UnimplementedHistoryServiceServer) StartNexusOperation(context.Context, *StartNexusOperationRequest) (*StartNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartNexusOperation not implemented") +} +func (UnimplementedHistoryServiceServer) CancelNexusOperation(context.Context, *CancelNexusOperationRequest) (*CancelNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CancelNexusOperation not implemented") +} func (UnimplementedHistoryServiceServer) mustEmbedUnimplementedHistoryServiceServer() {} // UnsafeHistoryServiceServer may be embedded to opt out of forward compatibility for this service. 
@@ -1583,6 +1928,24 @@ func _HistoryService_SignalWithStartWorkflowExecution_Handler(srv interface{}, c return interceptor(ctx, in, info, handler) } +func _HistoryService_ExecuteMultiOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecuteMultiOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).ExecuteMultiOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_ExecuteMultiOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).ExecuteMultiOperation(ctx, req.(*ExecuteMultiOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _HistoryService_RemoveSignalMutableState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RemoveSignalMutableStateRequest) if err := dec(in); err != nil { @@ -1655,6 +2018,24 @@ func _HistoryService_ResetWorkflowExecution_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } +func _HistoryService_UpdateWorkflowExecutionOptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateWorkflowExecutionOptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).UpdateWorkflowExecutionOptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_UpdateWorkflowExecutionOptions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).UpdateWorkflowExecutionOptions(ctx, 
req.(*UpdateWorkflowExecutionOptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _HistoryService_RequestCancelWorkflowExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(RequestCancelWorkflowExecutionRequest) if err := dec(in); err != nil { @@ -2401,6 +2782,240 @@ func _HistoryService_ListTasks_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _HistoryService_CompleteNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompleteNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).CompleteNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_CompleteNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).CompleteNexusOperation(ctx, req.(*CompleteNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_CompleteNexusOperationChasm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompleteNexusOperationChasmRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).CompleteNexusOperationChasm(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_CompleteNexusOperationChasm_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).CompleteNexusOperationChasm(ctx, req.(*CompleteNexusOperationChasmRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _HistoryService_InvokeStateMachineMethod_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InvokeStateMachineMethodRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).InvokeStateMachineMethod(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_InvokeStateMachineMethod_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).InvokeStateMachineMethod(ctx, req.(*InvokeStateMachineMethodRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_DeepHealthCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeepHealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).DeepHealthCheck(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_DeepHealthCheck_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).DeepHealthCheck(ctx, req.(*DeepHealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_SyncWorkflowState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SyncWorkflowStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).SyncWorkflowState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_SyncWorkflowState_FullMethodName, + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).SyncWorkflowState(ctx, req.(*SyncWorkflowStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_UpdateActivityOptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateActivityOptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).UpdateActivityOptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_UpdateActivityOptions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).UpdateActivityOptions(ctx, req.(*UpdateActivityOptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_PauseActivity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseActivityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).PauseActivity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_PauseActivity_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).PauseActivity(ctx, req.(*PauseActivityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_UnpauseActivity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnpauseActivityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).UnpauseActivity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: HistoryService_UnpauseActivity_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).UnpauseActivity(ctx, req.(*UnpauseActivityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_ResetActivity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResetActivityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).ResetActivity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_ResetActivity_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).ResetActivity(ctx, req.(*ResetActivityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_PauseWorkflowExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseWorkflowExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).PauseWorkflowExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_PauseWorkflowExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).PauseWorkflowExecution(ctx, req.(*PauseWorkflowExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_UnpauseWorkflowExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnpauseWorkflowExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + 
if interceptor == nil { + return srv.(HistoryServiceServer).UnpauseWorkflowExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_UnpauseWorkflowExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).UnpauseWorkflowExecution(ctx, req.(*UnpauseWorkflowExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_StartNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).StartNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_StartNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).StartNexusOperation(ctx, req.(*StartNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _HistoryService_CancelNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HistoryServiceServer).CancelNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: HistoryService_CancelNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HistoryServiceServer).CancelNexusOperation(ctx, req.(*CancelNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + // HistoryService_ServiceDesc is the grpc.ServiceDesc for HistoryService service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -2472,6 +3087,10 @@ var HistoryService_ServiceDesc = grpc.ServiceDesc{ MethodName: "SignalWithStartWorkflowExecution", Handler: _HistoryService_SignalWithStartWorkflowExecution_Handler, }, + { + MethodName: "ExecuteMultiOperation", + Handler: _HistoryService_ExecuteMultiOperation_Handler, + }, { MethodName: "RemoveSignalMutableState", Handler: _HistoryService_RemoveSignalMutableState_Handler, @@ -2488,6 +3107,10 @@ var HistoryService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ResetWorkflowExecution", Handler: _HistoryService_ResetWorkflowExecution_Handler, }, + { + MethodName: "UpdateWorkflowExecutionOptions", + Handler: _HistoryService_UpdateWorkflowExecutionOptions_Handler, + }, { MethodName: "RequestCancelWorkflowExecution", Handler: _HistoryService_RequestCancelWorkflowExecution_Handler, @@ -2648,6 +3271,58 @@ var HistoryService_ServiceDesc = grpc.ServiceDesc{ MethodName: "ListTasks", Handler: _HistoryService_ListTasks_Handler, }, + { + MethodName: "CompleteNexusOperation", + Handler: _HistoryService_CompleteNexusOperation_Handler, + }, + { + MethodName: "CompleteNexusOperationChasm", + Handler: _HistoryService_CompleteNexusOperationChasm_Handler, + }, + { + MethodName: "InvokeStateMachineMethod", + Handler: _HistoryService_InvokeStateMachineMethod_Handler, + }, + { + MethodName: "DeepHealthCheck", + Handler: _HistoryService_DeepHealthCheck_Handler, + }, + { + MethodName: "SyncWorkflowState", + Handler: _HistoryService_SyncWorkflowState_Handler, + }, + { + MethodName: "UpdateActivityOptions", + Handler: _HistoryService_UpdateActivityOptions_Handler, + }, + { + MethodName: "PauseActivity", + Handler: _HistoryService_PauseActivity_Handler, + }, + { + MethodName: "UnpauseActivity", + Handler: _HistoryService_UnpauseActivity_Handler, + }, + { + MethodName: "ResetActivity", + Handler: _HistoryService_ResetActivity_Handler, + }, + { + 
MethodName: "PauseWorkflowExecution", + Handler: _HistoryService_PauseWorkflowExecution_Handler, + }, + { + MethodName: "UnpauseWorkflowExecution", + Handler: _HistoryService_UnpauseWorkflowExecution_Handler, + }, + { + MethodName: "StartNexusOperation", + Handler: _HistoryService_StartNexusOperation_Handler, + }, + { + MethodName: "CancelNexusOperation", + Handler: _HistoryService_CancelNexusOperation_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/api/historyservicemock/v1/service.pb.mock.go b/api/historyservicemock/v1/service.pb.mock.go index 0cc7ca04d24..a1a21b05903 100644 --- a/api/historyservicemock/v1/service.pb.mock.go +++ b/api/historyservicemock/v1/service.pb.mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: api/historyservice/v1/service.pb.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package historyservicemock -source api/historyservice/v1/service.pb.go -destination api/historyservicemock/v1/service.pb.mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: historyservice/v1/service.pb.go // Package historyservicemock is a generated GoMock package. package historyservicemock diff --git a/api/historyservicemock/v1/service_grpc.pb.mock.go b/api/historyservicemock/v1/service_grpc.pb.mock.go index 4daae5fb531..581b7f844bf 100644 --- a/api/historyservicemock/v1/service_grpc.pb.mock.go +++ b/api/historyservicemock/v1/service_grpc.pb.mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: api/historyservice/v1/service_grpc.pb.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package historyservicemock -source api/historyservice/v1/service_grpc.pb.go -destination api/historyservicemock/v1/service_grpc.pb.mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: historyservice/v1/service_grpc.pb.go // Package historyservicemock is a generated GoMock package. package historyservicemock @@ -32,8 +13,8 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" historyservice "go.temporal.io/server/api/historyservice/v1" + gomock "go.uber.org/mock/gomock" grpc "google.golang.org/grpc" metadata "google.golang.org/grpc/metadata" ) @@ -42,6 +23,7 @@ import ( type MockHistoryServiceClient struct { ctrl *gomock.Controller recorder *MockHistoryServiceClientMockRecorder + isgomock struct{} } // MockHistoryServiceClientMockRecorder is the mock recorder for MockHistoryServiceClient. @@ -64,7 +46,7 @@ func (m *MockHistoryServiceClient) EXPECT() *MockHistoryServiceClientMockRecorde // AddTasks mocks base method. func (m *MockHistoryServiceClient) AddTasks(ctx context.Context, in *historyservice.AddTasksRequest, opts ...grpc.CallOption) (*historyservice.AddTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -75,16 +57,36 @@ func (m *MockHistoryServiceClient) AddTasks(ctx context.Context, in *historyserv } // AddTasks indicates an expected call of AddTasks. -func (mr *MockHistoryServiceClientMockRecorder) AddTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) AddTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTasks", reflect.TypeOf((*MockHistoryServiceClient)(nil).AddTasks), varargs...) } +// CancelNexusOperation mocks base method. +func (m *MockHistoryServiceClient) CancelNexusOperation(ctx context.Context, in *historyservice.CancelNexusOperationRequest, opts ...grpc.CallOption) (*historyservice.CancelNexusOperationResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CancelNexusOperation", varargs...) + ret0, _ := ret[0].(*historyservice.CancelNexusOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelNexusOperation indicates an expected call of CancelNexusOperation. +func (mr *MockHistoryServiceClientMockRecorder) CancelNexusOperation(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelNexusOperation", reflect.TypeOf((*MockHistoryServiceClient)(nil).CancelNexusOperation), varargs...) +} + // CloseShard mocks base method. func (m *MockHistoryServiceClient) CloseShard(ctx context.Context, in *historyservice.CloseShardRequest, opts ...grpc.CallOption) (*historyservice.CloseShardResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -95,16 +97,76 @@ func (m *MockHistoryServiceClient) CloseShard(ctx context.Context, in *historyse } // CloseShard indicates an expected call of CloseShard. -func (mr *MockHistoryServiceClientMockRecorder) CloseShard(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) CloseShard(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseShard", reflect.TypeOf((*MockHistoryServiceClient)(nil).CloseShard), varargs...) } +// CompleteNexusOperation mocks base method. +func (m *MockHistoryServiceClient) CompleteNexusOperation(ctx context.Context, in *historyservice.CompleteNexusOperationRequest, opts ...grpc.CallOption) (*historyservice.CompleteNexusOperationResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CompleteNexusOperation", varargs...) + ret0, _ := ret[0].(*historyservice.CompleteNexusOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteNexusOperation indicates an expected call of CompleteNexusOperation. +func (mr *MockHistoryServiceClientMockRecorder) CompleteNexusOperation(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteNexusOperation", reflect.TypeOf((*MockHistoryServiceClient)(nil).CompleteNexusOperation), varargs...) +} + +// CompleteNexusOperationChasm mocks base method. +func (m *MockHistoryServiceClient) CompleteNexusOperationChasm(ctx context.Context, in *historyservice.CompleteNexusOperationChasmRequest, opts ...grpc.CallOption) (*historyservice.CompleteNexusOperationChasmResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CompleteNexusOperationChasm", varargs...) + ret0, _ := ret[0].(*historyservice.CompleteNexusOperationChasmResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteNexusOperationChasm indicates an expected call of CompleteNexusOperationChasm. +func (mr *MockHistoryServiceClientMockRecorder) CompleteNexusOperationChasm(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteNexusOperationChasm", reflect.TypeOf((*MockHistoryServiceClient)(nil).CompleteNexusOperationChasm), varargs...) +} + +// DeepHealthCheck mocks base method. +func (m *MockHistoryServiceClient) DeepHealthCheck(ctx context.Context, in *historyservice.DeepHealthCheckRequest, opts ...grpc.CallOption) (*historyservice.DeepHealthCheckResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeepHealthCheck", varargs...) + ret0, _ := ret[0].(*historyservice.DeepHealthCheckResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeepHealthCheck indicates an expected call of DeepHealthCheck. +func (mr *MockHistoryServiceClientMockRecorder) DeepHealthCheck(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeepHealthCheck", reflect.TypeOf((*MockHistoryServiceClient)(nil).DeepHealthCheck), varargs...) +} + // DeleteDLQTasks mocks base method. func (m *MockHistoryServiceClient) DeleteDLQTasks(ctx context.Context, in *historyservice.DeleteDLQTasksRequest, opts ...grpc.CallOption) (*historyservice.DeleteDLQTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -115,16 +177,16 @@ func (m *MockHistoryServiceClient) DeleteDLQTasks(ctx context.Context, in *histo } // DeleteDLQTasks indicates an expected call of DeleteDLQTasks. -func (mr *MockHistoryServiceClientMockRecorder) DeleteDLQTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) DeleteDLQTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDLQTasks", reflect.TypeOf((*MockHistoryServiceClient)(nil).DeleteDLQTasks), varargs...) } // DeleteWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) DeleteWorkflowExecution(ctx context.Context, in *historyservice.DeleteWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.DeleteWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -135,16 +197,16 @@ func (m *MockHistoryServiceClient) DeleteWorkflowExecution(ctx context.Context, } // DeleteWorkflowExecution indicates an expected call of DeleteWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) DeleteWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) DeleteWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).DeleteWorkflowExecution), varargs...) } // DeleteWorkflowVisibilityRecord mocks base method. func (m *MockHistoryServiceClient) DeleteWorkflowVisibilityRecord(ctx context.Context, in *historyservice.DeleteWorkflowVisibilityRecordRequest, opts ...grpc.CallOption) (*historyservice.DeleteWorkflowVisibilityRecordResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -155,16 +217,16 @@ func (m *MockHistoryServiceClient) DeleteWorkflowVisibilityRecord(ctx context.Co } // DeleteWorkflowVisibilityRecord indicates an expected call of DeleteWorkflowVisibilityRecord. 
-func (mr *MockHistoryServiceClientMockRecorder) DeleteWorkflowVisibilityRecord(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) DeleteWorkflowVisibilityRecord(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowVisibilityRecord", reflect.TypeOf((*MockHistoryServiceClient)(nil).DeleteWorkflowVisibilityRecord), varargs...) } // DescribeHistoryHost mocks base method. func (m *MockHistoryServiceClient) DescribeHistoryHost(ctx context.Context, in *historyservice.DescribeHistoryHostRequest, opts ...grpc.CallOption) (*historyservice.DescribeHistoryHostResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -175,16 +237,16 @@ func (m *MockHistoryServiceClient) DescribeHistoryHost(ctx context.Context, in * } // DescribeHistoryHost indicates an expected call of DescribeHistoryHost. -func (mr *MockHistoryServiceClientMockRecorder) DescribeHistoryHost(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) DescribeHistoryHost(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeHistoryHost", reflect.TypeOf((*MockHistoryServiceClient)(nil).DescribeHistoryHost), varargs...) } // DescribeMutableState mocks base method. 
func (m *MockHistoryServiceClient) DescribeMutableState(ctx context.Context, in *historyservice.DescribeMutableStateRequest, opts ...grpc.CallOption) (*historyservice.DescribeMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -195,16 +257,16 @@ func (m *MockHistoryServiceClient) DescribeMutableState(ctx context.Context, in } // DescribeMutableState indicates an expected call of DescribeMutableState. -func (mr *MockHistoryServiceClientMockRecorder) DescribeMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) DescribeMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMutableState", reflect.TypeOf((*MockHistoryServiceClient)(nil).DescribeMutableState), varargs...) } // DescribeWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) DescribeWorkflowExecution(ctx context.Context, in *historyservice.DescribeWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.DescribeWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -215,16 +277,36 @@ func (m *MockHistoryServiceClient) DescribeWorkflowExecution(ctx context.Context } // DescribeWorkflowExecution indicates an expected call of DescribeWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) DescribeWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) DescribeWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
+ varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).DescribeWorkflowExecution), varargs...) } +// ExecuteMultiOperation mocks base method. +func (m *MockHistoryServiceClient) ExecuteMultiOperation(ctx context.Context, in *historyservice.ExecuteMultiOperationRequest, opts ...grpc.CallOption) (*historyservice.ExecuteMultiOperationResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ExecuteMultiOperation", varargs...) + ret0, _ := ret[0].(*historyservice.ExecuteMultiOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteMultiOperation indicates an expected call of ExecuteMultiOperation. +func (mr *MockHistoryServiceClientMockRecorder) ExecuteMultiOperation(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteMultiOperation", reflect.TypeOf((*MockHistoryServiceClient)(nil).ExecuteMultiOperation), varargs...) +} + // ForceDeleteWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) ForceDeleteWorkflowExecution(ctx context.Context, in *historyservice.ForceDeleteWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.ForceDeleteWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -235,16 +317,16 @@ func (m *MockHistoryServiceClient) ForceDeleteWorkflowExecution(ctx context.Cont } // ForceDeleteWorkflowExecution indicates an expected call of ForceDeleteWorkflowExecution. 
-func (mr *MockHistoryServiceClientMockRecorder) ForceDeleteWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ForceDeleteWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceDeleteWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).ForceDeleteWorkflowExecution), varargs...) } // GenerateLastHistoryReplicationTasks mocks base method. func (m *MockHistoryServiceClient) GenerateLastHistoryReplicationTasks(ctx context.Context, in *historyservice.GenerateLastHistoryReplicationTasksRequest, opts ...grpc.CallOption) (*historyservice.GenerateLastHistoryReplicationTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -255,16 +337,16 @@ func (m *MockHistoryServiceClient) GenerateLastHistoryReplicationTasks(ctx conte } // GenerateLastHistoryReplicationTasks indicates an expected call of GenerateLastHistoryReplicationTasks. -func (mr *MockHistoryServiceClientMockRecorder) GenerateLastHistoryReplicationTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GenerateLastHistoryReplicationTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateLastHistoryReplicationTasks", reflect.TypeOf((*MockHistoryServiceClient)(nil).GenerateLastHistoryReplicationTasks), varargs...) } // GetDLQMessages mocks base method. 
func (m *MockHistoryServiceClient) GetDLQMessages(ctx context.Context, in *historyservice.GetDLQMessagesRequest, opts ...grpc.CallOption) (*historyservice.GetDLQMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -275,16 +357,16 @@ func (m *MockHistoryServiceClient) GetDLQMessages(ctx context.Context, in *histo } // GetDLQMessages indicates an expected call of GetDLQMessages. -func (mr *MockHistoryServiceClientMockRecorder) GetDLQMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetDLQMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQMessages", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetDLQMessages), varargs...) } // GetDLQReplicationMessages mocks base method. func (m *MockHistoryServiceClient) GetDLQReplicationMessages(ctx context.Context, in *historyservice.GetDLQReplicationMessagesRequest, opts ...grpc.CallOption) (*historyservice.GetDLQReplicationMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -295,16 +377,16 @@ func (m *MockHistoryServiceClient) GetDLQReplicationMessages(ctx context.Context } // GetDLQReplicationMessages indicates an expected call of GetDLQReplicationMessages. -func (mr *MockHistoryServiceClientMockRecorder) GetDLQReplicationMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetDLQReplicationMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQReplicationMessages", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetDLQReplicationMessages), varargs...) } // GetDLQTasks mocks base method. func (m *MockHistoryServiceClient) GetDLQTasks(ctx context.Context, in *historyservice.GetDLQTasksRequest, opts ...grpc.CallOption) (*historyservice.GetDLQTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -315,16 +397,16 @@ func (m *MockHistoryServiceClient) GetDLQTasks(ctx context.Context, in *historys } // GetDLQTasks indicates an expected call of GetDLQTasks. -func (mr *MockHistoryServiceClientMockRecorder) GetDLQTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetDLQTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQTasks", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetDLQTasks), varargs...) } // GetMutableState mocks base method. func (m *MockHistoryServiceClient) GetMutableState(ctx context.Context, in *historyservice.GetMutableStateRequest, opts ...grpc.CallOption) (*historyservice.GetMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -335,16 +417,16 @@ func (m *MockHistoryServiceClient) GetMutableState(ctx context.Context, in *hist } // GetMutableState indicates an expected call of GetMutableState. 
-func (mr *MockHistoryServiceClientMockRecorder) GetMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMutableState", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetMutableState), varargs...) } // GetReplicationMessages mocks base method. func (m *MockHistoryServiceClient) GetReplicationMessages(ctx context.Context, in *historyservice.GetReplicationMessagesRequest, opts ...grpc.CallOption) (*historyservice.GetReplicationMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -355,16 +437,16 @@ func (m *MockHistoryServiceClient) GetReplicationMessages(ctx context.Context, i } // GetReplicationMessages indicates an expected call of GetReplicationMessages. -func (mr *MockHistoryServiceClientMockRecorder) GetReplicationMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetReplicationMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationMessages", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetReplicationMessages), varargs...) } // GetReplicationStatus mocks base method. 
func (m *MockHistoryServiceClient) GetReplicationStatus(ctx context.Context, in *historyservice.GetReplicationStatusRequest, opts ...grpc.CallOption) (*historyservice.GetReplicationStatusResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -375,16 +457,16 @@ func (m *MockHistoryServiceClient) GetReplicationStatus(ctx context.Context, in } // GetReplicationStatus indicates an expected call of GetReplicationStatus. -func (mr *MockHistoryServiceClientMockRecorder) GetReplicationStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetReplicationStatus(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationStatus", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetReplicationStatus), varargs...) } // GetShard mocks base method. func (m *MockHistoryServiceClient) GetShard(ctx context.Context, in *historyservice.GetShardRequest, opts ...grpc.CallOption) (*historyservice.GetShardResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -395,16 +477,16 @@ func (m *MockHistoryServiceClient) GetShard(ctx context.Context, in *historyserv } // GetShard indicates an expected call of GetShard. -func (mr *MockHistoryServiceClientMockRecorder) GetShard(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetShard(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetShard), varargs...) } // GetWorkflowExecutionHistory mocks base method. func (m *MockHistoryServiceClient) GetWorkflowExecutionHistory(ctx context.Context, in *historyservice.GetWorkflowExecutionHistoryRequest, opts ...grpc.CallOption) (*historyservice.GetWorkflowExecutionHistoryResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -415,16 +497,16 @@ func (m *MockHistoryServiceClient) GetWorkflowExecutionHistory(ctx context.Conte } // GetWorkflowExecutionHistory indicates an expected call of GetWorkflowExecutionHistory. -func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionHistory(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionHistory(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionHistory", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetWorkflowExecutionHistory), varargs...) } // GetWorkflowExecutionHistoryReverse mocks base method. func (m *MockHistoryServiceClient) GetWorkflowExecutionHistoryReverse(ctx context.Context, in *historyservice.GetWorkflowExecutionHistoryReverseRequest, opts ...grpc.CallOption) (*historyservice.GetWorkflowExecutionHistoryReverseResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -435,16 +517,16 @@ func (m *MockHistoryServiceClient) GetWorkflowExecutionHistoryReverse(ctx contex } // GetWorkflowExecutionHistoryReverse indicates an expected call of GetWorkflowExecutionHistoryReverse. 
-func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionHistoryReverse(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionHistoryReverse(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionHistoryReverse", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetWorkflowExecutionHistoryReverse), varargs...) } // GetWorkflowExecutionRawHistory mocks base method. func (m *MockHistoryServiceClient) GetWorkflowExecutionRawHistory(ctx context.Context, in *historyservice.GetWorkflowExecutionRawHistoryRequest, opts ...grpc.CallOption) (*historyservice.GetWorkflowExecutionRawHistoryResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -455,16 +537,16 @@ func (m *MockHistoryServiceClient) GetWorkflowExecutionRawHistory(ctx context.Co } // GetWorkflowExecutionRawHistory indicates an expected call of GetWorkflowExecutionRawHistory. -func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionRawHistory(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionRawHistory(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistory", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetWorkflowExecutionRawHistory), varargs...) } // GetWorkflowExecutionRawHistoryV2 mocks base method. 
func (m *MockHistoryServiceClient) GetWorkflowExecutionRawHistoryV2(ctx context.Context, in *historyservice.GetWorkflowExecutionRawHistoryV2Request, opts ...grpc.CallOption) (*historyservice.GetWorkflowExecutionRawHistoryV2Response, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -475,16 +557,16 @@ func (m *MockHistoryServiceClient) GetWorkflowExecutionRawHistoryV2(ctx context. } // GetWorkflowExecutionRawHistoryV2 indicates an expected call of GetWorkflowExecutionRawHistoryV2. -func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionRawHistoryV2(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) GetWorkflowExecutionRawHistoryV2(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistoryV2", reflect.TypeOf((*MockHistoryServiceClient)(nil).GetWorkflowExecutionRawHistoryV2), varargs...) } // ImportWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) ImportWorkflowExecution(ctx context.Context, in *historyservice.ImportWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.ImportWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -495,16 +577,36 @@ func (m *MockHistoryServiceClient) ImportWorkflowExecution(ctx context.Context, } // ImportWorkflowExecution indicates an expected call of ImportWorkflowExecution. 
-func (mr *MockHistoryServiceClientMockRecorder) ImportWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ImportWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).ImportWorkflowExecution), varargs...) } +// InvokeStateMachineMethod mocks base method. +func (m *MockHistoryServiceClient) InvokeStateMachineMethod(ctx context.Context, in *historyservice.InvokeStateMachineMethodRequest, opts ...grpc.CallOption) (*historyservice.InvokeStateMachineMethodResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "InvokeStateMachineMethod", varargs...) + ret0, _ := ret[0].(*historyservice.InvokeStateMachineMethodResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InvokeStateMachineMethod indicates an expected call of InvokeStateMachineMethod. +func (mr *MockHistoryServiceClientMockRecorder) InvokeStateMachineMethod(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InvokeStateMachineMethod", reflect.TypeOf((*MockHistoryServiceClient)(nil).InvokeStateMachineMethod), varargs...) +} + // IsActivityTaskValid mocks base method. 
func (m *MockHistoryServiceClient) IsActivityTaskValid(ctx context.Context, in *historyservice.IsActivityTaskValidRequest, opts ...grpc.CallOption) (*historyservice.IsActivityTaskValidResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -515,16 +617,16 @@ func (m *MockHistoryServiceClient) IsActivityTaskValid(ctx context.Context, in * } // IsActivityTaskValid indicates an expected call of IsActivityTaskValid. -func (mr *MockHistoryServiceClientMockRecorder) IsActivityTaskValid(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) IsActivityTaskValid(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActivityTaskValid", reflect.TypeOf((*MockHistoryServiceClient)(nil).IsActivityTaskValid), varargs...) } // IsWorkflowTaskValid mocks base method. func (m *MockHistoryServiceClient) IsWorkflowTaskValid(ctx context.Context, in *historyservice.IsWorkflowTaskValidRequest, opts ...grpc.CallOption) (*historyservice.IsWorkflowTaskValidResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -535,16 +637,16 @@ func (m *MockHistoryServiceClient) IsWorkflowTaskValid(ctx context.Context, in * } // IsWorkflowTaskValid indicates an expected call of IsWorkflowTaskValid. -func (mr *MockHistoryServiceClientMockRecorder) IsWorkflowTaskValid(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) IsWorkflowTaskValid(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsWorkflowTaskValid", reflect.TypeOf((*MockHistoryServiceClient)(nil).IsWorkflowTaskValid), varargs...) } // ListQueues mocks base method. func (m *MockHistoryServiceClient) ListQueues(ctx context.Context, in *historyservice.ListQueuesRequest, opts ...grpc.CallOption) (*historyservice.ListQueuesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -555,16 +657,16 @@ func (m *MockHistoryServiceClient) ListQueues(ctx context.Context, in *historyse } // ListQueues indicates an expected call of ListQueues. -func (mr *MockHistoryServiceClientMockRecorder) ListQueues(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ListQueues(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListQueues", reflect.TypeOf((*MockHistoryServiceClient)(nil).ListQueues), varargs...) } // ListTasks mocks base method. func (m *MockHistoryServiceClient) ListTasks(ctx context.Context, in *historyservice.ListTasksRequest, opts ...grpc.CallOption) (*historyservice.ListTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -575,16 +677,16 @@ func (m *MockHistoryServiceClient) ListTasks(ctx context.Context, in *historyser } // ListTasks indicates an expected call of ListTasks. -func (mr *MockHistoryServiceClientMockRecorder) ListTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ListTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTasks", reflect.TypeOf((*MockHistoryServiceClient)(nil).ListTasks), varargs...) } // MergeDLQMessages mocks base method. func (m *MockHistoryServiceClient) MergeDLQMessages(ctx context.Context, in *historyservice.MergeDLQMessagesRequest, opts ...grpc.CallOption) (*historyservice.MergeDLQMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -595,16 +697,56 @@ func (m *MockHistoryServiceClient) MergeDLQMessages(ctx context.Context, in *his } // MergeDLQMessages indicates an expected call of MergeDLQMessages. -func (mr *MockHistoryServiceClientMockRecorder) MergeDLQMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) MergeDLQMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQMessages", reflect.TypeOf((*MockHistoryServiceClient)(nil).MergeDLQMessages), varargs...) } +// PauseActivity mocks base method. +func (m *MockHistoryServiceClient) PauseActivity(ctx context.Context, in *historyservice.PauseActivityRequest, opts ...grpc.CallOption) (*historyservice.PauseActivityResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PauseActivity", varargs...) + ret0, _ := ret[0].(*historyservice.PauseActivityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PauseActivity indicates an expected call of PauseActivity. +func (mr *MockHistoryServiceClientMockRecorder) PauseActivity(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseActivity", reflect.TypeOf((*MockHistoryServiceClient)(nil).PauseActivity), varargs...) +} + +// PauseWorkflowExecution mocks base method. +func (m *MockHistoryServiceClient) PauseWorkflowExecution(ctx context.Context, in *historyservice.PauseWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.PauseWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PauseWorkflowExecution", varargs...) + ret0, _ := ret[0].(*historyservice.PauseWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PauseWorkflowExecution indicates an expected call of PauseWorkflowExecution. +func (mr *MockHistoryServiceClientMockRecorder) PauseWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).PauseWorkflowExecution), varargs...) +} + // PollMutableState mocks base method. func (m *MockHistoryServiceClient) PollMutableState(ctx context.Context, in *historyservice.PollMutableStateRequest, opts ...grpc.CallOption) (*historyservice.PollMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -615,16 +757,16 @@ func (m *MockHistoryServiceClient) PollMutableState(ctx context.Context, in *his } // PollMutableState indicates an expected call of PollMutableState. -func (mr *MockHistoryServiceClientMockRecorder) PollMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) PollMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
+ varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollMutableState", reflect.TypeOf((*MockHistoryServiceClient)(nil).PollMutableState), varargs...) } // PollWorkflowExecutionUpdate mocks base method. func (m *MockHistoryServiceClient) PollWorkflowExecutionUpdate(ctx context.Context, in *historyservice.PollWorkflowExecutionUpdateRequest, opts ...grpc.CallOption) (*historyservice.PollWorkflowExecutionUpdateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -635,16 +777,16 @@ func (m *MockHistoryServiceClient) PollWorkflowExecutionUpdate(ctx context.Conte } // PollWorkflowExecutionUpdate indicates an expected call of PollWorkflowExecutionUpdate. -func (mr *MockHistoryServiceClientMockRecorder) PollWorkflowExecutionUpdate(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) PollWorkflowExecutionUpdate(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollWorkflowExecutionUpdate", reflect.TypeOf((*MockHistoryServiceClient)(nil).PollWorkflowExecutionUpdate), varargs...) } // PurgeDLQMessages mocks base method. func (m *MockHistoryServiceClient) PurgeDLQMessages(ctx context.Context, in *historyservice.PurgeDLQMessagesRequest, opts ...grpc.CallOption) (*historyservice.PurgeDLQMessagesResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -655,16 +797,16 @@ func (m *MockHistoryServiceClient) PurgeDLQMessages(ctx context.Context, in *his } // PurgeDLQMessages indicates an expected call of PurgeDLQMessages. 
-func (mr *MockHistoryServiceClientMockRecorder) PurgeDLQMessages(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) PurgeDLQMessages(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQMessages", reflect.TypeOf((*MockHistoryServiceClient)(nil).PurgeDLQMessages), varargs...) } // QueryWorkflow mocks base method. func (m *MockHistoryServiceClient) QueryWorkflow(ctx context.Context, in *historyservice.QueryWorkflowRequest, opts ...grpc.CallOption) (*historyservice.QueryWorkflowResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -675,16 +817,16 @@ func (m *MockHistoryServiceClient) QueryWorkflow(ctx context.Context, in *histor } // QueryWorkflow indicates an expected call of QueryWorkflow. -func (mr *MockHistoryServiceClientMockRecorder) QueryWorkflow(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) QueryWorkflow(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryWorkflow", reflect.TypeOf((*MockHistoryServiceClient)(nil).QueryWorkflow), varargs...) } // ReapplyEvents mocks base method. 
func (m *MockHistoryServiceClient) ReapplyEvents(ctx context.Context, in *historyservice.ReapplyEventsRequest, opts ...grpc.CallOption) (*historyservice.ReapplyEventsResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -695,16 +837,16 @@ func (m *MockHistoryServiceClient) ReapplyEvents(ctx context.Context, in *histor } // ReapplyEvents indicates an expected call of ReapplyEvents. -func (mr *MockHistoryServiceClientMockRecorder) ReapplyEvents(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ReapplyEvents(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapplyEvents", reflect.TypeOf((*MockHistoryServiceClient)(nil).ReapplyEvents), varargs...) } // RebuildMutableState mocks base method. func (m *MockHistoryServiceClient) RebuildMutableState(ctx context.Context, in *historyservice.RebuildMutableStateRequest, opts ...grpc.CallOption) (*historyservice.RebuildMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -715,16 +857,16 @@ func (m *MockHistoryServiceClient) RebuildMutableState(ctx context.Context, in * } // RebuildMutableState indicates an expected call of RebuildMutableState. -func (mr *MockHistoryServiceClientMockRecorder) RebuildMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RebuildMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebuildMutableState", reflect.TypeOf((*MockHistoryServiceClient)(nil).RebuildMutableState), varargs...) } // RecordActivityTaskHeartbeat mocks base method. func (m *MockHistoryServiceClient) RecordActivityTaskHeartbeat(ctx context.Context, in *historyservice.RecordActivityTaskHeartbeatRequest, opts ...grpc.CallOption) (*historyservice.RecordActivityTaskHeartbeatResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -735,16 +877,16 @@ func (m *MockHistoryServiceClient) RecordActivityTaskHeartbeat(ctx context.Conte } // RecordActivityTaskHeartbeat indicates an expected call of RecordActivityTaskHeartbeat. -func (mr *MockHistoryServiceClientMockRecorder) RecordActivityTaskHeartbeat(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RecordActivityTaskHeartbeat(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordActivityTaskHeartbeat", reflect.TypeOf((*MockHistoryServiceClient)(nil).RecordActivityTaskHeartbeat), varargs...) } // RecordActivityTaskStarted mocks base method. func (m *MockHistoryServiceClient) RecordActivityTaskStarted(ctx context.Context, in *historyservice.RecordActivityTaskStartedRequest, opts ...grpc.CallOption) (*historyservice.RecordActivityTaskStartedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -755,16 +897,16 @@ func (m *MockHistoryServiceClient) RecordActivityTaskStarted(ctx context.Context } // RecordActivityTaskStarted indicates an expected call of RecordActivityTaskStarted. 
-func (mr *MockHistoryServiceClientMockRecorder) RecordActivityTaskStarted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RecordActivityTaskStarted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordActivityTaskStarted", reflect.TypeOf((*MockHistoryServiceClient)(nil).RecordActivityTaskStarted), varargs...) } // RecordChildExecutionCompleted mocks base method. func (m *MockHistoryServiceClient) RecordChildExecutionCompleted(ctx context.Context, in *historyservice.RecordChildExecutionCompletedRequest, opts ...grpc.CallOption) (*historyservice.RecordChildExecutionCompletedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -775,16 +917,16 @@ func (m *MockHistoryServiceClient) RecordChildExecutionCompleted(ctx context.Con } // RecordChildExecutionCompleted indicates an expected call of RecordChildExecutionCompleted. -func (mr *MockHistoryServiceClientMockRecorder) RecordChildExecutionCompleted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RecordChildExecutionCompleted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordChildExecutionCompleted", reflect.TypeOf((*MockHistoryServiceClient)(nil).RecordChildExecutionCompleted), varargs...) } // RecordWorkflowTaskStarted mocks base method. 
func (m *MockHistoryServiceClient) RecordWorkflowTaskStarted(ctx context.Context, in *historyservice.RecordWorkflowTaskStartedRequest, opts ...grpc.CallOption) (*historyservice.RecordWorkflowTaskStartedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -795,16 +937,16 @@ func (m *MockHistoryServiceClient) RecordWorkflowTaskStarted(ctx context.Context } // RecordWorkflowTaskStarted indicates an expected call of RecordWorkflowTaskStarted. -func (mr *MockHistoryServiceClientMockRecorder) RecordWorkflowTaskStarted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RecordWorkflowTaskStarted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordWorkflowTaskStarted", reflect.TypeOf((*MockHistoryServiceClient)(nil).RecordWorkflowTaskStarted), varargs...) } // RefreshWorkflowTasks mocks base method. func (m *MockHistoryServiceClient) RefreshWorkflowTasks(ctx context.Context, in *historyservice.RefreshWorkflowTasksRequest, opts ...grpc.CallOption) (*historyservice.RefreshWorkflowTasksResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -815,16 +957,16 @@ func (m *MockHistoryServiceClient) RefreshWorkflowTasks(ctx context.Context, in } // RefreshWorkflowTasks indicates an expected call of RefreshWorkflowTasks. -func (mr *MockHistoryServiceClientMockRecorder) RefreshWorkflowTasks(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RefreshWorkflowTasks(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
+ varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshWorkflowTasks", reflect.TypeOf((*MockHistoryServiceClient)(nil).RefreshWorkflowTasks), varargs...) } // RemoveSignalMutableState mocks base method. func (m *MockHistoryServiceClient) RemoveSignalMutableState(ctx context.Context, in *historyservice.RemoveSignalMutableStateRequest, opts ...grpc.CallOption) (*historyservice.RemoveSignalMutableStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -835,16 +977,16 @@ func (m *MockHistoryServiceClient) RemoveSignalMutableState(ctx context.Context, } // RemoveSignalMutableState indicates an expected call of RemoveSignalMutableState. -func (mr *MockHistoryServiceClientMockRecorder) RemoveSignalMutableState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RemoveSignalMutableState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSignalMutableState", reflect.TypeOf((*MockHistoryServiceClient)(nil).RemoveSignalMutableState), varargs...) } // RemoveTask mocks base method. func (m *MockHistoryServiceClient) RemoveTask(ctx context.Context, in *historyservice.RemoveTaskRequest, opts ...grpc.CallOption) (*historyservice.RemoveTaskResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -855,16 +997,16 @@ func (m *MockHistoryServiceClient) RemoveTask(ctx context.Context, in *historyse } // RemoveTask indicates an expected call of RemoveTask. 
-func (mr *MockHistoryServiceClientMockRecorder) RemoveTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RemoveTask(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTask", reflect.TypeOf((*MockHistoryServiceClient)(nil).RemoveTask), varargs...) } // ReplicateEventsV2 mocks base method. func (m *MockHistoryServiceClient) ReplicateEventsV2(ctx context.Context, in *historyservice.ReplicateEventsV2Request, opts ...grpc.CallOption) (*historyservice.ReplicateEventsV2Response, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -875,16 +1017,16 @@ func (m *MockHistoryServiceClient) ReplicateEventsV2(ctx context.Context, in *hi } // ReplicateEventsV2 indicates an expected call of ReplicateEventsV2. -func (mr *MockHistoryServiceClientMockRecorder) ReplicateEventsV2(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ReplicateEventsV2(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateEventsV2", reflect.TypeOf((*MockHistoryServiceClient)(nil).ReplicateEventsV2), varargs...) } // ReplicateWorkflowState mocks base method. 
func (m *MockHistoryServiceClient) ReplicateWorkflowState(ctx context.Context, in *historyservice.ReplicateWorkflowStateRequest, opts ...grpc.CallOption) (*historyservice.ReplicateWorkflowStateResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -895,16 +1037,16 @@ func (m *MockHistoryServiceClient) ReplicateWorkflowState(ctx context.Context, i } // ReplicateWorkflowState indicates an expected call of ReplicateWorkflowState. -func (mr *MockHistoryServiceClientMockRecorder) ReplicateWorkflowState(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ReplicateWorkflowState(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateWorkflowState", reflect.TypeOf((*MockHistoryServiceClient)(nil).ReplicateWorkflowState), varargs...) } // RequestCancelWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) RequestCancelWorkflowExecution(ctx context.Context, in *historyservice.RequestCancelWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.RequestCancelWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -915,16 +1057,36 @@ func (m *MockHistoryServiceClient) RequestCancelWorkflowExecution(ctx context.Co } // RequestCancelWorkflowExecution indicates an expected call of RequestCancelWorkflowExecution. 
-func (mr *MockHistoryServiceClientMockRecorder) RequestCancelWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RequestCancelWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestCancelWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).RequestCancelWorkflowExecution), varargs...) } +// ResetActivity mocks base method. +func (m *MockHistoryServiceClient) ResetActivity(ctx context.Context, in *historyservice.ResetActivityRequest, opts ...grpc.CallOption) (*historyservice.ResetActivityResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ResetActivity", varargs...) + ret0, _ := ret[0].(*historyservice.ResetActivityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResetActivity indicates an expected call of ResetActivity. +func (mr *MockHistoryServiceClientMockRecorder) ResetActivity(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetActivity", reflect.TypeOf((*MockHistoryServiceClient)(nil).ResetActivity), varargs...) +} + // ResetStickyTaskQueue mocks base method. 
func (m *MockHistoryServiceClient) ResetStickyTaskQueue(ctx context.Context, in *historyservice.ResetStickyTaskQueueRequest, opts ...grpc.CallOption) (*historyservice.ResetStickyTaskQueueResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -935,16 +1097,16 @@ func (m *MockHistoryServiceClient) ResetStickyTaskQueue(ctx context.Context, in } // ResetStickyTaskQueue indicates an expected call of ResetStickyTaskQueue. -func (mr *MockHistoryServiceClientMockRecorder) ResetStickyTaskQueue(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ResetStickyTaskQueue(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetStickyTaskQueue", reflect.TypeOf((*MockHistoryServiceClient)(nil).ResetStickyTaskQueue), varargs...) } // ResetWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) ResetWorkflowExecution(ctx context.Context, in *historyservice.ResetWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.ResetWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -955,16 +1117,16 @@ func (m *MockHistoryServiceClient) ResetWorkflowExecution(ctx context.Context, i } // ResetWorkflowExecution indicates an expected call of ResetWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) ResetWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ResetWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).ResetWorkflowExecution), varargs...) } // RespondActivityTaskCanceled mocks base method. func (m *MockHistoryServiceClient) RespondActivityTaskCanceled(ctx context.Context, in *historyservice.RespondActivityTaskCanceledRequest, opts ...grpc.CallOption) (*historyservice.RespondActivityTaskCanceledResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -975,16 +1137,16 @@ func (m *MockHistoryServiceClient) RespondActivityTaskCanceled(ctx context.Conte } // RespondActivityTaskCanceled indicates an expected call of RespondActivityTaskCanceled. -func (mr *MockHistoryServiceClientMockRecorder) RespondActivityTaskCanceled(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RespondActivityTaskCanceled(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondActivityTaskCanceled", reflect.TypeOf((*MockHistoryServiceClient)(nil).RespondActivityTaskCanceled), varargs...) } // RespondActivityTaskCompleted mocks base method. func (m *MockHistoryServiceClient) RespondActivityTaskCompleted(ctx context.Context, in *historyservice.RespondActivityTaskCompletedRequest, opts ...grpc.CallOption) (*historyservice.RespondActivityTaskCompletedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -995,16 +1157,16 @@ func (m *MockHistoryServiceClient) RespondActivityTaskCompleted(ctx context.Cont } // RespondActivityTaskCompleted indicates an expected call of RespondActivityTaskCompleted. 
-func (mr *MockHistoryServiceClientMockRecorder) RespondActivityTaskCompleted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RespondActivityTaskCompleted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondActivityTaskCompleted", reflect.TypeOf((*MockHistoryServiceClient)(nil).RespondActivityTaskCompleted), varargs...) } // RespondActivityTaskFailed mocks base method. func (m *MockHistoryServiceClient) RespondActivityTaskFailed(ctx context.Context, in *historyservice.RespondActivityTaskFailedRequest, opts ...grpc.CallOption) (*historyservice.RespondActivityTaskFailedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1015,16 +1177,16 @@ func (m *MockHistoryServiceClient) RespondActivityTaskFailed(ctx context.Context } // RespondActivityTaskFailed indicates an expected call of RespondActivityTaskFailed. -func (mr *MockHistoryServiceClientMockRecorder) RespondActivityTaskFailed(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RespondActivityTaskFailed(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondActivityTaskFailed", reflect.TypeOf((*MockHistoryServiceClient)(nil).RespondActivityTaskFailed), varargs...) } // RespondWorkflowTaskCompleted mocks base method. 
func (m *MockHistoryServiceClient) RespondWorkflowTaskCompleted(ctx context.Context, in *historyservice.RespondWorkflowTaskCompletedRequest, opts ...grpc.CallOption) (*historyservice.RespondWorkflowTaskCompletedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1035,16 +1197,16 @@ func (m *MockHistoryServiceClient) RespondWorkflowTaskCompleted(ctx context.Cont } // RespondWorkflowTaskCompleted indicates an expected call of RespondWorkflowTaskCompleted. -func (mr *MockHistoryServiceClientMockRecorder) RespondWorkflowTaskCompleted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RespondWorkflowTaskCompleted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondWorkflowTaskCompleted", reflect.TypeOf((*MockHistoryServiceClient)(nil).RespondWorkflowTaskCompleted), varargs...) } // RespondWorkflowTaskFailed mocks base method. func (m *MockHistoryServiceClient) RespondWorkflowTaskFailed(ctx context.Context, in *historyservice.RespondWorkflowTaskFailedRequest, opts ...grpc.CallOption) (*historyservice.RespondWorkflowTaskFailedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1055,16 +1217,16 @@ func (m *MockHistoryServiceClient) RespondWorkflowTaskFailed(ctx context.Context } // RespondWorkflowTaskFailed indicates an expected call of RespondWorkflowTaskFailed. 
-func (mr *MockHistoryServiceClientMockRecorder) RespondWorkflowTaskFailed(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) RespondWorkflowTaskFailed(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondWorkflowTaskFailed", reflect.TypeOf((*MockHistoryServiceClient)(nil).RespondWorkflowTaskFailed), varargs...) } // ScheduleWorkflowTask mocks base method. func (m *MockHistoryServiceClient) ScheduleWorkflowTask(ctx context.Context, in *historyservice.ScheduleWorkflowTaskRequest, opts ...grpc.CallOption) (*historyservice.ScheduleWorkflowTaskResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1075,16 +1237,16 @@ func (m *MockHistoryServiceClient) ScheduleWorkflowTask(ctx context.Context, in } // ScheduleWorkflowTask indicates an expected call of ScheduleWorkflowTask. -func (mr *MockHistoryServiceClientMockRecorder) ScheduleWorkflowTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) ScheduleWorkflowTask(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleWorkflowTask", reflect.TypeOf((*MockHistoryServiceClient)(nil).ScheduleWorkflowTask), varargs...) } // SignalWithStartWorkflowExecution mocks base method. 
func (m *MockHistoryServiceClient) SignalWithStartWorkflowExecution(ctx context.Context, in *historyservice.SignalWithStartWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.SignalWithStartWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1095,16 +1257,16 @@ func (m *MockHistoryServiceClient) SignalWithStartWorkflowExecution(ctx context. } // SignalWithStartWorkflowExecution indicates an expected call of SignalWithStartWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) SignalWithStartWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) SignalWithStartWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalWithStartWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).SignalWithStartWorkflowExecution), varargs...) } // SignalWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) SignalWorkflowExecution(ctx context.Context, in *historyservice.SignalWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.SignalWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1115,16 +1277,36 @@ func (m *MockHistoryServiceClient) SignalWorkflowExecution(ctx context.Context, } // SignalWorkflowExecution indicates an expected call of SignalWorkflowExecution. 
-func (mr *MockHistoryServiceClientMockRecorder) SignalWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) SignalWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).SignalWorkflowExecution), varargs...) } +// StartNexusOperation mocks base method. +func (m *MockHistoryServiceClient) StartNexusOperation(ctx context.Context, in *historyservice.StartNexusOperationRequest, opts ...grpc.CallOption) (*historyservice.StartNexusOperationResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "StartNexusOperation", varargs...) + ret0, _ := ret[0].(*historyservice.StartNexusOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartNexusOperation indicates an expected call of StartNexusOperation. +func (mr *MockHistoryServiceClientMockRecorder) StartNexusOperation(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartNexusOperation", reflect.TypeOf((*MockHistoryServiceClient)(nil).StartNexusOperation), varargs...) +} + // StartWorkflowExecution mocks base method. 
func (m *MockHistoryServiceClient) StartWorkflowExecution(ctx context.Context, in *historyservice.StartWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.StartWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1135,16 +1317,16 @@ func (m *MockHistoryServiceClient) StartWorkflowExecution(ctx context.Context, i } // StartWorkflowExecution indicates an expected call of StartWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) StartWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) StartWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).StartWorkflowExecution), varargs...) } // StreamWorkflowReplicationMessages mocks base method. func (m *MockHistoryServiceClient) StreamWorkflowReplicationMessages(ctx context.Context, opts ...grpc.CallOption) (historyservice.HistoryService_StreamWorkflowReplicationMessagesClient, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx} + varargs := []any{ctx} for _, a := range opts { varargs = append(varargs, a) } @@ -1155,16 +1337,16 @@ func (m *MockHistoryServiceClient) StreamWorkflowReplicationMessages(ctx context } // StreamWorkflowReplicationMessages indicates an expected call of StreamWorkflowReplicationMessages. -func (mr *MockHistoryServiceClientMockRecorder) StreamWorkflowReplicationMessages(ctx interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) StreamWorkflowReplicationMessages(ctx any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx}, opts...) 
+ varargs := append([]any{ctx}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWorkflowReplicationMessages", reflect.TypeOf((*MockHistoryServiceClient)(nil).StreamWorkflowReplicationMessages), varargs...) } // SyncActivity mocks base method. func (m *MockHistoryServiceClient) SyncActivity(ctx context.Context, in *historyservice.SyncActivityRequest, opts ...grpc.CallOption) (*historyservice.SyncActivityResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1175,16 +1357,16 @@ func (m *MockHistoryServiceClient) SyncActivity(ctx context.Context, in *history } // SyncActivity indicates an expected call of SyncActivity. -func (mr *MockHistoryServiceClientMockRecorder) SyncActivity(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) SyncActivity(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncActivity", reflect.TypeOf((*MockHistoryServiceClient)(nil).SyncActivity), varargs...) } // SyncShardStatus mocks base method. func (m *MockHistoryServiceClient) SyncShardStatus(ctx context.Context, in *historyservice.SyncShardStatusRequest, opts ...grpc.CallOption) (*historyservice.SyncShardStatusResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1195,16 +1377,36 @@ func (m *MockHistoryServiceClient) SyncShardStatus(ctx context.Context, in *hist } // SyncShardStatus indicates an expected call of SyncShardStatus. 
-func (mr *MockHistoryServiceClientMockRecorder) SyncShardStatus(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) SyncShardStatus(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncShardStatus", reflect.TypeOf((*MockHistoryServiceClient)(nil).SyncShardStatus), varargs...) } +// SyncWorkflowState mocks base method. +func (m *MockHistoryServiceClient) SyncWorkflowState(ctx context.Context, in *historyservice.SyncWorkflowStateRequest, opts ...grpc.CallOption) (*historyservice.SyncWorkflowStateResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SyncWorkflowState", varargs...) + ret0, _ := ret[0].(*historyservice.SyncWorkflowStateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncWorkflowState indicates an expected call of SyncWorkflowState. +func (mr *MockHistoryServiceClientMockRecorder) SyncWorkflowState(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWorkflowState", reflect.TypeOf((*MockHistoryServiceClient)(nil).SyncWorkflowState), varargs...) +} + // TerminateWorkflowExecution mocks base method. 
func (m *MockHistoryServiceClient) TerminateWorkflowExecution(ctx context.Context, in *historyservice.TerminateWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.TerminateWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1215,16 +1417,76 @@ func (m *MockHistoryServiceClient) TerminateWorkflowExecution(ctx context.Contex } // TerminateWorkflowExecution indicates an expected call of TerminateWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) TerminateWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) TerminateWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TerminateWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).TerminateWorkflowExecution), varargs...) } +// UnpauseActivity mocks base method. +func (m *MockHistoryServiceClient) UnpauseActivity(ctx context.Context, in *historyservice.UnpauseActivityRequest, opts ...grpc.CallOption) (*historyservice.UnpauseActivityResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UnpauseActivity", varargs...) + ret0, _ := ret[0].(*historyservice.UnpauseActivityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnpauseActivity indicates an expected call of UnpauseActivity. +func (mr *MockHistoryServiceClientMockRecorder) UnpauseActivity(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpauseActivity", reflect.TypeOf((*MockHistoryServiceClient)(nil).UnpauseActivity), varargs...) +} + +// UnpauseWorkflowExecution mocks base method. +func (m *MockHistoryServiceClient) UnpauseWorkflowExecution(ctx context.Context, in *historyservice.UnpauseWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.UnpauseWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UnpauseWorkflowExecution", varargs...) + ret0, _ := ret[0].(*historyservice.UnpauseWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnpauseWorkflowExecution indicates an expected call of UnpauseWorkflowExecution. +func (mr *MockHistoryServiceClientMockRecorder) UnpauseWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpauseWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).UnpauseWorkflowExecution), varargs...) +} + +// UpdateActivityOptions mocks base method. +func (m *MockHistoryServiceClient) UpdateActivityOptions(ctx context.Context, in *historyservice.UpdateActivityOptionsRequest, opts ...grpc.CallOption) (*historyservice.UpdateActivityOptionsResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateActivityOptions", varargs...) + ret0, _ := ret[0].(*historyservice.UpdateActivityOptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateActivityOptions indicates an expected call of UpdateActivityOptions. +func (mr *MockHistoryServiceClientMockRecorder) UpdateActivityOptions(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateActivityOptions", reflect.TypeOf((*MockHistoryServiceClient)(nil).UpdateActivityOptions), varargs...) +} + // UpdateWorkflowExecution mocks base method. func (m *MockHistoryServiceClient) UpdateWorkflowExecution(ctx context.Context, in *historyservice.UpdateWorkflowExecutionRequest, opts ...grpc.CallOption) (*historyservice.UpdateWorkflowExecutionResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1235,16 +1497,36 @@ func (m *MockHistoryServiceClient) UpdateWorkflowExecution(ctx context.Context, } // UpdateWorkflowExecution indicates an expected call of UpdateWorkflowExecution. -func (mr *MockHistoryServiceClientMockRecorder) UpdateWorkflowExecution(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) UpdateWorkflowExecution(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkflowExecution", reflect.TypeOf((*MockHistoryServiceClient)(nil).UpdateWorkflowExecution), varargs...) } +// UpdateWorkflowExecutionOptions mocks base method. +func (m *MockHistoryServiceClient) UpdateWorkflowExecutionOptions(ctx context.Context, in *historyservice.UpdateWorkflowExecutionOptionsRequest, opts ...grpc.CallOption) (*historyservice.UpdateWorkflowExecutionOptionsResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateWorkflowExecutionOptions", varargs...) + ret0, _ := ret[0].(*historyservice.UpdateWorkflowExecutionOptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWorkflowExecutionOptions indicates an expected call of UpdateWorkflowExecutionOptions. 
+func (mr *MockHistoryServiceClientMockRecorder) UpdateWorkflowExecutionOptions(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkflowExecutionOptions", reflect.TypeOf((*MockHistoryServiceClient)(nil).UpdateWorkflowExecutionOptions), varargs...) +} + // VerifyChildExecutionCompletionRecorded mocks base method. func (m *MockHistoryServiceClient) VerifyChildExecutionCompletionRecorded(ctx context.Context, in *historyservice.VerifyChildExecutionCompletionRecordedRequest, opts ...grpc.CallOption) (*historyservice.VerifyChildExecutionCompletionRecordedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1255,16 +1537,16 @@ func (m *MockHistoryServiceClient) VerifyChildExecutionCompletionRecorded(ctx co } // VerifyChildExecutionCompletionRecorded indicates an expected call of VerifyChildExecutionCompletionRecorded. -func (mr *MockHistoryServiceClientMockRecorder) VerifyChildExecutionCompletionRecorded(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) VerifyChildExecutionCompletionRecorded(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChildExecutionCompletionRecorded", reflect.TypeOf((*MockHistoryServiceClient)(nil).VerifyChildExecutionCompletionRecorded), varargs...) } // VerifyFirstWorkflowTaskScheduled mocks base method. 
func (m *MockHistoryServiceClient) VerifyFirstWorkflowTaskScheduled(ctx context.Context, in *historyservice.VerifyFirstWorkflowTaskScheduledRequest, opts ...grpc.CallOption) (*historyservice.VerifyFirstWorkflowTaskScheduledResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -1275,9 +1557,9 @@ func (m *MockHistoryServiceClient) VerifyFirstWorkflowTaskScheduled(ctx context. } // VerifyFirstWorkflowTaskScheduled indicates an expected call of VerifyFirstWorkflowTaskScheduled. -func (mr *MockHistoryServiceClientMockRecorder) VerifyFirstWorkflowTaskScheduled(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryServiceClientMockRecorder) VerifyFirstWorkflowTaskScheduled(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyFirstWorkflowTaskScheduled", reflect.TypeOf((*MockHistoryServiceClient)(nil).VerifyFirstWorkflowTaskScheduled), varargs...) } @@ -1285,6 +1567,7 @@ func (mr *MockHistoryServiceClientMockRecorder) VerifyFirstWorkflowTaskScheduled type MockHistoryService_StreamWorkflowReplicationMessagesClient struct { ctrl *gomock.Controller recorder *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder + isgomock struct{} } // MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder is the mock recorder for MockHistoryService_StreamWorkflowReplicationMessagesClient. @@ -1371,7 +1654,7 @@ func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesClient) RecvMsg(m } // RecvMsg indicates an expected call of RecvMsg. 
-func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder) RecvMsg(m interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder) RecvMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesClient)(nil).RecvMsg), m) } @@ -1385,7 +1668,7 @@ func (m *MockHistoryService_StreamWorkflowReplicationMessagesClient) Send(arg0 * } // Send indicates an expected call of Send. -func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder) Send(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder) Send(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesClient)(nil).Send), arg0) } @@ -1399,7 +1682,7 @@ func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesClient) SendMsg(m } // SendMsg indicates an expected call of SendMsg. -func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder) SendMsg(m interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder) SendMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesClient)(nil).SendMsg), m) } @@ -1422,6 +1705,7 @@ func (mr *MockHistoryService_StreamWorkflowReplicationMessagesClientMockRecorder type MockHistoryServiceServer struct { ctrl *gomock.Controller recorder *MockHistoryServiceServerMockRecorder + isgomock struct{} } // MockHistoryServiceServerMockRecorder is the mock recorder for MockHistoryServiceServer. 
@@ -1451,11 +1735,26 @@ func (m *MockHistoryServiceServer) AddTasks(arg0 context.Context, arg1 *historys } // AddTasks indicates an expected call of AddTasks. -func (mr *MockHistoryServiceServerMockRecorder) AddTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) AddTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTasks", reflect.TypeOf((*MockHistoryServiceServer)(nil).AddTasks), arg0, arg1) } +// CancelNexusOperation mocks base method. +func (m *MockHistoryServiceServer) CancelNexusOperation(arg0 context.Context, arg1 *historyservice.CancelNexusOperationRequest) (*historyservice.CancelNexusOperationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelNexusOperation", arg0, arg1) + ret0, _ := ret[0].(*historyservice.CancelNexusOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelNexusOperation indicates an expected call of CancelNexusOperation. +func (mr *MockHistoryServiceServerMockRecorder) CancelNexusOperation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelNexusOperation", reflect.TypeOf((*MockHistoryServiceServer)(nil).CancelNexusOperation), arg0, arg1) +} + // CloseShard mocks base method. func (m *MockHistoryServiceServer) CloseShard(arg0 context.Context, arg1 *historyservice.CloseShardRequest) (*historyservice.CloseShardResponse, error) { m.ctrl.T.Helper() @@ -1466,11 +1765,56 @@ func (m *MockHistoryServiceServer) CloseShard(arg0 context.Context, arg1 *histor } // CloseShard indicates an expected call of CloseShard. 
-func (mr *MockHistoryServiceServerMockRecorder) CloseShard(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) CloseShard(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseShard", reflect.TypeOf((*MockHistoryServiceServer)(nil).CloseShard), arg0, arg1) } +// CompleteNexusOperation mocks base method. +func (m *MockHistoryServiceServer) CompleteNexusOperation(arg0 context.Context, arg1 *historyservice.CompleteNexusOperationRequest) (*historyservice.CompleteNexusOperationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteNexusOperation", arg0, arg1) + ret0, _ := ret[0].(*historyservice.CompleteNexusOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteNexusOperation indicates an expected call of CompleteNexusOperation. +func (mr *MockHistoryServiceServerMockRecorder) CompleteNexusOperation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteNexusOperation", reflect.TypeOf((*MockHistoryServiceServer)(nil).CompleteNexusOperation), arg0, arg1) +} + +// CompleteNexusOperationChasm mocks base method. +func (m *MockHistoryServiceServer) CompleteNexusOperationChasm(arg0 context.Context, arg1 *historyservice.CompleteNexusOperationChasmRequest) (*historyservice.CompleteNexusOperationChasmResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CompleteNexusOperationChasm", arg0, arg1) + ret0, _ := ret[0].(*historyservice.CompleteNexusOperationChasmResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CompleteNexusOperationChasm indicates an expected call of CompleteNexusOperationChasm. 
+func (mr *MockHistoryServiceServerMockRecorder) CompleteNexusOperationChasm(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteNexusOperationChasm", reflect.TypeOf((*MockHistoryServiceServer)(nil).CompleteNexusOperationChasm), arg0, arg1) +} + +// DeepHealthCheck mocks base method. +func (m *MockHistoryServiceServer) DeepHealthCheck(arg0 context.Context, arg1 *historyservice.DeepHealthCheckRequest) (*historyservice.DeepHealthCheckResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeepHealthCheck", arg0, arg1) + ret0, _ := ret[0].(*historyservice.DeepHealthCheckResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeepHealthCheck indicates an expected call of DeepHealthCheck. +func (mr *MockHistoryServiceServerMockRecorder) DeepHealthCheck(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeepHealthCheck", reflect.TypeOf((*MockHistoryServiceServer)(nil).DeepHealthCheck), arg0, arg1) +} + // DeleteDLQTasks mocks base method. func (m *MockHistoryServiceServer) DeleteDLQTasks(arg0 context.Context, arg1 *historyservice.DeleteDLQTasksRequest) (*historyservice.DeleteDLQTasksResponse, error) { m.ctrl.T.Helper() @@ -1481,7 +1825,7 @@ func (m *MockHistoryServiceServer) DeleteDLQTasks(arg0 context.Context, arg1 *hi } // DeleteDLQTasks indicates an expected call of DeleteDLQTasks. 
-func (mr *MockHistoryServiceServerMockRecorder) DeleteDLQTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) DeleteDLQTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteDLQTasks", reflect.TypeOf((*MockHistoryServiceServer)(nil).DeleteDLQTasks), arg0, arg1) } @@ -1496,7 +1840,7 @@ func (m *MockHistoryServiceServer) DeleteWorkflowExecution(arg0 context.Context, } // DeleteWorkflowExecution indicates an expected call of DeleteWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) DeleteWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) DeleteWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).DeleteWorkflowExecution), arg0, arg1) } @@ -1511,7 +1855,7 @@ func (m *MockHistoryServiceServer) DeleteWorkflowVisibilityRecord(arg0 context.C } // DeleteWorkflowVisibilityRecord indicates an expected call of DeleteWorkflowVisibilityRecord. -func (mr *MockHistoryServiceServerMockRecorder) DeleteWorkflowVisibilityRecord(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) DeleteWorkflowVisibilityRecord(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteWorkflowVisibilityRecord", reflect.TypeOf((*MockHistoryServiceServer)(nil).DeleteWorkflowVisibilityRecord), arg0, arg1) } @@ -1526,7 +1870,7 @@ func (m *MockHistoryServiceServer) DescribeHistoryHost(arg0 context.Context, arg } // DescribeHistoryHost indicates an expected call of DescribeHistoryHost. 
-func (mr *MockHistoryServiceServerMockRecorder) DescribeHistoryHost(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) DescribeHistoryHost(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeHistoryHost", reflect.TypeOf((*MockHistoryServiceServer)(nil).DescribeHistoryHost), arg0, arg1) } @@ -1541,7 +1885,7 @@ func (m *MockHistoryServiceServer) DescribeMutableState(arg0 context.Context, ar } // DescribeMutableState indicates an expected call of DescribeMutableState. -func (mr *MockHistoryServiceServerMockRecorder) DescribeMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) DescribeMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeMutableState", reflect.TypeOf((*MockHistoryServiceServer)(nil).DescribeMutableState), arg0, arg1) } @@ -1556,11 +1900,26 @@ func (m *MockHistoryServiceServer) DescribeWorkflowExecution(arg0 context.Contex } // DescribeWorkflowExecution indicates an expected call of DescribeWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) DescribeWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) DescribeWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).DescribeWorkflowExecution), arg0, arg1) } +// ExecuteMultiOperation mocks base method. 
+func (m *MockHistoryServiceServer) ExecuteMultiOperation(arg0 context.Context, arg1 *historyservice.ExecuteMultiOperationRequest) (*historyservice.ExecuteMultiOperationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ExecuteMultiOperation", arg0, arg1) + ret0, _ := ret[0].(*historyservice.ExecuteMultiOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ExecuteMultiOperation indicates an expected call of ExecuteMultiOperation. +func (mr *MockHistoryServiceServerMockRecorder) ExecuteMultiOperation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ExecuteMultiOperation", reflect.TypeOf((*MockHistoryServiceServer)(nil).ExecuteMultiOperation), arg0, arg1) +} + // ForceDeleteWorkflowExecution mocks base method. func (m *MockHistoryServiceServer) ForceDeleteWorkflowExecution(arg0 context.Context, arg1 *historyservice.ForceDeleteWorkflowExecutionRequest) (*historyservice.ForceDeleteWorkflowExecutionResponse, error) { m.ctrl.T.Helper() @@ -1571,7 +1930,7 @@ func (m *MockHistoryServiceServer) ForceDeleteWorkflowExecution(arg0 context.Con } // ForceDeleteWorkflowExecution indicates an expected call of ForceDeleteWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) ForceDeleteWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ForceDeleteWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceDeleteWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).ForceDeleteWorkflowExecution), arg0, arg1) } @@ -1586,7 +1945,7 @@ func (m *MockHistoryServiceServer) GenerateLastHistoryReplicationTasks(arg0 cont } // GenerateLastHistoryReplicationTasks indicates an expected call of GenerateLastHistoryReplicationTasks. 
-func (mr *MockHistoryServiceServerMockRecorder) GenerateLastHistoryReplicationTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GenerateLastHistoryReplicationTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GenerateLastHistoryReplicationTasks", reflect.TypeOf((*MockHistoryServiceServer)(nil).GenerateLastHistoryReplicationTasks), arg0, arg1) } @@ -1601,7 +1960,7 @@ func (m *MockHistoryServiceServer) GetDLQMessages(arg0 context.Context, arg1 *hi } // GetDLQMessages indicates an expected call of GetDLQMessages. -func (mr *MockHistoryServiceServerMockRecorder) GetDLQMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetDLQMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQMessages", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetDLQMessages), arg0, arg1) } @@ -1616,7 +1975,7 @@ func (m *MockHistoryServiceServer) GetDLQReplicationMessages(arg0 context.Contex } // GetDLQReplicationMessages indicates an expected call of GetDLQReplicationMessages. -func (mr *MockHistoryServiceServerMockRecorder) GetDLQReplicationMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetDLQReplicationMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQReplicationMessages", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetDLQReplicationMessages), arg0, arg1) } @@ -1631,7 +1990,7 @@ func (m *MockHistoryServiceServer) GetDLQTasks(arg0 context.Context, arg1 *histo } // GetDLQTasks indicates an expected call of GetDLQTasks. 
-func (mr *MockHistoryServiceServerMockRecorder) GetDLQTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetDLQTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDLQTasks", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetDLQTasks), arg0, arg1) } @@ -1646,7 +2005,7 @@ func (m *MockHistoryServiceServer) GetMutableState(arg0 context.Context, arg1 *h } // GetMutableState indicates an expected call of GetMutableState. -func (mr *MockHistoryServiceServerMockRecorder) GetMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMutableState", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetMutableState), arg0, arg1) } @@ -1661,7 +2020,7 @@ func (m *MockHistoryServiceServer) GetReplicationMessages(arg0 context.Context, } // GetReplicationMessages indicates an expected call of GetReplicationMessages. -func (mr *MockHistoryServiceServerMockRecorder) GetReplicationMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetReplicationMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationMessages", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetReplicationMessages), arg0, arg1) } @@ -1676,7 +2035,7 @@ func (m *MockHistoryServiceServer) GetReplicationStatus(arg0 context.Context, ar } // GetReplicationStatus indicates an expected call of GetReplicationStatus. 
-func (mr *MockHistoryServiceServerMockRecorder) GetReplicationStatus(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetReplicationStatus(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetReplicationStatus", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetReplicationStatus), arg0, arg1) } @@ -1691,7 +2050,7 @@ func (m *MockHistoryServiceServer) GetShard(arg0 context.Context, arg1 *historys } // GetShard indicates an expected call of GetShard. -func (mr *MockHistoryServiceServerMockRecorder) GetShard(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetShard(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetShard", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetShard), arg0, arg1) } @@ -1706,7 +2065,7 @@ func (m *MockHistoryServiceServer) GetWorkflowExecutionHistory(arg0 context.Cont } // GetWorkflowExecutionHistory indicates an expected call of GetWorkflowExecutionHistory. -func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionHistory(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionHistory(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionHistory", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetWorkflowExecutionHistory), arg0, arg1) } @@ -1721,7 +2080,7 @@ func (m *MockHistoryServiceServer) GetWorkflowExecutionHistoryReverse(arg0 conte } // GetWorkflowExecutionHistoryReverse indicates an expected call of GetWorkflowExecutionHistoryReverse. 
-func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionHistoryReverse(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionHistoryReverse(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionHistoryReverse", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetWorkflowExecutionHistoryReverse), arg0, arg1) } @@ -1736,7 +2095,7 @@ func (m *MockHistoryServiceServer) GetWorkflowExecutionRawHistory(arg0 context.C } // GetWorkflowExecutionRawHistory indicates an expected call of GetWorkflowExecutionRawHistory. -func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionRawHistory(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionRawHistory(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistory", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetWorkflowExecutionRawHistory), arg0, arg1) } @@ -1751,7 +2110,7 @@ func (m *MockHistoryServiceServer) GetWorkflowExecutionRawHistoryV2(arg0 context } // GetWorkflowExecutionRawHistoryV2 indicates an expected call of GetWorkflowExecutionRawHistoryV2. -func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionRawHistoryV2(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) GetWorkflowExecutionRawHistoryV2(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkflowExecutionRawHistoryV2", reflect.TypeOf((*MockHistoryServiceServer)(nil).GetWorkflowExecutionRawHistoryV2), arg0, arg1) } @@ -1766,11 +2125,26 @@ func (m *MockHistoryServiceServer) ImportWorkflowExecution(arg0 context.Context, } // ImportWorkflowExecution indicates an expected call of ImportWorkflowExecution. 
-func (mr *MockHistoryServiceServerMockRecorder) ImportWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ImportWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImportWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).ImportWorkflowExecution), arg0, arg1) } +// InvokeStateMachineMethod mocks base method. +func (m *MockHistoryServiceServer) InvokeStateMachineMethod(arg0 context.Context, arg1 *historyservice.InvokeStateMachineMethodRequest) (*historyservice.InvokeStateMachineMethodResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InvokeStateMachineMethod", arg0, arg1) + ret0, _ := ret[0].(*historyservice.InvokeStateMachineMethodResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// InvokeStateMachineMethod indicates an expected call of InvokeStateMachineMethod. +func (mr *MockHistoryServiceServerMockRecorder) InvokeStateMachineMethod(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InvokeStateMachineMethod", reflect.TypeOf((*MockHistoryServiceServer)(nil).InvokeStateMachineMethod), arg0, arg1) +} + // IsActivityTaskValid mocks base method. func (m *MockHistoryServiceServer) IsActivityTaskValid(arg0 context.Context, arg1 *historyservice.IsActivityTaskValidRequest) (*historyservice.IsActivityTaskValidResponse, error) { m.ctrl.T.Helper() @@ -1781,7 +2155,7 @@ func (m *MockHistoryServiceServer) IsActivityTaskValid(arg0 context.Context, arg } // IsActivityTaskValid indicates an expected call of IsActivityTaskValid. 
-func (mr *MockHistoryServiceServerMockRecorder) IsActivityTaskValid(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) IsActivityTaskValid(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsActivityTaskValid", reflect.TypeOf((*MockHistoryServiceServer)(nil).IsActivityTaskValid), arg0, arg1) } @@ -1796,7 +2170,7 @@ func (m *MockHistoryServiceServer) IsWorkflowTaskValid(arg0 context.Context, arg } // IsWorkflowTaskValid indicates an expected call of IsWorkflowTaskValid. -func (mr *MockHistoryServiceServerMockRecorder) IsWorkflowTaskValid(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) IsWorkflowTaskValid(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsWorkflowTaskValid", reflect.TypeOf((*MockHistoryServiceServer)(nil).IsWorkflowTaskValid), arg0, arg1) } @@ -1811,7 +2185,7 @@ func (m *MockHistoryServiceServer) ListQueues(arg0 context.Context, arg1 *histor } // ListQueues indicates an expected call of ListQueues. -func (mr *MockHistoryServiceServerMockRecorder) ListQueues(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ListQueues(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListQueues", reflect.TypeOf((*MockHistoryServiceServer)(nil).ListQueues), arg0, arg1) } @@ -1826,7 +2200,7 @@ func (m *MockHistoryServiceServer) ListTasks(arg0 context.Context, arg1 *history } // ListTasks indicates an expected call of ListTasks. 
-func (mr *MockHistoryServiceServerMockRecorder) ListTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ListTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTasks", reflect.TypeOf((*MockHistoryServiceServer)(nil).ListTasks), arg0, arg1) } @@ -1841,11 +2215,41 @@ func (m *MockHistoryServiceServer) MergeDLQMessages(arg0 context.Context, arg1 * } // MergeDLQMessages indicates an expected call of MergeDLQMessages. -func (mr *MockHistoryServiceServerMockRecorder) MergeDLQMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) MergeDLQMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQMessages", reflect.TypeOf((*MockHistoryServiceServer)(nil).MergeDLQMessages), arg0, arg1) } +// PauseActivity mocks base method. +func (m *MockHistoryServiceServer) PauseActivity(arg0 context.Context, arg1 *historyservice.PauseActivityRequest) (*historyservice.PauseActivityResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PauseActivity", arg0, arg1) + ret0, _ := ret[0].(*historyservice.PauseActivityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PauseActivity indicates an expected call of PauseActivity. +func (mr *MockHistoryServiceServerMockRecorder) PauseActivity(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseActivity", reflect.TypeOf((*MockHistoryServiceServer)(nil).PauseActivity), arg0, arg1) +} + +// PauseWorkflowExecution mocks base method. 
+func (m *MockHistoryServiceServer) PauseWorkflowExecution(arg0 context.Context, arg1 *historyservice.PauseWorkflowExecutionRequest) (*historyservice.PauseWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PauseWorkflowExecution", arg0, arg1) + ret0, _ := ret[0].(*historyservice.PauseWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PauseWorkflowExecution indicates an expected call of PauseWorkflowExecution. +func (mr *MockHistoryServiceServerMockRecorder) PauseWorkflowExecution(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PauseWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).PauseWorkflowExecution), arg0, arg1) +} + // PollMutableState mocks base method. func (m *MockHistoryServiceServer) PollMutableState(arg0 context.Context, arg1 *historyservice.PollMutableStateRequest) (*historyservice.PollMutableStateResponse, error) { m.ctrl.T.Helper() @@ -1856,7 +2260,7 @@ func (m *MockHistoryServiceServer) PollMutableState(arg0 context.Context, arg1 * } // PollMutableState indicates an expected call of PollMutableState. -func (mr *MockHistoryServiceServerMockRecorder) PollMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) PollMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollMutableState", reflect.TypeOf((*MockHistoryServiceServer)(nil).PollMutableState), arg0, arg1) } @@ -1871,7 +2275,7 @@ func (m *MockHistoryServiceServer) PollWorkflowExecutionUpdate(arg0 context.Cont } // PollWorkflowExecutionUpdate indicates an expected call of PollWorkflowExecutionUpdate. 
-func (mr *MockHistoryServiceServerMockRecorder) PollWorkflowExecutionUpdate(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) PollWorkflowExecutionUpdate(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollWorkflowExecutionUpdate", reflect.TypeOf((*MockHistoryServiceServer)(nil).PollWorkflowExecutionUpdate), arg0, arg1) } @@ -1886,7 +2290,7 @@ func (m *MockHistoryServiceServer) PurgeDLQMessages(arg0 context.Context, arg1 * } // PurgeDLQMessages indicates an expected call of PurgeDLQMessages. -func (mr *MockHistoryServiceServerMockRecorder) PurgeDLQMessages(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) PurgeDLQMessages(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQMessages", reflect.TypeOf((*MockHistoryServiceServer)(nil).PurgeDLQMessages), arg0, arg1) } @@ -1901,7 +2305,7 @@ func (m *MockHistoryServiceServer) QueryWorkflow(arg0 context.Context, arg1 *his } // QueryWorkflow indicates an expected call of QueryWorkflow. -func (mr *MockHistoryServiceServerMockRecorder) QueryWorkflow(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) QueryWorkflow(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryWorkflow", reflect.TypeOf((*MockHistoryServiceServer)(nil).QueryWorkflow), arg0, arg1) } @@ -1916,7 +2320,7 @@ func (m *MockHistoryServiceServer) ReapplyEvents(arg0 context.Context, arg1 *his } // ReapplyEvents indicates an expected call of ReapplyEvents. 
-func (mr *MockHistoryServiceServerMockRecorder) ReapplyEvents(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ReapplyEvents(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapplyEvents", reflect.TypeOf((*MockHistoryServiceServer)(nil).ReapplyEvents), arg0, arg1) } @@ -1931,7 +2335,7 @@ func (m *MockHistoryServiceServer) RebuildMutableState(arg0 context.Context, arg } // RebuildMutableState indicates an expected call of RebuildMutableState. -func (mr *MockHistoryServiceServerMockRecorder) RebuildMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RebuildMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebuildMutableState", reflect.TypeOf((*MockHistoryServiceServer)(nil).RebuildMutableState), arg0, arg1) } @@ -1946,7 +2350,7 @@ func (m *MockHistoryServiceServer) RecordActivityTaskHeartbeat(arg0 context.Cont } // RecordActivityTaskHeartbeat indicates an expected call of RecordActivityTaskHeartbeat. -func (mr *MockHistoryServiceServerMockRecorder) RecordActivityTaskHeartbeat(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RecordActivityTaskHeartbeat(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordActivityTaskHeartbeat", reflect.TypeOf((*MockHistoryServiceServer)(nil).RecordActivityTaskHeartbeat), arg0, arg1) } @@ -1961,7 +2365,7 @@ func (m *MockHistoryServiceServer) RecordActivityTaskStarted(arg0 context.Contex } // RecordActivityTaskStarted indicates an expected call of RecordActivityTaskStarted. 
-func (mr *MockHistoryServiceServerMockRecorder) RecordActivityTaskStarted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RecordActivityTaskStarted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordActivityTaskStarted", reflect.TypeOf((*MockHistoryServiceServer)(nil).RecordActivityTaskStarted), arg0, arg1) } @@ -1976,7 +2380,7 @@ func (m *MockHistoryServiceServer) RecordChildExecutionCompleted(arg0 context.Co } // RecordChildExecutionCompleted indicates an expected call of RecordChildExecutionCompleted. -func (mr *MockHistoryServiceServerMockRecorder) RecordChildExecutionCompleted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RecordChildExecutionCompleted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordChildExecutionCompleted", reflect.TypeOf((*MockHistoryServiceServer)(nil).RecordChildExecutionCompleted), arg0, arg1) } @@ -1991,7 +2395,7 @@ func (m *MockHistoryServiceServer) RecordWorkflowTaskStarted(arg0 context.Contex } // RecordWorkflowTaskStarted indicates an expected call of RecordWorkflowTaskStarted. -func (mr *MockHistoryServiceServerMockRecorder) RecordWorkflowTaskStarted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RecordWorkflowTaskStarted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordWorkflowTaskStarted", reflect.TypeOf((*MockHistoryServiceServer)(nil).RecordWorkflowTaskStarted), arg0, arg1) } @@ -2006,7 +2410,7 @@ func (m *MockHistoryServiceServer) RefreshWorkflowTasks(arg0 context.Context, ar } // RefreshWorkflowTasks indicates an expected call of RefreshWorkflowTasks. 
-func (mr *MockHistoryServiceServerMockRecorder) RefreshWorkflowTasks(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RefreshWorkflowTasks(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RefreshWorkflowTasks", reflect.TypeOf((*MockHistoryServiceServer)(nil).RefreshWorkflowTasks), arg0, arg1) } @@ -2021,7 +2425,7 @@ func (m *MockHistoryServiceServer) RemoveSignalMutableState(arg0 context.Context } // RemoveSignalMutableState indicates an expected call of RemoveSignalMutableState. -func (mr *MockHistoryServiceServerMockRecorder) RemoveSignalMutableState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RemoveSignalMutableState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveSignalMutableState", reflect.TypeOf((*MockHistoryServiceServer)(nil).RemoveSignalMutableState), arg0, arg1) } @@ -2036,7 +2440,7 @@ func (m *MockHistoryServiceServer) RemoveTask(arg0 context.Context, arg1 *histor } // RemoveTask indicates an expected call of RemoveTask. -func (mr *MockHistoryServiceServerMockRecorder) RemoveTask(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RemoveTask(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveTask", reflect.TypeOf((*MockHistoryServiceServer)(nil).RemoveTask), arg0, arg1) } @@ -2051,7 +2455,7 @@ func (m *MockHistoryServiceServer) ReplicateEventsV2(arg0 context.Context, arg1 } // ReplicateEventsV2 indicates an expected call of ReplicateEventsV2. 
-func (mr *MockHistoryServiceServerMockRecorder) ReplicateEventsV2(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ReplicateEventsV2(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateEventsV2", reflect.TypeOf((*MockHistoryServiceServer)(nil).ReplicateEventsV2), arg0, arg1) } @@ -2066,7 +2470,7 @@ func (m *MockHistoryServiceServer) ReplicateWorkflowState(arg0 context.Context, } // ReplicateWorkflowState indicates an expected call of ReplicateWorkflowState. -func (mr *MockHistoryServiceServerMockRecorder) ReplicateWorkflowState(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ReplicateWorkflowState(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateWorkflowState", reflect.TypeOf((*MockHistoryServiceServer)(nil).ReplicateWorkflowState), arg0, arg1) } @@ -2081,11 +2485,26 @@ func (m *MockHistoryServiceServer) RequestCancelWorkflowExecution(arg0 context.C } // RequestCancelWorkflowExecution indicates an expected call of RequestCancelWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) RequestCancelWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RequestCancelWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestCancelWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).RequestCancelWorkflowExecution), arg0, arg1) } +// ResetActivity mocks base method. 
+func (m *MockHistoryServiceServer) ResetActivity(arg0 context.Context, arg1 *historyservice.ResetActivityRequest) (*historyservice.ResetActivityResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetActivity", arg0, arg1) + ret0, _ := ret[0].(*historyservice.ResetActivityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ResetActivity indicates an expected call of ResetActivity. +func (mr *MockHistoryServiceServerMockRecorder) ResetActivity(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetActivity", reflect.TypeOf((*MockHistoryServiceServer)(nil).ResetActivity), arg0, arg1) +} + // ResetStickyTaskQueue mocks base method. func (m *MockHistoryServiceServer) ResetStickyTaskQueue(arg0 context.Context, arg1 *historyservice.ResetStickyTaskQueueRequest) (*historyservice.ResetStickyTaskQueueResponse, error) { m.ctrl.T.Helper() @@ -2096,7 +2515,7 @@ func (m *MockHistoryServiceServer) ResetStickyTaskQueue(arg0 context.Context, ar } // ResetStickyTaskQueue indicates an expected call of ResetStickyTaskQueue. -func (mr *MockHistoryServiceServerMockRecorder) ResetStickyTaskQueue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ResetStickyTaskQueue(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetStickyTaskQueue", reflect.TypeOf((*MockHistoryServiceServer)(nil).ResetStickyTaskQueue), arg0, arg1) } @@ -2111,7 +2530,7 @@ func (m *MockHistoryServiceServer) ResetWorkflowExecution(arg0 context.Context, } // ResetWorkflowExecution indicates an expected call of ResetWorkflowExecution. 
-func (mr *MockHistoryServiceServerMockRecorder) ResetWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ResetWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).ResetWorkflowExecution), arg0, arg1) } @@ -2126,7 +2545,7 @@ func (m *MockHistoryServiceServer) RespondActivityTaskCanceled(arg0 context.Cont } // RespondActivityTaskCanceled indicates an expected call of RespondActivityTaskCanceled. -func (mr *MockHistoryServiceServerMockRecorder) RespondActivityTaskCanceled(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RespondActivityTaskCanceled(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondActivityTaskCanceled", reflect.TypeOf((*MockHistoryServiceServer)(nil).RespondActivityTaskCanceled), arg0, arg1) } @@ -2141,7 +2560,7 @@ func (m *MockHistoryServiceServer) RespondActivityTaskCompleted(arg0 context.Con } // RespondActivityTaskCompleted indicates an expected call of RespondActivityTaskCompleted. -func (mr *MockHistoryServiceServerMockRecorder) RespondActivityTaskCompleted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RespondActivityTaskCompleted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondActivityTaskCompleted", reflect.TypeOf((*MockHistoryServiceServer)(nil).RespondActivityTaskCompleted), arg0, arg1) } @@ -2156,7 +2575,7 @@ func (m *MockHistoryServiceServer) RespondActivityTaskFailed(arg0 context.Contex } // RespondActivityTaskFailed indicates an expected call of RespondActivityTaskFailed. 
-func (mr *MockHistoryServiceServerMockRecorder) RespondActivityTaskFailed(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RespondActivityTaskFailed(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondActivityTaskFailed", reflect.TypeOf((*MockHistoryServiceServer)(nil).RespondActivityTaskFailed), arg0, arg1) } @@ -2171,7 +2590,7 @@ func (m *MockHistoryServiceServer) RespondWorkflowTaskCompleted(arg0 context.Con } // RespondWorkflowTaskCompleted indicates an expected call of RespondWorkflowTaskCompleted. -func (mr *MockHistoryServiceServerMockRecorder) RespondWorkflowTaskCompleted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RespondWorkflowTaskCompleted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondWorkflowTaskCompleted", reflect.TypeOf((*MockHistoryServiceServer)(nil).RespondWorkflowTaskCompleted), arg0, arg1) } @@ -2186,7 +2605,7 @@ func (m *MockHistoryServiceServer) RespondWorkflowTaskFailed(arg0 context.Contex } // RespondWorkflowTaskFailed indicates an expected call of RespondWorkflowTaskFailed. -func (mr *MockHistoryServiceServerMockRecorder) RespondWorkflowTaskFailed(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) RespondWorkflowTaskFailed(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondWorkflowTaskFailed", reflect.TypeOf((*MockHistoryServiceServer)(nil).RespondWorkflowTaskFailed), arg0, arg1) } @@ -2201,7 +2620,7 @@ func (m *MockHistoryServiceServer) ScheduleWorkflowTask(arg0 context.Context, ar } // ScheduleWorkflowTask indicates an expected call of ScheduleWorkflowTask. 
-func (mr *MockHistoryServiceServerMockRecorder) ScheduleWorkflowTask(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) ScheduleWorkflowTask(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ScheduleWorkflowTask", reflect.TypeOf((*MockHistoryServiceServer)(nil).ScheduleWorkflowTask), arg0, arg1) } @@ -2216,7 +2635,7 @@ func (m *MockHistoryServiceServer) SignalWithStartWorkflowExecution(arg0 context } // SignalWithStartWorkflowExecution indicates an expected call of SignalWithStartWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) SignalWithStartWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) SignalWithStartWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalWithStartWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).SignalWithStartWorkflowExecution), arg0, arg1) } @@ -2231,11 +2650,26 @@ func (m *MockHistoryServiceServer) SignalWorkflowExecution(arg0 context.Context, } // SignalWorkflowExecution indicates an expected call of SignalWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) SignalWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) SignalWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignalWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).SignalWorkflowExecution), arg0, arg1) } +// StartNexusOperation mocks base method. 
+func (m *MockHistoryServiceServer) StartNexusOperation(arg0 context.Context, arg1 *historyservice.StartNexusOperationRequest) (*historyservice.StartNexusOperationResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartNexusOperation", arg0, arg1) + ret0, _ := ret[0].(*historyservice.StartNexusOperationResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartNexusOperation indicates an expected call of StartNexusOperation. +func (mr *MockHistoryServiceServerMockRecorder) StartNexusOperation(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartNexusOperation", reflect.TypeOf((*MockHistoryServiceServer)(nil).StartNexusOperation), arg0, arg1) +} + // StartWorkflowExecution mocks base method. func (m *MockHistoryServiceServer) StartWorkflowExecution(arg0 context.Context, arg1 *historyservice.StartWorkflowExecutionRequest) (*historyservice.StartWorkflowExecutionResponse, error) { m.ctrl.T.Helper() @@ -2246,7 +2680,7 @@ func (m *MockHistoryServiceServer) StartWorkflowExecution(arg0 context.Context, } // StartWorkflowExecution indicates an expected call of StartWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) StartWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) StartWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).StartWorkflowExecution), arg0, arg1) } @@ -2260,7 +2694,7 @@ func (m *MockHistoryServiceServer) StreamWorkflowReplicationMessages(arg0 histor } // StreamWorkflowReplicationMessages indicates an expected call of StreamWorkflowReplicationMessages. 
-func (mr *MockHistoryServiceServerMockRecorder) StreamWorkflowReplicationMessages(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) StreamWorkflowReplicationMessages(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StreamWorkflowReplicationMessages", reflect.TypeOf((*MockHistoryServiceServer)(nil).StreamWorkflowReplicationMessages), arg0) } @@ -2275,7 +2709,7 @@ func (m *MockHistoryServiceServer) SyncActivity(arg0 context.Context, arg1 *hist } // SyncActivity indicates an expected call of SyncActivity. -func (mr *MockHistoryServiceServerMockRecorder) SyncActivity(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) SyncActivity(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncActivity", reflect.TypeOf((*MockHistoryServiceServer)(nil).SyncActivity), arg0, arg1) } @@ -2290,11 +2724,26 @@ func (m *MockHistoryServiceServer) SyncShardStatus(arg0 context.Context, arg1 *h } // SyncShardStatus indicates an expected call of SyncShardStatus. -func (mr *MockHistoryServiceServerMockRecorder) SyncShardStatus(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) SyncShardStatus(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncShardStatus", reflect.TypeOf((*MockHistoryServiceServer)(nil).SyncShardStatus), arg0, arg1) } +// SyncWorkflowState mocks base method. +func (m *MockHistoryServiceServer) SyncWorkflowState(arg0 context.Context, arg1 *historyservice.SyncWorkflowStateRequest) (*historyservice.SyncWorkflowStateResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncWorkflowState", arg0, arg1) + ret0, _ := ret[0].(*historyservice.SyncWorkflowStateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncWorkflowState indicates an expected call of SyncWorkflowState. 
+func (mr *MockHistoryServiceServerMockRecorder) SyncWorkflowState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncWorkflowState", reflect.TypeOf((*MockHistoryServiceServer)(nil).SyncWorkflowState), arg0, arg1) +} + // TerminateWorkflowExecution mocks base method. func (m *MockHistoryServiceServer) TerminateWorkflowExecution(arg0 context.Context, arg1 *historyservice.TerminateWorkflowExecutionRequest) (*historyservice.TerminateWorkflowExecutionResponse, error) { m.ctrl.T.Helper() @@ -2305,11 +2754,56 @@ func (m *MockHistoryServiceServer) TerminateWorkflowExecution(arg0 context.Conte } // TerminateWorkflowExecution indicates an expected call of TerminateWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) TerminateWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) TerminateWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TerminateWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).TerminateWorkflowExecution), arg0, arg1) } +// UnpauseActivity mocks base method. +func (m *MockHistoryServiceServer) UnpauseActivity(arg0 context.Context, arg1 *historyservice.UnpauseActivityRequest) (*historyservice.UnpauseActivityResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnpauseActivity", arg0, arg1) + ret0, _ := ret[0].(*historyservice.UnpauseActivityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnpauseActivity indicates an expected call of UnpauseActivity. +func (mr *MockHistoryServiceServerMockRecorder) UnpauseActivity(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpauseActivity", reflect.TypeOf((*MockHistoryServiceServer)(nil).UnpauseActivity), arg0, arg1) +} + +// UnpauseWorkflowExecution mocks base method. 
+func (m *MockHistoryServiceServer) UnpauseWorkflowExecution(arg0 context.Context, arg1 *historyservice.UnpauseWorkflowExecutionRequest) (*historyservice.UnpauseWorkflowExecutionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnpauseWorkflowExecution", arg0, arg1) + ret0, _ := ret[0].(*historyservice.UnpauseWorkflowExecutionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UnpauseWorkflowExecution indicates an expected call of UnpauseWorkflowExecution. +func (mr *MockHistoryServiceServerMockRecorder) UnpauseWorkflowExecution(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnpauseWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).UnpauseWorkflowExecution), arg0, arg1) +} + +// UpdateActivityOptions mocks base method. +func (m *MockHistoryServiceServer) UpdateActivityOptions(arg0 context.Context, arg1 *historyservice.UpdateActivityOptionsRequest) (*historyservice.UpdateActivityOptionsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateActivityOptions", arg0, arg1) + ret0, _ := ret[0].(*historyservice.UpdateActivityOptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateActivityOptions indicates an expected call of UpdateActivityOptions. +func (mr *MockHistoryServiceServerMockRecorder) UpdateActivityOptions(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateActivityOptions", reflect.TypeOf((*MockHistoryServiceServer)(nil).UpdateActivityOptions), arg0, arg1) +} + // UpdateWorkflowExecution mocks base method. 
func (m *MockHistoryServiceServer) UpdateWorkflowExecution(arg0 context.Context, arg1 *historyservice.UpdateWorkflowExecutionRequest) (*historyservice.UpdateWorkflowExecutionResponse, error) { m.ctrl.T.Helper() @@ -2320,11 +2814,26 @@ func (m *MockHistoryServiceServer) UpdateWorkflowExecution(arg0 context.Context, } // UpdateWorkflowExecution indicates an expected call of UpdateWorkflowExecution. -func (mr *MockHistoryServiceServerMockRecorder) UpdateWorkflowExecution(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) UpdateWorkflowExecution(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkflowExecution", reflect.TypeOf((*MockHistoryServiceServer)(nil).UpdateWorkflowExecution), arg0, arg1) } +// UpdateWorkflowExecutionOptions mocks base method. +func (m *MockHistoryServiceServer) UpdateWorkflowExecutionOptions(arg0 context.Context, arg1 *historyservice.UpdateWorkflowExecutionOptionsRequest) (*historyservice.UpdateWorkflowExecutionOptionsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateWorkflowExecutionOptions", arg0, arg1) + ret0, _ := ret[0].(*historyservice.UpdateWorkflowExecutionOptionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWorkflowExecutionOptions indicates an expected call of UpdateWorkflowExecutionOptions. +func (mr *MockHistoryServiceServerMockRecorder) UpdateWorkflowExecutionOptions(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkflowExecutionOptions", reflect.TypeOf((*MockHistoryServiceServer)(nil).UpdateWorkflowExecutionOptions), arg0, arg1) +} + // VerifyChildExecutionCompletionRecorded mocks base method. 
func (m *MockHistoryServiceServer) VerifyChildExecutionCompletionRecorded(arg0 context.Context, arg1 *historyservice.VerifyChildExecutionCompletionRecordedRequest) (*historyservice.VerifyChildExecutionCompletionRecordedResponse, error) { m.ctrl.T.Helper() @@ -2335,7 +2844,7 @@ func (m *MockHistoryServiceServer) VerifyChildExecutionCompletionRecorded(arg0 c } // VerifyChildExecutionCompletionRecorded indicates an expected call of VerifyChildExecutionCompletionRecorded. -func (mr *MockHistoryServiceServerMockRecorder) VerifyChildExecutionCompletionRecorded(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) VerifyChildExecutionCompletionRecorded(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyChildExecutionCompletionRecorded", reflect.TypeOf((*MockHistoryServiceServer)(nil).VerifyChildExecutionCompletionRecorded), arg0, arg1) } @@ -2350,7 +2859,7 @@ func (m *MockHistoryServiceServer) VerifyFirstWorkflowTaskScheduled(arg0 context } // VerifyFirstWorkflowTaskScheduled indicates an expected call of VerifyFirstWorkflowTaskScheduled. -func (mr *MockHistoryServiceServerMockRecorder) VerifyFirstWorkflowTaskScheduled(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockHistoryServiceServerMockRecorder) VerifyFirstWorkflowTaskScheduled(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyFirstWorkflowTaskScheduled", reflect.TypeOf((*MockHistoryServiceServer)(nil).VerifyFirstWorkflowTaskScheduled), arg0, arg1) } @@ -2371,6 +2880,7 @@ func (mr *MockHistoryServiceServerMockRecorder) mustEmbedUnimplementedHistorySer type MockUnsafeHistoryServiceServer struct { ctrl *gomock.Controller recorder *MockUnsafeHistoryServiceServerMockRecorder + isgomock struct{} } // MockUnsafeHistoryServiceServerMockRecorder is the mock recorder for MockUnsafeHistoryServiceServer. 
@@ -2406,6 +2916,7 @@ func (mr *MockUnsafeHistoryServiceServerMockRecorder) mustEmbedUnimplementedHist type MockHistoryService_StreamWorkflowReplicationMessagesServer struct { ctrl *gomock.Controller recorder *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder + isgomock struct{} } // MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder is the mock recorder for MockHistoryService_StreamWorkflowReplicationMessagesServer. @@ -2463,7 +2974,7 @@ func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesServer) RecvMsg(m } // RecvMsg indicates an expected call of RecvMsg. -func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) RecvMsg(m interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) RecvMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecvMsg", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesServer)(nil).RecvMsg), m) } @@ -2477,7 +2988,7 @@ func (m *MockHistoryService_StreamWorkflowReplicationMessagesServer) Send(arg0 * } // Send indicates an expected call of Send. -func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) Send(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) Send(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesServer)(nil).Send), arg0) } @@ -2491,7 +3002,7 @@ func (m *MockHistoryService_StreamWorkflowReplicationMessagesServer) SendHeader( } // SendHeader indicates an expected call of SendHeader. 
-func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SendHeader(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SendHeader(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHeader", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesServer)(nil).SendHeader), arg0) } @@ -2505,7 +3016,7 @@ func (m_2 *MockHistoryService_StreamWorkflowReplicationMessagesServer) SendMsg(m } // SendMsg indicates an expected call of SendMsg. -func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SendMsg(m interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SendMsg(m any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendMsg", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesServer)(nil).SendMsg), m) } @@ -2519,7 +3030,7 @@ func (m *MockHistoryService_StreamWorkflowReplicationMessagesServer) SetHeader(a } // SetHeader indicates an expected call of SetHeader. -func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SetHeader(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SetHeader(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetHeader", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesServer)(nil).SetHeader), arg0) } @@ -2531,7 +3042,7 @@ func (m *MockHistoryService_StreamWorkflowReplicationMessagesServer) SetTrailer( } // SetTrailer indicates an expected call of SetTrailer. 
-func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SetTrailer(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryService_StreamWorkflowReplicationMessagesServerMockRecorder) SetTrailer(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTrailer", reflect.TypeOf((*MockHistoryService_StreamWorkflowReplicationMessagesServer)(nil).SetTrailer), arg0) } diff --git a/api/matchingservice/v1/request_response.go-helpers.pb.go b/api/matchingservice/v1/request_response.go-helpers.pb.go index 0698c0a6b72..6fdd838d5ca 100644 --- a/api/matchingservice/v1/request_response.go-helpers.pb.go +++ b/api/matchingservice/v1/request_response.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package matchingservice @@ -103,6 +79,43 @@ func (this *PollWorkflowTaskQueueResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type PollWorkflowTaskQueueResponseWithRawHistory to the protobuf v3 wire format +func (val *PollWorkflowTaskQueueResponseWithRawHistory) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollWorkflowTaskQueueResponseWithRawHistory from the protobuf v3 wire format +func (val *PollWorkflowTaskQueueResponseWithRawHistory) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollWorkflowTaskQueueResponseWithRawHistory) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollWorkflowTaskQueueResponseWithRawHistory values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollWorkflowTaskQueueResponseWithRawHistory) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollWorkflowTaskQueueResponseWithRawHistory + switch t := that.(type) { + case *PollWorkflowTaskQueueResponseWithRawHistory: + that1 = t + case PollWorkflowTaskQueueResponseWithRawHistory: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type PollActivityTaskQueueRequest to the protobuf v3 wire format func (val *PollActivityTaskQueueRequest) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -547,35 +560,35 @@ func (this *CancelOutstandingPollResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type DescribeTaskQueueRequest to the protobuf v3 wire format -func (val *DescribeTaskQueueRequest) Marshal() ([]byte, error) { +// Marshal an 
object of type CancelOutstandingWorkerPollsRequest to the protobuf v3 wire format +func (val *CancelOutstandingWorkerPollsRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type DescribeTaskQueueRequest from the protobuf v3 wire format -func (val *DescribeTaskQueueRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type CancelOutstandingWorkerPollsRequest from the protobuf v3 wire format +func (val *CancelOutstandingWorkerPollsRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *DescribeTaskQueueRequest) Size() int { +func (val *CancelOutstandingWorkerPollsRequest) Size() int { return proto.Size(val) } -// Equal returns whether two DescribeTaskQueueRequest values are equivalent by recursively +// Equal returns whether two CancelOutstandingWorkerPollsRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *DescribeTaskQueueRequest) Equal(that interface{}) bool { +func (this *CancelOutstandingWorkerPollsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *DescribeTaskQueueRequest + var that1 *CancelOutstandingWorkerPollsRequest switch t := that.(type) { - case *DescribeTaskQueueRequest: + case *CancelOutstandingWorkerPollsRequest: that1 = t - case DescribeTaskQueueRequest: + case CancelOutstandingWorkerPollsRequest: that1 = &t default: return false @@ -584,35 +597,35 @@ func (this *DescribeTaskQueueRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type DescribeTaskQueueResponse to the protobuf v3 wire format -func (val *DescribeTaskQueueResponse) Marshal() ([]byte, error) { +// Marshal an object of type CancelOutstandingWorkerPollsResponse to the protobuf v3 wire format +func (val 
*CancelOutstandingWorkerPollsResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type DescribeTaskQueueResponse from the protobuf v3 wire format -func (val *DescribeTaskQueueResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type CancelOutstandingWorkerPollsResponse from the protobuf v3 wire format +func (val *CancelOutstandingWorkerPollsResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *DescribeTaskQueueResponse) Size() int { +func (val *CancelOutstandingWorkerPollsResponse) Size() int { return proto.Size(val) } -// Equal returns whether two DescribeTaskQueueResponse values are equivalent by recursively +// Equal returns whether two CancelOutstandingWorkerPollsResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *DescribeTaskQueueResponse) Equal(that interface{}) bool { +func (this *CancelOutstandingWorkerPollsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *DescribeTaskQueueResponse + var that1 *CancelOutstandingWorkerPollsResponse switch t := that.(type) { - case *DescribeTaskQueueResponse: + case *CancelOutstandingWorkerPollsResponse: that1 = t - case DescribeTaskQueueResponse: + case CancelOutstandingWorkerPollsResponse: that1 = &t default: return false @@ -621,35 +634,35 @@ func (this *DescribeTaskQueueResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ListTaskQueuePartitionsRequest to the protobuf v3 wire format -func (val *ListTaskQueuePartitionsRequest) Marshal() ([]byte, error) { +// Marshal an object of type DescribeTaskQueueRequest to the protobuf v3 wire format +func (val *DescribeTaskQueueRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } 
-// Unmarshal an object of type ListTaskQueuePartitionsRequest from the protobuf v3 wire format -func (val *ListTaskQueuePartitionsRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeTaskQueueRequest from the protobuf v3 wire format +func (val *DescribeTaskQueueRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ListTaskQueuePartitionsRequest) Size() int { +func (val *DescribeTaskQueueRequest) Size() int { return proto.Size(val) } -// Equal returns whether two ListTaskQueuePartitionsRequest values are equivalent by recursively +// Equal returns whether two DescribeTaskQueueRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ListTaskQueuePartitionsRequest) Equal(that interface{}) bool { +func (this *DescribeTaskQueueRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ListTaskQueuePartitionsRequest + var that1 *DescribeTaskQueueRequest switch t := that.(type) { - case *ListTaskQueuePartitionsRequest: + case *DescribeTaskQueueRequest: that1 = t - case ListTaskQueuePartitionsRequest: + case DescribeTaskQueueRequest: that1 = &t default: return false @@ -658,35 +671,35 @@ func (this *ListTaskQueuePartitionsRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ListTaskQueuePartitionsResponse to the protobuf v3 wire format -func (val *ListTaskQueuePartitionsResponse) Marshal() ([]byte, error) { +// Marshal an object of type DescribeTaskQueueResponse to the protobuf v3 wire format +func (val *DescribeTaskQueueResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ListTaskQueuePartitionsResponse from the protobuf v3 wire format -func (val *ListTaskQueuePartitionsResponse) 
Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeTaskQueueResponse from the protobuf v3 wire format +func (val *DescribeTaskQueueResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ListTaskQueuePartitionsResponse) Size() int { +func (val *DescribeTaskQueueResponse) Size() int { return proto.Size(val) } -// Equal returns whether two ListTaskQueuePartitionsResponse values are equivalent by recursively +// Equal returns whether two DescribeTaskQueueResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ListTaskQueuePartitionsResponse) Equal(that interface{}) bool { +func (this *DescribeTaskQueueResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ListTaskQueuePartitionsResponse + var that1 *DescribeTaskQueueResponse switch t := that.(type) { - case *ListTaskQueuePartitionsResponse: + case *DescribeTaskQueueResponse: that1 = t - case ListTaskQueuePartitionsResponse: + case DescribeTaskQueueResponse: that1 = &t default: return false @@ -695,35 +708,35 @@ func (this *ListTaskQueuePartitionsResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type UpdateWorkerBuildIdCompatibilityRequest to the protobuf v3 wire format -func (val *UpdateWorkerBuildIdCompatibilityRequest) Marshal() ([]byte, error) { +// Marshal an object of type DescribeVersionedTaskQueuesRequest to the protobuf v3 wire format +func (val *DescribeVersionedTaskQueuesRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type UpdateWorkerBuildIdCompatibilityRequest from the protobuf v3 wire format -func (val *UpdateWorkerBuildIdCompatibilityRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type 
DescribeVersionedTaskQueuesRequest from the protobuf v3 wire format +func (val *DescribeVersionedTaskQueuesRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *UpdateWorkerBuildIdCompatibilityRequest) Size() int { +func (val *DescribeVersionedTaskQueuesRequest) Size() int { return proto.Size(val) } -// Equal returns whether two UpdateWorkerBuildIdCompatibilityRequest values are equivalent by recursively +// Equal returns whether two DescribeVersionedTaskQueuesRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateWorkerBuildIdCompatibilityRequest) Equal(that interface{}) bool { +func (this *DescribeVersionedTaskQueuesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *UpdateWorkerBuildIdCompatibilityRequest + var that1 *DescribeVersionedTaskQueuesRequest switch t := that.(type) { - case *UpdateWorkerBuildIdCompatibilityRequest: + case *DescribeVersionedTaskQueuesRequest: that1 = t - case UpdateWorkerBuildIdCompatibilityRequest: + case DescribeVersionedTaskQueuesRequest: that1 = &t default: return false @@ -732,35 +745,35 @@ func (this *UpdateWorkerBuildIdCompatibilityRequest) Equal(that interface{}) boo return proto.Equal(this, that1) } -// Marshal an object of type UpdateWorkerBuildIdCompatibilityResponse to the protobuf v3 wire format -func (val *UpdateWorkerBuildIdCompatibilityResponse) Marshal() ([]byte, error) { +// Marshal an object of type DescribeVersionedTaskQueuesResponse to the protobuf v3 wire format +func (val *DescribeVersionedTaskQueuesResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type UpdateWorkerBuildIdCompatibilityResponse from the protobuf v3 wire format -func (val *UpdateWorkerBuildIdCompatibilityResponse) 
Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeVersionedTaskQueuesResponse from the protobuf v3 wire format +func (val *DescribeVersionedTaskQueuesResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *UpdateWorkerBuildIdCompatibilityResponse) Size() int { +func (val *DescribeVersionedTaskQueuesResponse) Size() int { return proto.Size(val) } -// Equal returns whether two UpdateWorkerBuildIdCompatibilityResponse values are equivalent by recursively +// Equal returns whether two DescribeVersionedTaskQueuesResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateWorkerBuildIdCompatibilityResponse) Equal(that interface{}) bool { +func (this *DescribeVersionedTaskQueuesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *UpdateWorkerBuildIdCompatibilityResponse + var that1 *DescribeVersionedTaskQueuesResponse switch t := that.(type) { - case *UpdateWorkerBuildIdCompatibilityResponse: + case *DescribeVersionedTaskQueuesResponse: that1 = t - case UpdateWorkerBuildIdCompatibilityResponse: + case DescribeVersionedTaskQueuesResponse: that1 = &t default: return false @@ -769,35 +782,35 @@ func (this *UpdateWorkerBuildIdCompatibilityResponse) Equal(that interface{}) bo return proto.Equal(this, that1) } -// Marshal an object of type GetWorkerBuildIdCompatibilityRequest to the protobuf v3 wire format -func (val *GetWorkerBuildIdCompatibilityRequest) Marshal() ([]byte, error) { +// Marshal an object of type DescribeTaskQueuePartitionRequest to the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type GetWorkerBuildIdCompatibilityRequest from the protobuf v3 wire format 
-func (val *GetWorkerBuildIdCompatibilityRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeTaskQueuePartitionRequest from the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *GetWorkerBuildIdCompatibilityRequest) Size() int { +func (val *DescribeTaskQueuePartitionRequest) Size() int { return proto.Size(val) } -// Equal returns whether two GetWorkerBuildIdCompatibilityRequest values are equivalent by recursively +// Equal returns whether two DescribeTaskQueuePartitionRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *GetWorkerBuildIdCompatibilityRequest) Equal(that interface{}) bool { +func (this *DescribeTaskQueuePartitionRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *GetWorkerBuildIdCompatibilityRequest + var that1 *DescribeTaskQueuePartitionRequest switch t := that.(type) { - case *GetWorkerBuildIdCompatibilityRequest: + case *DescribeTaskQueuePartitionRequest: that1 = t - case GetWorkerBuildIdCompatibilityRequest: + case DescribeTaskQueuePartitionRequest: that1 = &t default: return false @@ -806,35 +819,35 @@ func (this *GetWorkerBuildIdCompatibilityRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type GetWorkerBuildIdCompatibilityResponse to the protobuf v3 wire format -func (val *GetWorkerBuildIdCompatibilityResponse) Marshal() ([]byte, error) { +// Marshal an object of type DescribeTaskQueuePartitionResponse to the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type GetWorkerBuildIdCompatibilityResponse from the protobuf 
v3 wire format -func (val *GetWorkerBuildIdCompatibilityResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeTaskQueuePartitionResponse from the protobuf v3 wire format +func (val *DescribeTaskQueuePartitionResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *GetWorkerBuildIdCompatibilityResponse) Size() int { +func (val *DescribeTaskQueuePartitionResponse) Size() int { return proto.Size(val) } -// Equal returns whether two GetWorkerBuildIdCompatibilityResponse values are equivalent by recursively +// Equal returns whether two DescribeTaskQueuePartitionResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *GetWorkerBuildIdCompatibilityResponse) Equal(that interface{}) bool { +func (this *DescribeTaskQueuePartitionResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *GetWorkerBuildIdCompatibilityResponse + var that1 *DescribeTaskQueuePartitionResponse switch t := that.(type) { - case *GetWorkerBuildIdCompatibilityResponse: + case *DescribeTaskQueuePartitionResponse: that1 = t - case GetWorkerBuildIdCompatibilityResponse: + case DescribeTaskQueuePartitionResponse: that1 = &t default: return false @@ -843,35 +856,35 @@ func (this *GetWorkerBuildIdCompatibilityResponse) Equal(that interface{}) bool return proto.Equal(this, that1) } -// Marshal an object of type GetTaskQueueUserDataRequest to the protobuf v3 wire format -func (val *GetTaskQueueUserDataRequest) Marshal() ([]byte, error) { +// Marshal an object of type ListTaskQueuePartitionsRequest to the protobuf v3 wire format +func (val *ListTaskQueuePartitionsRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type GetTaskQueueUserDataRequest from the protobuf v3 wire 
format -func (val *GetTaskQueueUserDataRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type ListTaskQueuePartitionsRequest from the protobuf v3 wire format +func (val *ListTaskQueuePartitionsRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *GetTaskQueueUserDataRequest) Size() int { +func (val *ListTaskQueuePartitionsRequest) Size() int { return proto.Size(val) } -// Equal returns whether two GetTaskQueueUserDataRequest values are equivalent by recursively +// Equal returns whether two ListTaskQueuePartitionsRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *GetTaskQueueUserDataRequest) Equal(that interface{}) bool { +func (this *ListTaskQueuePartitionsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *GetTaskQueueUserDataRequest + var that1 *ListTaskQueuePartitionsRequest switch t := that.(type) { - case *GetTaskQueueUserDataRequest: + case *ListTaskQueuePartitionsRequest: that1 = t - case GetTaskQueueUserDataRequest: + case ListTaskQueuePartitionsRequest: that1 = &t default: return false @@ -880,35 +893,35 @@ func (this *GetTaskQueueUserDataRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type GetTaskQueueUserDataResponse to the protobuf v3 wire format -func (val *GetTaskQueueUserDataResponse) Marshal() ([]byte, error) { +// Marshal an object of type ListTaskQueuePartitionsResponse to the protobuf v3 wire format +func (val *ListTaskQueuePartitionsResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type GetTaskQueueUserDataResponse from the protobuf v3 wire format -func (val *GetTaskQueueUserDataResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type 
ListTaskQueuePartitionsResponse from the protobuf v3 wire format +func (val *ListTaskQueuePartitionsResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *GetTaskQueueUserDataResponse) Size() int { +func (val *ListTaskQueuePartitionsResponse) Size() int { return proto.Size(val) } -// Equal returns whether two GetTaskQueueUserDataResponse values are equivalent by recursively +// Equal returns whether two ListTaskQueuePartitionsResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *GetTaskQueueUserDataResponse) Equal(that interface{}) bool { +func (this *ListTaskQueuePartitionsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *GetTaskQueueUserDataResponse + var that1 *ListTaskQueuePartitionsResponse switch t := that.(type) { - case *GetTaskQueueUserDataResponse: + case *ListTaskQueuePartitionsResponse: that1 = t - case GetTaskQueueUserDataResponse: + case ListTaskQueuePartitionsResponse: that1 = &t default: return false @@ -917,35 +930,35 @@ func (this *GetTaskQueueUserDataResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ApplyTaskQueueUserDataReplicationEventRequest to the protobuf v3 wire format -func (val *ApplyTaskQueueUserDataReplicationEventRequest) Marshal() ([]byte, error) { +// Marshal an object of type UpdateWorkerBuildIdCompatibilityRequest to the protobuf v3 wire format +func (val *UpdateWorkerBuildIdCompatibilityRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ApplyTaskQueueUserDataReplicationEventRequest from the protobuf v3 wire format -func (val *ApplyTaskQueueUserDataReplicationEventRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type 
UpdateWorkerBuildIdCompatibilityRequest from the protobuf v3 wire format +func (val *UpdateWorkerBuildIdCompatibilityRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ApplyTaskQueueUserDataReplicationEventRequest) Size() int { +func (val *UpdateWorkerBuildIdCompatibilityRequest) Size() int { return proto.Size(val) } -// Equal returns whether two ApplyTaskQueueUserDataReplicationEventRequest values are equivalent by recursively +// Equal returns whether two UpdateWorkerBuildIdCompatibilityRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ApplyTaskQueueUserDataReplicationEventRequest) Equal(that interface{}) bool { +func (this *UpdateWorkerBuildIdCompatibilityRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ApplyTaskQueueUserDataReplicationEventRequest + var that1 *UpdateWorkerBuildIdCompatibilityRequest switch t := that.(type) { - case *ApplyTaskQueueUserDataReplicationEventRequest: + case *UpdateWorkerBuildIdCompatibilityRequest: that1 = t - case ApplyTaskQueueUserDataReplicationEventRequest: + case UpdateWorkerBuildIdCompatibilityRequest: that1 = &t default: return false @@ -954,35 +967,35 @@ func (this *ApplyTaskQueueUserDataReplicationEventRequest) Equal(that interface{ return proto.Equal(this, that1) } -// Marshal an object of type ApplyTaskQueueUserDataReplicationEventResponse to the protobuf v3 wire format -func (val *ApplyTaskQueueUserDataReplicationEventResponse) Marshal() ([]byte, error) { +// Marshal an object of type UpdateWorkerBuildIdCompatibilityResponse to the protobuf v3 wire format +func (val *UpdateWorkerBuildIdCompatibilityResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type 
ApplyTaskQueueUserDataReplicationEventResponse from the protobuf v3 wire format -func (val *ApplyTaskQueueUserDataReplicationEventResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateWorkerBuildIdCompatibilityResponse from the protobuf v3 wire format +func (val *UpdateWorkerBuildIdCompatibilityResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ApplyTaskQueueUserDataReplicationEventResponse) Size() int { +func (val *UpdateWorkerBuildIdCompatibilityResponse) Size() int { return proto.Size(val) } -// Equal returns whether two ApplyTaskQueueUserDataReplicationEventResponse values are equivalent by recursively +// Equal returns whether two UpdateWorkerBuildIdCompatibilityResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ApplyTaskQueueUserDataReplicationEventResponse) Equal(that interface{}) bool { +func (this *UpdateWorkerBuildIdCompatibilityResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ApplyTaskQueueUserDataReplicationEventResponse + var that1 *UpdateWorkerBuildIdCompatibilityResponse switch t := that.(type) { - case *ApplyTaskQueueUserDataReplicationEventResponse: + case *UpdateWorkerBuildIdCompatibilityResponse: that1 = t - case ApplyTaskQueueUserDataReplicationEventResponse: + case UpdateWorkerBuildIdCompatibilityResponse: that1 = &t default: return false @@ -991,35 +1004,35 @@ func (this *ApplyTaskQueueUserDataReplicationEventResponse) Equal(that interface return proto.Equal(this, that1) } -// Marshal an object of type GetBuildIdTaskQueueMappingRequest to the protobuf v3 wire format -func (val *GetBuildIdTaskQueueMappingRequest) Marshal() ([]byte, error) { +// Marshal an object of type GetWorkerVersioningRulesRequest to the protobuf v3 wire 
format +func (val *GetWorkerVersioningRulesRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type GetBuildIdTaskQueueMappingRequest from the protobuf v3 wire format -func (val *GetBuildIdTaskQueueMappingRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type GetWorkerVersioningRulesRequest from the protobuf v3 wire format +func (val *GetWorkerVersioningRulesRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *GetBuildIdTaskQueueMappingRequest) Size() int { +func (val *GetWorkerVersioningRulesRequest) Size() int { return proto.Size(val) } -// Equal returns whether two GetBuildIdTaskQueueMappingRequest values are equivalent by recursively +// Equal returns whether two GetWorkerVersioningRulesRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *GetBuildIdTaskQueueMappingRequest) Equal(that interface{}) bool { +func (this *GetWorkerVersioningRulesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *GetBuildIdTaskQueueMappingRequest + var that1 *GetWorkerVersioningRulesRequest switch t := that.(type) { - case *GetBuildIdTaskQueueMappingRequest: + case *GetWorkerVersioningRulesRequest: that1 = t - case GetBuildIdTaskQueueMappingRequest: + case GetWorkerVersioningRulesRequest: that1 = &t default: return false @@ -1028,35 +1041,35 @@ func (this *GetBuildIdTaskQueueMappingRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type GetBuildIdTaskQueueMappingResponse to the protobuf v3 wire format -func (val *GetBuildIdTaskQueueMappingResponse) Marshal() ([]byte, error) { +// Marshal an object of type GetWorkerVersioningRulesResponse to the protobuf v3 wire format +func (val 
*GetWorkerVersioningRulesResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type GetBuildIdTaskQueueMappingResponse from the protobuf v3 wire format -func (val *GetBuildIdTaskQueueMappingResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type GetWorkerVersioningRulesResponse from the protobuf v3 wire format +func (val *GetWorkerVersioningRulesResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *GetBuildIdTaskQueueMappingResponse) Size() int { +func (val *GetWorkerVersioningRulesResponse) Size() int { return proto.Size(val) } -// Equal returns whether two GetBuildIdTaskQueueMappingResponse values are equivalent by recursively +// Equal returns whether two GetWorkerVersioningRulesResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *GetBuildIdTaskQueueMappingResponse) Equal(that interface{}) bool { +func (this *GetWorkerVersioningRulesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *GetBuildIdTaskQueueMappingResponse + var that1 *GetWorkerVersioningRulesResponse switch t := that.(type) { - case *GetBuildIdTaskQueueMappingResponse: + case *GetWorkerVersioningRulesResponse: that1 = t - case GetBuildIdTaskQueueMappingResponse: + case GetWorkerVersioningRulesResponse: that1 = &t default: return false @@ -1065,35 +1078,35 @@ func (this *GetBuildIdTaskQueueMappingResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ForceUnloadTaskQueueRequest to the protobuf v3 wire format -func (val *ForceUnloadTaskQueueRequest) Marshal() ([]byte, error) { +// Marshal an object of type UpdateWorkerVersioningRulesRequest to the protobuf v3 wire format +func (val 
*UpdateWorkerVersioningRulesRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ForceUnloadTaskQueueRequest from the protobuf v3 wire format -func (val *ForceUnloadTaskQueueRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateWorkerVersioningRulesRequest from the protobuf v3 wire format +func (val *UpdateWorkerVersioningRulesRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ForceUnloadTaskQueueRequest) Size() int { +func (val *UpdateWorkerVersioningRulesRequest) Size() int { return proto.Size(val) } -// Equal returns whether two ForceUnloadTaskQueueRequest values are equivalent by recursively +// Equal returns whether two UpdateWorkerVersioningRulesRequest values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ForceUnloadTaskQueueRequest) Equal(that interface{}) bool { +func (this *UpdateWorkerVersioningRulesRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ForceUnloadTaskQueueRequest + var that1 *UpdateWorkerVersioningRulesRequest switch t := that.(type) { - case *ForceUnloadTaskQueueRequest: + case *UpdateWorkerVersioningRulesRequest: that1 = t - case ForceUnloadTaskQueueRequest: + case UpdateWorkerVersioningRulesRequest: that1 = &t default: return false @@ -1102,35 +1115,35 @@ func (this *ForceUnloadTaskQueueRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ForceUnloadTaskQueueResponse to the protobuf v3 wire format -func (val *ForceUnloadTaskQueueResponse) Marshal() ([]byte, error) { +// Marshal an object of type UpdateWorkerVersioningRulesResponse to the protobuf v3 wire format +func (val *UpdateWorkerVersioningRulesResponse) Marshal() ([]byte, error) { return 
proto.Marshal(val) } -// Unmarshal an object of type ForceUnloadTaskQueueResponse from the protobuf v3 wire format -func (val *ForceUnloadTaskQueueResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateWorkerVersioningRulesResponse from the protobuf v3 wire format +func (val *UpdateWorkerVersioningRulesResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ForceUnloadTaskQueueResponse) Size() int { +func (val *UpdateWorkerVersioningRulesResponse) Size() int { return proto.Size(val) } -// Equal returns whether two ForceUnloadTaskQueueResponse values are equivalent by recursively +// Equal returns whether two UpdateWorkerVersioningRulesResponse values are equivalent by recursively // comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ForceUnloadTaskQueueResponse) Equal(that interface{}) bool { +func (this *UpdateWorkerVersioningRulesResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ForceUnloadTaskQueueResponse + var that1 *UpdateWorkerVersioningRulesResponse switch t := that.(type) { - case *ForceUnloadTaskQueueResponse: + case *UpdateWorkerVersioningRulesResponse: that1 = t - case ForceUnloadTaskQueueResponse: + case UpdateWorkerVersioningRulesResponse: that1 = &t default: return false @@ -1139,13 +1152,605 @@ func (this *ForceUnloadTaskQueueResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type UpdateTaskQueueUserDataRequest to the protobuf v3 wire format -func (val *UpdateTaskQueueUserDataRequest) Marshal() ([]byte, error) { +// Marshal an object of type GetWorkerBuildIdCompatibilityRequest to the protobuf v3 wire format +func (val *GetWorkerBuildIdCompatibilityRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of 
type UpdateTaskQueueUserDataRequest from the protobuf v3 wire format -func (val *UpdateTaskQueueUserDataRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type GetWorkerBuildIdCompatibilityRequest from the protobuf v3 wire format +func (val *GetWorkerBuildIdCompatibilityRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetWorkerBuildIdCompatibilityRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetWorkerBuildIdCompatibilityRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetWorkerBuildIdCompatibilityRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetWorkerBuildIdCompatibilityRequest + switch t := that.(type) { + case *GetWorkerBuildIdCompatibilityRequest: + that1 = t + case GetWorkerBuildIdCompatibilityRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetWorkerBuildIdCompatibilityResponse to the protobuf v3 wire format +func (val *GetWorkerBuildIdCompatibilityResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetWorkerBuildIdCompatibilityResponse from the protobuf v3 wire format +func (val *GetWorkerBuildIdCompatibilityResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetWorkerBuildIdCompatibilityResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetWorkerBuildIdCompatibilityResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetWorkerBuildIdCompatibilityResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetWorkerBuildIdCompatibilityResponse + switch t := that.(type) { + case *GetWorkerBuildIdCompatibilityResponse: + that1 = t + case GetWorkerBuildIdCompatibilityResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetTaskQueueUserDataRequest to the protobuf v3 wire format +func (val *GetTaskQueueUserDataRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetTaskQueueUserDataRequest from the protobuf v3 wire format +func (val *GetTaskQueueUserDataRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetTaskQueueUserDataRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetTaskQueueUserDataRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetTaskQueueUserDataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetTaskQueueUserDataRequest + switch t := that.(type) { + case *GetTaskQueueUserDataRequest: + that1 = t + case GetTaskQueueUserDataRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetTaskQueueUserDataResponse to the protobuf v3 wire format +func (val *GetTaskQueueUserDataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetTaskQueueUserDataResponse from the protobuf v3 wire format +func (val *GetTaskQueueUserDataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetTaskQueueUserDataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetTaskQueueUserDataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetTaskQueueUserDataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetTaskQueueUserDataResponse + switch t := that.(type) { + case *GetTaskQueueUserDataResponse: + that1 = t + case GetTaskQueueUserDataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncDeploymentUserDataRequest to the protobuf v3 wire format +func (val *SyncDeploymentUserDataRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncDeploymentUserDataRequest from the protobuf v3 wire format +func (val *SyncDeploymentUserDataRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncDeploymentUserDataRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncDeploymentUserDataRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncDeploymentUserDataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncDeploymentUserDataRequest + switch t := that.(type) { + case *SyncDeploymentUserDataRequest: + that1 = t + case SyncDeploymentUserDataRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncDeploymentUserDataResponse to the protobuf v3 wire format +func (val *SyncDeploymentUserDataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncDeploymentUserDataResponse from the protobuf v3 wire format +func (val *SyncDeploymentUserDataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncDeploymentUserDataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncDeploymentUserDataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncDeploymentUserDataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncDeploymentUserDataResponse + switch t := that.(type) { + case *SyncDeploymentUserDataResponse: + that1 = t + case SyncDeploymentUserDataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ApplyTaskQueueUserDataReplicationEventRequest to the protobuf v3 wire format +func (val *ApplyTaskQueueUserDataReplicationEventRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ApplyTaskQueueUserDataReplicationEventRequest from the protobuf v3 wire format +func (val *ApplyTaskQueueUserDataReplicationEventRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ApplyTaskQueueUserDataReplicationEventRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ApplyTaskQueueUserDataReplicationEventRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ApplyTaskQueueUserDataReplicationEventRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ApplyTaskQueueUserDataReplicationEventRequest + switch t := that.(type) { + case *ApplyTaskQueueUserDataReplicationEventRequest: + that1 = t + case ApplyTaskQueueUserDataReplicationEventRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ApplyTaskQueueUserDataReplicationEventResponse to the protobuf v3 wire format +func (val *ApplyTaskQueueUserDataReplicationEventResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ApplyTaskQueueUserDataReplicationEventResponse from the protobuf v3 wire format +func (val *ApplyTaskQueueUserDataReplicationEventResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ApplyTaskQueueUserDataReplicationEventResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ApplyTaskQueueUserDataReplicationEventResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ApplyTaskQueueUserDataReplicationEventResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ApplyTaskQueueUserDataReplicationEventResponse + switch t := that.(type) { + case *ApplyTaskQueueUserDataReplicationEventResponse: + that1 = t + case ApplyTaskQueueUserDataReplicationEventResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetBuildIdTaskQueueMappingRequest to the protobuf v3 wire format +func (val *GetBuildIdTaskQueueMappingRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetBuildIdTaskQueueMappingRequest from the protobuf v3 wire format +func (val *GetBuildIdTaskQueueMappingRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetBuildIdTaskQueueMappingRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetBuildIdTaskQueueMappingRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetBuildIdTaskQueueMappingRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetBuildIdTaskQueueMappingRequest + switch t := that.(type) { + case *GetBuildIdTaskQueueMappingRequest: + that1 = t + case GetBuildIdTaskQueueMappingRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GetBuildIdTaskQueueMappingResponse to the protobuf v3 wire format +func (val *GetBuildIdTaskQueueMappingResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GetBuildIdTaskQueueMappingResponse from the protobuf v3 wire format +func (val *GetBuildIdTaskQueueMappingResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GetBuildIdTaskQueueMappingResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GetBuildIdTaskQueueMappingResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GetBuildIdTaskQueueMappingResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GetBuildIdTaskQueueMappingResponse + switch t := that.(type) { + case *GetBuildIdTaskQueueMappingResponse: + that1 = t + case GetBuildIdTaskQueueMappingResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceLoadTaskQueuePartitionRequest to the protobuf v3 wire format +func (val *ForceLoadTaskQueuePartitionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceLoadTaskQueuePartitionRequest from the protobuf v3 wire format +func (val *ForceLoadTaskQueuePartitionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceLoadTaskQueuePartitionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceLoadTaskQueuePartitionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceLoadTaskQueuePartitionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceLoadTaskQueuePartitionRequest + switch t := that.(type) { + case *ForceLoadTaskQueuePartitionRequest: + that1 = t + case ForceLoadTaskQueuePartitionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceLoadTaskQueuePartitionResponse to the protobuf v3 wire format +func (val *ForceLoadTaskQueuePartitionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceLoadTaskQueuePartitionResponse from the protobuf v3 wire format +func (val *ForceLoadTaskQueuePartitionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceLoadTaskQueuePartitionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceLoadTaskQueuePartitionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceLoadTaskQueuePartitionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceLoadTaskQueuePartitionResponse + switch t := that.(type) { + case *ForceLoadTaskQueuePartitionResponse: + that1 = t + case ForceLoadTaskQueuePartitionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceUnloadTaskQueueRequest to the protobuf v3 wire format +func (val *ForceUnloadTaskQueueRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceUnloadTaskQueueRequest from the protobuf v3 wire format +func (val *ForceUnloadTaskQueueRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceUnloadTaskQueueRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceUnloadTaskQueueRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceUnloadTaskQueueRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceUnloadTaskQueueRequest + switch t := that.(type) { + case *ForceUnloadTaskQueueRequest: + that1 = t + case ForceUnloadTaskQueueRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceUnloadTaskQueueResponse to the protobuf v3 wire format +func (val *ForceUnloadTaskQueueResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceUnloadTaskQueueResponse from the protobuf v3 wire format +func (val *ForceUnloadTaskQueueResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceUnloadTaskQueueResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceUnloadTaskQueueResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceUnloadTaskQueueResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceUnloadTaskQueueResponse + switch t := that.(type) { + case *ForceUnloadTaskQueueResponse: + that1 = t + case ForceUnloadTaskQueueResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceUnloadTaskQueuePartitionRequest to the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceUnloadTaskQueuePartitionRequest from the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceUnloadTaskQueuePartitionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceUnloadTaskQueuePartitionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceUnloadTaskQueuePartitionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceUnloadTaskQueuePartitionRequest + switch t := that.(type) { + case *ForceUnloadTaskQueuePartitionRequest: + that1 = t + case ForceUnloadTaskQueuePartitionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ForceUnloadTaskQueuePartitionResponse to the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ForceUnloadTaskQueuePartitionResponse from the protobuf v3 wire format +func (val *ForceUnloadTaskQueuePartitionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ForceUnloadTaskQueuePartitionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ForceUnloadTaskQueuePartitionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ForceUnloadTaskQueuePartitionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ForceUnloadTaskQueuePartitionResponse + switch t := that.(type) { + case *ForceUnloadTaskQueuePartitionResponse: + that1 = t + case ForceUnloadTaskQueuePartitionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateTaskQueueUserDataRequest to the protobuf v3 wire format +func (val *UpdateTaskQueueUserDataRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateTaskQueueUserDataRequest from the protobuf v3 wire format +func (val *UpdateTaskQueueUserDataRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } @@ -1154,20 +1759,427 @@ func (val *UpdateTaskQueueUserDataRequest) Size() int { return proto.Size(val) } -// Equal returns whether two UpdateTaskQueueUserDataRequest values are equivalent by recursively +// Equal returns whether two UpdateTaskQueueUserDataRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateTaskQueueUserDataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateTaskQueueUserDataRequest + switch t := that.(type) { + case *UpdateTaskQueueUserDataRequest: + that1 = t + case UpdateTaskQueueUserDataRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateTaskQueueUserDataResponse to the protobuf v3 wire format +func (val *UpdateTaskQueueUserDataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateTaskQueueUserDataResponse from the protobuf v3 wire format +func (val *UpdateTaskQueueUserDataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateTaskQueueUserDataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateTaskQueueUserDataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateTaskQueueUserDataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateTaskQueueUserDataResponse + switch t := that.(type) { + case *UpdateTaskQueueUserDataResponse: + that1 = t + case UpdateTaskQueueUserDataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReplicateTaskQueueUserDataRequest to the protobuf v3 wire format +func (val *ReplicateTaskQueueUserDataRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReplicateTaskQueueUserDataRequest from the protobuf v3 wire format +func (val *ReplicateTaskQueueUserDataRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReplicateTaskQueueUserDataRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReplicateTaskQueueUserDataRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReplicateTaskQueueUserDataRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReplicateTaskQueueUserDataRequest + switch t := that.(type) { + case *ReplicateTaskQueueUserDataRequest: + that1 = t + case ReplicateTaskQueueUserDataRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ReplicateTaskQueueUserDataResponse to the protobuf v3 wire format +func (val *ReplicateTaskQueueUserDataResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReplicateTaskQueueUserDataResponse from the protobuf v3 wire format +func (val *ReplicateTaskQueueUserDataResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReplicateTaskQueueUserDataResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReplicateTaskQueueUserDataResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReplicateTaskQueueUserDataResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReplicateTaskQueueUserDataResponse + switch t := that.(type) { + case *ReplicateTaskQueueUserDataResponse: + that1 = t + case ReplicateTaskQueueUserDataResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CheckTaskQueueUserDataPropagationRequest to the protobuf v3 wire format +func (val *CheckTaskQueueUserDataPropagationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CheckTaskQueueUserDataPropagationRequest from the protobuf v3 wire format +func (val *CheckTaskQueueUserDataPropagationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CheckTaskQueueUserDataPropagationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CheckTaskQueueUserDataPropagationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CheckTaskQueueUserDataPropagationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CheckTaskQueueUserDataPropagationRequest + switch t := that.(type) { + case *CheckTaskQueueUserDataPropagationRequest: + that1 = t + case CheckTaskQueueUserDataPropagationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CheckTaskQueueUserDataPropagationResponse to the protobuf v3 wire format +func (val *CheckTaskQueueUserDataPropagationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CheckTaskQueueUserDataPropagationResponse from the protobuf v3 wire format +func (val *CheckTaskQueueUserDataPropagationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CheckTaskQueueUserDataPropagationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CheckTaskQueueUserDataPropagationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CheckTaskQueueUserDataPropagationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CheckTaskQueueUserDataPropagationResponse + switch t := that.(type) { + case *CheckTaskQueueUserDataPropagationResponse: + that1 = t + case CheckTaskQueueUserDataPropagationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DispatchNexusTaskRequest to the protobuf v3 wire format +func (val *DispatchNexusTaskRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DispatchNexusTaskRequest from the protobuf v3 wire format +func (val *DispatchNexusTaskRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DispatchNexusTaskRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DispatchNexusTaskRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DispatchNexusTaskRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DispatchNexusTaskRequest + switch t := that.(type) { + case *DispatchNexusTaskRequest: + that1 = t + case DispatchNexusTaskRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DispatchNexusTaskResponse to the protobuf v3 wire format +func (val *DispatchNexusTaskResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DispatchNexusTaskResponse from the protobuf v3 wire format +func (val *DispatchNexusTaskResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DispatchNexusTaskResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DispatchNexusTaskResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DispatchNexusTaskResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DispatchNexusTaskResponse + switch t := that.(type) { + case *DispatchNexusTaskResponse: + that1 = t + case DispatchNexusTaskResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollNexusTaskQueueRequest to the protobuf v3 wire format +func (val *PollNexusTaskQueueRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollNexusTaskQueueRequest from the protobuf v3 wire format +func (val *PollNexusTaskQueueRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollNexusTaskQueueRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollNexusTaskQueueRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollNexusTaskQueueRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollNexusTaskQueueRequest + switch t := that.(type) { + case *PollNexusTaskQueueRequest: + that1 = t + case PollNexusTaskQueueRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollNexusTaskQueueResponse to the protobuf v3 wire format +func (val *PollNexusTaskQueueResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollNexusTaskQueueResponse from the protobuf v3 wire format +func (val *PollNexusTaskQueueResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollNexusTaskQueueResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollNexusTaskQueueResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollNexusTaskQueueResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollNexusTaskQueueResponse + switch t := that.(type) { + case *PollNexusTaskQueueResponse: + that1 = t + case PollNexusTaskQueueResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RespondNexusTaskCompletedRequest to the protobuf v3 wire format +func (val *RespondNexusTaskCompletedRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RespondNexusTaskCompletedRequest from the protobuf v3 wire format +func (val *RespondNexusTaskCompletedRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RespondNexusTaskCompletedRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RespondNexusTaskCompletedRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RespondNexusTaskCompletedRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RespondNexusTaskCompletedRequest + switch t := that.(type) { + case *RespondNexusTaskCompletedRequest: + that1 = t + case RespondNexusTaskCompletedRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RespondNexusTaskCompletedResponse to the protobuf v3 wire format +func (val *RespondNexusTaskCompletedResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RespondNexusTaskCompletedResponse from the protobuf v3 wire format +func (val *RespondNexusTaskCompletedResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RespondNexusTaskCompletedResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RespondNexusTaskCompletedResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateTaskQueueUserDataRequest) Equal(that interface{}) bool { +func (this *RespondNexusTaskCompletedResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *UpdateTaskQueueUserDataRequest + var that1 *RespondNexusTaskCompletedResponse switch t := that.(type) { - case *UpdateTaskQueueUserDataRequest: + case *RespondNexusTaskCompletedResponse: that1 = t - case UpdateTaskQueueUserDataRequest: + case RespondNexusTaskCompletedResponse: that1 = &t default: return false @@ -1176,35 +2188,35 @@ func (this *UpdateTaskQueueUserDataRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type UpdateTaskQueueUserDataResponse to the protobuf v3 wire format -func (val *UpdateTaskQueueUserDataResponse) Marshal() ([]byte, error) { +// Marshal an object of type RespondNexusTaskFailedRequest to the protobuf v3 wire format +func (val *RespondNexusTaskFailedRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type UpdateTaskQueueUserDataResponse from the protobuf v3 wire format -func (val *UpdateTaskQueueUserDataResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type RespondNexusTaskFailedRequest from the protobuf v3 wire format +func (val *RespondNexusTaskFailedRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *UpdateTaskQueueUserDataResponse) Size() int { +func (val *RespondNexusTaskFailedRequest) Size() int { return proto.Size(val) } -// Equal returns whether two UpdateTaskQueueUserDataResponse values are equivalent by recursively +// Equal returns whether two RespondNexusTaskFailedRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateTaskQueueUserDataResponse) Equal(that interface{}) bool { +func (this *RespondNexusTaskFailedRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *UpdateTaskQueueUserDataResponse + var that1 *RespondNexusTaskFailedRequest switch t := that.(type) { - case *UpdateTaskQueueUserDataResponse: + case *RespondNexusTaskFailedRequest: that1 = t - case UpdateTaskQueueUserDataResponse: + case RespondNexusTaskFailedRequest: that1 = &t default: return false @@ -1213,35 +2225,35 @@ func (this *UpdateTaskQueueUserDataResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ReplicateTaskQueueUserDataRequest to the protobuf v3 wire format -func (val *ReplicateTaskQueueUserDataRequest) Marshal() ([]byte, error) { +// Marshal an object of type RespondNexusTaskFailedResponse to the protobuf v3 wire format +func (val *RespondNexusTaskFailedResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ReplicateTaskQueueUserDataRequest from the protobuf v3 wire format -func (val *ReplicateTaskQueueUserDataRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type RespondNexusTaskFailedResponse from the protobuf v3 wire format +func (val *RespondNexusTaskFailedResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ReplicateTaskQueueUserDataRequest) Size() int { +func (val *RespondNexusTaskFailedResponse) Size() int { return proto.Size(val) } -// Equal returns whether two ReplicateTaskQueueUserDataRequest values are equivalent by recursively +// Equal returns whether two RespondNexusTaskFailedResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ReplicateTaskQueueUserDataRequest) Equal(that interface{}) bool { +func (this *RespondNexusTaskFailedResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ReplicateTaskQueueUserDataRequest + var that1 *RespondNexusTaskFailedResponse switch t := that.(type) { - case *ReplicateTaskQueueUserDataRequest: + case *RespondNexusTaskFailedResponse: that1 = t - case ReplicateTaskQueueUserDataRequest: + case RespondNexusTaskFailedResponse: that1 = &t default: return false @@ -1250,35 +2262,35 @@ func (this *ReplicateTaskQueueUserDataRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ReplicateTaskQueueUserDataResponse to the protobuf v3 wire format -func (val *ReplicateTaskQueueUserDataResponse) Marshal() ([]byte, error) { +// Marshal an object of type CreateNexusEndpointRequest to the protobuf v3 wire format +func (val *CreateNexusEndpointRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ReplicateTaskQueueUserDataResponse from the protobuf v3 wire format -func (val *ReplicateTaskQueueUserDataResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type CreateNexusEndpointRequest from the protobuf v3 wire format +func (val *CreateNexusEndpointRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ReplicateTaskQueueUserDataResponse) Size() int { +func (val *CreateNexusEndpointRequest) Size() int { return proto.Size(val) } -// Equal returns whether two ReplicateTaskQueueUserDataResponse values are equivalent by recursively +// Equal returns whether two CreateNexusEndpointRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ReplicateTaskQueueUserDataResponse) Equal(that interface{}) bool { +func (this *CreateNexusEndpointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ReplicateTaskQueueUserDataResponse + var that1 *CreateNexusEndpointRequest switch t := that.(type) { - case *ReplicateTaskQueueUserDataResponse: + case *CreateNexusEndpointRequest: that1 = t - case ReplicateTaskQueueUserDataResponse: + case CreateNexusEndpointRequest: that1 = &t default: return false @@ -1287,35 +2299,35 @@ func (this *ReplicateTaskQueueUserDataResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type DispatchNexusTaskRequest to the protobuf v3 wire format -func (val *DispatchNexusTaskRequest) Marshal() ([]byte, error) { +// Marshal an object of type CreateNexusEndpointResponse to the protobuf v3 wire format +func (val *CreateNexusEndpointResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type DispatchNexusTaskRequest from the protobuf v3 wire format -func (val *DispatchNexusTaskRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type CreateNexusEndpointResponse from the protobuf v3 wire format +func (val *CreateNexusEndpointResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *DispatchNexusTaskRequest) Size() int { +func (val *CreateNexusEndpointResponse) Size() int { return proto.Size(val) } -// Equal returns whether two DispatchNexusTaskRequest values are equivalent by recursively +// Equal returns whether two CreateNexusEndpointResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *DispatchNexusTaskRequest) Equal(that interface{}) bool { +func (this *CreateNexusEndpointResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *DispatchNexusTaskRequest + var that1 *CreateNexusEndpointResponse switch t := that.(type) { - case *DispatchNexusTaskRequest: + case *CreateNexusEndpointResponse: that1 = t - case DispatchNexusTaskRequest: + case CreateNexusEndpointResponse: that1 = &t default: return false @@ -1324,35 +2336,35 @@ func (this *DispatchNexusTaskRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type DispatchNexusTaskResponse to the protobuf v3 wire format -func (val *DispatchNexusTaskResponse) Marshal() ([]byte, error) { +// Marshal an object of type UpdateNexusEndpointRequest to the protobuf v3 wire format +func (val *UpdateNexusEndpointRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type DispatchNexusTaskResponse from the protobuf v3 wire format -func (val *DispatchNexusTaskResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateNexusEndpointRequest from the protobuf v3 wire format +func (val *UpdateNexusEndpointRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *DispatchNexusTaskResponse) Size() int { +func (val *UpdateNexusEndpointRequest) Size() int { return proto.Size(val) } -// Equal returns whether two DispatchNexusTaskResponse values are equivalent by recursively +// Equal returns whether two UpdateNexusEndpointRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *DispatchNexusTaskResponse) Equal(that interface{}) bool { +func (this *UpdateNexusEndpointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *DispatchNexusTaskResponse + var that1 *UpdateNexusEndpointRequest switch t := that.(type) { - case *DispatchNexusTaskResponse: + case *UpdateNexusEndpointRequest: that1 = t - case DispatchNexusTaskResponse: + case UpdateNexusEndpointRequest: that1 = &t default: return false @@ -1361,35 +2373,35 @@ func (this *DispatchNexusTaskResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type PollNexusTaskQueueRequest to the protobuf v3 wire format -func (val *PollNexusTaskQueueRequest) Marshal() ([]byte, error) { +// Marshal an object of type UpdateNexusEndpointResponse to the protobuf v3 wire format +func (val *UpdateNexusEndpointResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type PollNexusTaskQueueRequest from the protobuf v3 wire format -func (val *PollNexusTaskQueueRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateNexusEndpointResponse from the protobuf v3 wire format +func (val *UpdateNexusEndpointResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *PollNexusTaskQueueRequest) Size() int { +func (val *UpdateNexusEndpointResponse) Size() int { return proto.Size(val) } -// Equal returns whether two PollNexusTaskQueueRequest values are equivalent by recursively +// Equal returns whether two UpdateNexusEndpointResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *PollNexusTaskQueueRequest) Equal(that interface{}) bool { +func (this *UpdateNexusEndpointResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *PollNexusTaskQueueRequest + var that1 *UpdateNexusEndpointResponse switch t := that.(type) { - case *PollNexusTaskQueueRequest: + case *UpdateNexusEndpointResponse: that1 = t - case PollNexusTaskQueueRequest: + case UpdateNexusEndpointResponse: that1 = &t default: return false @@ -1398,35 +2410,35 @@ func (this *PollNexusTaskQueueRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type PollNexusTaskQueueResponse to the protobuf v3 wire format -func (val *PollNexusTaskQueueResponse) Marshal() ([]byte, error) { +// Marshal an object of type DeleteNexusEndpointRequest to the protobuf v3 wire format +func (val *DeleteNexusEndpointRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type PollNexusTaskQueueResponse from the protobuf v3 wire format -func (val *PollNexusTaskQueueResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type DeleteNexusEndpointRequest from the protobuf v3 wire format +func (val *DeleteNexusEndpointRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *PollNexusTaskQueueResponse) Size() int { +func (val *DeleteNexusEndpointRequest) Size() int { return proto.Size(val) } -// Equal returns whether two PollNexusTaskQueueResponse values are equivalent by recursively +// Equal returns whether two DeleteNexusEndpointRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *PollNexusTaskQueueResponse) Equal(that interface{}) bool { +func (this *DeleteNexusEndpointRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *PollNexusTaskQueueResponse + var that1 *DeleteNexusEndpointRequest switch t := that.(type) { - case *PollNexusTaskQueueResponse: + case *DeleteNexusEndpointRequest: that1 = t - case PollNexusTaskQueueResponse: + case DeleteNexusEndpointRequest: that1 = &t default: return false @@ -1435,35 +2447,35 @@ func (this *PollNexusTaskQueueResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type RespondNexusTaskCompletedRequest to the protobuf v3 wire format -func (val *RespondNexusTaskCompletedRequest) Marshal() ([]byte, error) { +// Marshal an object of type DeleteNexusEndpointResponse to the protobuf v3 wire format +func (val *DeleteNexusEndpointResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type RespondNexusTaskCompletedRequest from the protobuf v3 wire format -func (val *RespondNexusTaskCompletedRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type DeleteNexusEndpointResponse from the protobuf v3 wire format +func (val *DeleteNexusEndpointResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *RespondNexusTaskCompletedRequest) Size() int { +func (val *DeleteNexusEndpointResponse) Size() int { return proto.Size(val) } -// Equal returns whether two RespondNexusTaskCompletedRequest values are equivalent by recursively +// Equal returns whether two DeleteNexusEndpointResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *RespondNexusTaskCompletedRequest) Equal(that interface{}) bool { +func (this *DeleteNexusEndpointResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *RespondNexusTaskCompletedRequest + var that1 *DeleteNexusEndpointResponse switch t := that.(type) { - case *RespondNexusTaskCompletedRequest: + case *DeleteNexusEndpointResponse: that1 = t - case RespondNexusTaskCompletedRequest: + case DeleteNexusEndpointResponse: that1 = &t default: return false @@ -1472,35 +2484,35 @@ func (this *RespondNexusTaskCompletedRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type RespondNexusTaskCompletedResponse to the protobuf v3 wire format -func (val *RespondNexusTaskCompletedResponse) Marshal() ([]byte, error) { +// Marshal an object of type ListNexusEndpointsRequest to the protobuf v3 wire format +func (val *ListNexusEndpointsRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type RespondNexusTaskCompletedResponse from the protobuf v3 wire format -func (val *RespondNexusTaskCompletedResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type ListNexusEndpointsRequest from the protobuf v3 wire format +func (val *ListNexusEndpointsRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *RespondNexusTaskCompletedResponse) Size() int { +func (val *ListNexusEndpointsRequest) Size() int { return proto.Size(val) } -// Equal returns whether two RespondNexusTaskCompletedResponse values are equivalent by recursively +// Equal returns whether two ListNexusEndpointsRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *RespondNexusTaskCompletedResponse) Equal(that interface{}) bool { +func (this *ListNexusEndpointsRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *RespondNexusTaskCompletedResponse + var that1 *ListNexusEndpointsRequest switch t := that.(type) { - case *RespondNexusTaskCompletedResponse: + case *ListNexusEndpointsRequest: that1 = t - case RespondNexusTaskCompletedResponse: + case ListNexusEndpointsRequest: that1 = &t default: return false @@ -1509,35 +2521,35 @@ func (this *RespondNexusTaskCompletedResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type RespondNexusTaskFailedRequest to the protobuf v3 wire format -func (val *RespondNexusTaskFailedRequest) Marshal() ([]byte, error) { +// Marshal an object of type ListNexusEndpointsResponse to the protobuf v3 wire format +func (val *ListNexusEndpointsResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type RespondNexusTaskFailedRequest from the protobuf v3 wire format -func (val *RespondNexusTaskFailedRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type ListNexusEndpointsResponse from the protobuf v3 wire format +func (val *ListNexusEndpointsResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *RespondNexusTaskFailedRequest) Size() int { +func (val *ListNexusEndpointsResponse) Size() int { return proto.Size(val) } -// Equal returns whether two RespondNexusTaskFailedRequest values are equivalent by recursively +// Equal returns whether two ListNexusEndpointsResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *RespondNexusTaskFailedRequest) Equal(that interface{}) bool { +func (this *ListNexusEndpointsResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *RespondNexusTaskFailedRequest + var that1 *ListNexusEndpointsResponse switch t := that.(type) { - case *RespondNexusTaskFailedRequest: + case *ListNexusEndpointsResponse: that1 = t - case RespondNexusTaskFailedRequest: + case ListNexusEndpointsResponse: that1 = &t default: return false @@ -1546,35 +2558,183 @@ func (this *RespondNexusTaskFailedRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type RespondNexusTaskFailedResponse to the protobuf v3 wire format -func (val *RespondNexusTaskFailedResponse) Marshal() ([]byte, error) { +// Marshal an object of type RecordWorkerHeartbeatRequest to the protobuf v3 wire format +func (val *RecordWorkerHeartbeatRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type RespondNexusTaskFailedResponse from the protobuf v3 wire format -func (val *RespondNexusTaskFailedResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type RecordWorkerHeartbeatRequest from the protobuf v3 wire format +func (val *RecordWorkerHeartbeatRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *RespondNexusTaskFailedResponse) Size() int { +func (val *RecordWorkerHeartbeatRequest) Size() int { return proto.Size(val) } -// Equal returns whether two RespondNexusTaskFailedResponse values are equivalent by recursively +// Equal returns whether two RecordWorkerHeartbeatRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *RespondNexusTaskFailedResponse) Equal(that interface{}) bool { +func (this *RecordWorkerHeartbeatRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *RespondNexusTaskFailedResponse + var that1 *RecordWorkerHeartbeatRequest switch t := that.(type) { - case *RespondNexusTaskFailedResponse: + case *RecordWorkerHeartbeatRequest: that1 = t - case RespondNexusTaskFailedResponse: + case RecordWorkerHeartbeatRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RecordWorkerHeartbeatResponse to the protobuf v3 wire format +func (val *RecordWorkerHeartbeatResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RecordWorkerHeartbeatResponse from the protobuf v3 wire format +func (val *RecordWorkerHeartbeatResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RecordWorkerHeartbeatResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RecordWorkerHeartbeatResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RecordWorkerHeartbeatResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RecordWorkerHeartbeatResponse + switch t := that.(type) { + case *RecordWorkerHeartbeatResponse: + that1 = t + case RecordWorkerHeartbeatResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ListWorkersRequest to the protobuf v3 wire format +func (val *ListWorkersRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ListWorkersRequest from the protobuf v3 wire format +func (val *ListWorkersRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ListWorkersRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ListWorkersRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ListWorkersRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ListWorkersRequest + switch t := that.(type) { + case *ListWorkersRequest: + that1 = t + case ListWorkersRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ListWorkersResponse to the protobuf v3 wire format +func (val *ListWorkersResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ListWorkersResponse from the protobuf v3 wire format +func (val *ListWorkersResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ListWorkersResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ListWorkersResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ListWorkersResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ListWorkersResponse + switch t := that.(type) { + case *ListWorkersResponse: + that1 = t + case ListWorkersResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateTaskQueueConfigRequest to the protobuf v3 wire format +func (val *UpdateTaskQueueConfigRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateTaskQueueConfigRequest from the protobuf v3 wire format +func (val *UpdateTaskQueueConfigRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateTaskQueueConfigRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateTaskQueueConfigRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateTaskQueueConfigRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateTaskQueueConfigRequest + switch t := that.(type) { + case *UpdateTaskQueueConfigRequest: + that1 = t + case UpdateTaskQueueConfigRequest: that1 = &t default: return false @@ -1583,35 +2743,35 @@ func (this *RespondNexusTaskFailedResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type CreateNexusIncomingServiceRequest to the protobuf v3 wire format -func (val *CreateNexusIncomingServiceRequest) Marshal() ([]byte, error) { +// Marshal an object of type UpdateTaskQueueConfigResponse to the protobuf v3 wire format +func (val *UpdateTaskQueueConfigResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type CreateNexusIncomingServiceRequest from the protobuf v3 wire format -func (val *CreateNexusIncomingServiceRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateTaskQueueConfigResponse from the protobuf v3 wire format +func (val *UpdateTaskQueueConfigResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *CreateNexusIncomingServiceRequest) Size() int { +func (val *UpdateTaskQueueConfigResponse) Size() int { return proto.Size(val) } -// Equal returns whether two CreateNexusIncomingServiceRequest values are equivalent by recursively +// Equal returns whether two UpdateTaskQueueConfigResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *CreateNexusIncomingServiceRequest) Equal(that interface{}) bool { +func (this *UpdateTaskQueueConfigResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *CreateNexusIncomingServiceRequest + var that1 *UpdateTaskQueueConfigResponse switch t := that.(type) { - case *CreateNexusIncomingServiceRequest: + case *UpdateTaskQueueConfigResponse: that1 = t - case CreateNexusIncomingServiceRequest: + case UpdateTaskQueueConfigResponse: that1 = &t default: return false @@ -1620,35 +2780,35 @@ func (this *CreateNexusIncomingServiceRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type CreateNexusIncomingServiceResponse to the protobuf v3 wire format -func (val *CreateNexusIncomingServiceResponse) Marshal() ([]byte, error) { +// Marshal an object of type DescribeWorkerRequest to the protobuf v3 wire format +func (val *DescribeWorkerRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type CreateNexusIncomingServiceResponse from the protobuf v3 wire format -func (val *CreateNexusIncomingServiceResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeWorkerRequest from the protobuf v3 wire format +func (val *DescribeWorkerRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *CreateNexusIncomingServiceResponse) Size() int { +func (val *DescribeWorkerRequest) Size() int { return proto.Size(val) } -// Equal returns whether two CreateNexusIncomingServiceResponse values are equivalent by recursively +// Equal returns whether two DescribeWorkerRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *CreateNexusIncomingServiceResponse) Equal(that interface{}) bool { +func (this *DescribeWorkerRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *CreateNexusIncomingServiceResponse + var that1 *DescribeWorkerRequest switch t := that.(type) { - case *CreateNexusIncomingServiceResponse: + case *DescribeWorkerRequest: that1 = t - case CreateNexusIncomingServiceResponse: + case DescribeWorkerRequest: that1 = &t default: return false @@ -1657,35 +2817,35 @@ func (this *CreateNexusIncomingServiceResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type UpdateNexusIncomingServiceRequest to the protobuf v3 wire format -func (val *UpdateNexusIncomingServiceRequest) Marshal() ([]byte, error) { +// Marshal an object of type DescribeWorkerResponse to the protobuf v3 wire format +func (val *DescribeWorkerResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type UpdateNexusIncomingServiceRequest from the protobuf v3 wire format -func (val *UpdateNexusIncomingServiceRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type DescribeWorkerResponse from the protobuf v3 wire format +func (val *DescribeWorkerResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *UpdateNexusIncomingServiceRequest) Size() int { +func (val *DescribeWorkerResponse) Size() int { return proto.Size(val) } -// Equal returns whether two UpdateNexusIncomingServiceRequest values are equivalent by recursively +// Equal returns whether two DescribeWorkerResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateNexusIncomingServiceRequest) Equal(that interface{}) bool { +func (this *DescribeWorkerResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *UpdateNexusIncomingServiceRequest + var that1 *DescribeWorkerResponse switch t := that.(type) { - case *UpdateNexusIncomingServiceRequest: + case *DescribeWorkerResponse: that1 = t - case UpdateNexusIncomingServiceRequest: + case DescribeWorkerResponse: that1 = &t default: return false @@ -1694,35 +2854,35 @@ func (this *UpdateNexusIncomingServiceRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type UpdateNexusIncomingServiceResponse to the protobuf v3 wire format -func (val *UpdateNexusIncomingServiceResponse) Marshal() ([]byte, error) { +// Marshal an object of type UpdateFairnessStateRequest to the protobuf v3 wire format +func (val *UpdateFairnessStateRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type UpdateNexusIncomingServiceResponse from the protobuf v3 wire format -func (val *UpdateNexusIncomingServiceResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateFairnessStateRequest from the protobuf v3 wire format +func (val *UpdateFairnessStateRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *UpdateNexusIncomingServiceResponse) Size() int { +func (val *UpdateFairnessStateRequest) Size() int { return proto.Size(val) } -// Equal returns whether two UpdateNexusIncomingServiceResponse values are equivalent by recursively +// Equal returns whether two UpdateFairnessStateRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateNexusIncomingServiceResponse) Equal(that interface{}) bool { +func (this *UpdateFairnessStateRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *UpdateNexusIncomingServiceResponse + var that1 *UpdateFairnessStateRequest switch t := that.(type) { - case *UpdateNexusIncomingServiceResponse: + case *UpdateFairnessStateRequest: that1 = t - case UpdateNexusIncomingServiceResponse: + case UpdateFairnessStateRequest: that1 = &t default: return false @@ -1731,35 +2891,35 @@ func (this *UpdateNexusIncomingServiceResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type DeleteNexusIncomingServiceRequest to the protobuf v3 wire format -func (val *DeleteNexusIncomingServiceRequest) Marshal() ([]byte, error) { +// Marshal an object of type UpdateFairnessStateResponse to the protobuf v3 wire format +func (val *UpdateFairnessStateResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type DeleteNexusIncomingServiceRequest from the protobuf v3 wire format -func (val *DeleteNexusIncomingServiceRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type UpdateFairnessStateResponse from the protobuf v3 wire format +func (val *UpdateFairnessStateResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *DeleteNexusIncomingServiceRequest) Size() int { +func (val *UpdateFairnessStateResponse) Size() int { return proto.Size(val) } -// Equal returns whether two DeleteNexusIncomingServiceRequest values are equivalent by recursively +// Equal returns whether two UpdateFairnessStateResponse values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *DeleteNexusIncomingServiceRequest) Equal(that interface{}) bool { +func (this *UpdateFairnessStateResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *DeleteNexusIncomingServiceRequest + var that1 *UpdateFairnessStateResponse switch t := that.(type) { - case *DeleteNexusIncomingServiceRequest: + case *UpdateFairnessStateResponse: that1 = t - case DeleteNexusIncomingServiceRequest: + case UpdateFairnessStateResponse: that1 = &t default: return false @@ -1768,35 +2928,35 @@ func (this *DeleteNexusIncomingServiceRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type DeleteNexusIncomingServiceResponse to the protobuf v3 wire format -func (val *DeleteNexusIncomingServiceResponse) Marshal() ([]byte, error) { +// Marshal an object of type CheckTaskQueueVersionMembershipRequest to the protobuf v3 wire format +func (val *CheckTaskQueueVersionMembershipRequest) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type DeleteNexusIncomingServiceResponse from the protobuf v3 wire format -func (val *DeleteNexusIncomingServiceResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type CheckTaskQueueVersionMembershipRequest from the protobuf v3 wire format +func (val *CheckTaskQueueVersionMembershipRequest) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *DeleteNexusIncomingServiceResponse) Size() int { +func (val *CheckTaskQueueVersionMembershipRequest) Size() int { return proto.Size(val) } -// Equal returns whether two DeleteNexusIncomingServiceResponse values are equivalent by recursively +// Equal returns whether two CheckTaskQueueVersionMembershipRequest values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *DeleteNexusIncomingServiceResponse) Equal(that interface{}) bool { +func (this *CheckTaskQueueVersionMembershipRequest) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *DeleteNexusIncomingServiceResponse + var that1 *CheckTaskQueueVersionMembershipRequest switch t := that.(type) { - case *DeleteNexusIncomingServiceResponse: + case *CheckTaskQueueVersionMembershipRequest: that1 = t - case DeleteNexusIncomingServiceResponse: + case CheckTaskQueueVersionMembershipRequest: that1 = &t default: return false @@ -1805,35 +2965,35 @@ func (this *DeleteNexusIncomingServiceResponse) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ListNexusIncomingServicesRequest to the protobuf v3 wire format -func (val *ListNexusIncomingServicesRequest) Marshal() ([]byte, error) { +// Marshal an object of type CheckTaskQueueVersionMembershipResponse to the protobuf v3 wire format +func (val *CheckTaskQueueVersionMembershipResponse) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ListNexusIncomingServicesRequest from the protobuf v3 wire format -func (val *ListNexusIncomingServicesRequest) Unmarshal(buf []byte) error { +// Unmarshal an object of type CheckTaskQueueVersionMembershipResponse from the protobuf v3 wire format +func (val *CheckTaskQueueVersionMembershipResponse) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ListNexusIncomingServicesRequest) Size() int { +func (val *CheckTaskQueueVersionMembershipResponse) Size() int { return proto.Size(val) } -// Equal returns whether two ListNexusIncomingServicesRequest values are equivalent by recursively +// Equal returns whether two CheckTaskQueueVersionMembershipResponse values are equivalent by recursively 
// comparing the message's fields. // For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ListNexusIncomingServicesRequest) Equal(that interface{}) bool { +func (this *CheckTaskQueueVersionMembershipResponse) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ListNexusIncomingServicesRequest + var that1 *CheckTaskQueueVersionMembershipResponse switch t := that.(type) { - case *ListNexusIncomingServicesRequest: + case *CheckTaskQueueVersionMembershipResponse: that1 = t - case ListNexusIncomingServicesRequest: + case CheckTaskQueueVersionMembershipResponse: that1 = &t default: return false @@ -1842,35 +3002,35 @@ func (this *ListNexusIncomingServicesRequest) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type ListNexusIncomingServicesResponse to the protobuf v3 wire format -func (val *ListNexusIncomingServicesResponse) Marshal() ([]byte, error) { +// Marshal an object of type PollConditions to the protobuf v3 wire format +func (val *PollConditions) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type ListNexusIncomingServicesResponse from the protobuf v3 wire format -func (val *ListNexusIncomingServicesResponse) Unmarshal(buf []byte) error { +// Unmarshal an object of type PollConditions from the protobuf v3 wire format +func (val *PollConditions) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *ListNexusIncomingServicesResponse) Size() int { +func (val *PollConditions) Size() int { return proto.Size(val) } -// Equal returns whether two ListNexusIncomingServicesResponse values are equivalent by recursively +// Equal returns whether two PollConditions values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *ListNexusIncomingServicesResponse) Equal(that interface{}) bool { +func (this *PollConditions) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *ListNexusIncomingServicesResponse + var that1 *PollConditions switch t := that.(type) { - case *ListNexusIncomingServicesResponse: + case *PollConditions: that1 = t - case ListNexusIncomingServicesResponse: + case PollConditions: that1 = &t default: return false diff --git a/api/matchingservice/v1/request_response.pb.go b/api/matchingservice/v1/request_response.pb.go index 31e303ad4f4..3221c319e9b 100644 --- a/api/matchingservice/v1/request_response.pb.go +++ b/api/matchingservice/v1/request_response.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -31,20 +9,25 @@ package matchingservice import ( reflect "reflect" sync "sync" + unsafe "unsafe" v11 "go.temporal.io/api/common/v1" - v110 "go.temporal.io/api/enums/v1" + v112 "go.temporal.io/api/deployment/v1" + v19 "go.temporal.io/api/enums/v1" + v114 "go.temporal.io/api/failure/v1" v16 "go.temporal.io/api/history/v1" - v112 "go.temporal.io/api/nexus/v1" + v113 "go.temporal.io/api/nexus/v1" v15 "go.temporal.io/api/protocol/v1" v12 "go.temporal.io/api/query/v1" v14 "go.temporal.io/api/taskqueue/v1" + v115 "go.temporal.io/api/worker/v1" v1 "go.temporal.io/api/workflowservice/v1" - v18 "go.temporal.io/server/api/clock/v1" - v17 "go.temporal.io/server/api/enums/v1" + v17 "go.temporal.io/server/api/clock/v1" + v110 "go.temporal.io/server/api/deployment/v1" + v116 "go.temporal.io/server/api/enums/v1" v13 "go.temporal.io/server/api/history/v1" v111 "go.temporal.io/server/api/persistence/v1" - v19 "go.temporal.io/server/api/taskqueue/v1" + v18 "go.temporal.io/server/api/taskqueue/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -59,23 +42,22 @@ const ( ) type PollWorkflowTaskQueueRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` PollerId string `protobuf:"bytes,2,opt,name=poller_id,json=pollerId,proto3" json:"poller_id,omitempty"` PollRequest *v1.PollWorkflowTaskQueueRequest `protobuf:"bytes,3,opt,name=poll_request,json=pollRequest,proto3" json:"poll_request,omitempty"` ForwardedSource string `protobuf:"bytes,4,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` + // Extra 
conditions on this poll request. Only supported with new matcher. + Conditions *PollConditions `protobuf:"bytes,5,opt,name=conditions,proto3" json:"conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PollWorkflowTaskQueueRequest) Reset() { *x = PollWorkflowTaskQueueRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PollWorkflowTaskQueueRequest) String() string { @@ -86,7 +68,7 @@ func (*PollWorkflowTaskQueueRequest) ProtoMessage() {} func (x *PollWorkflowTaskQueueRequest) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -129,11 +111,15 @@ func (x *PollWorkflowTaskQueueRequest) GetForwardedSource() string { return "" } -type PollWorkflowTaskQueueResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *PollWorkflowTaskQueueRequest) GetConditions() *PollConditions { + if x != nil { + return x.Conditions + } + return nil +} +type PollWorkflowTaskQueueResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` TaskToken []byte `protobuf:"bytes,1,opt,name=task_token,json=taskToken,proto3" json:"task_token,omitempty"` WorkflowExecution *v11.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` WorkflowType *v11.WorkflowType 
`protobuf:"bytes,3,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` @@ -149,22 +135,26 @@ type PollWorkflowTaskQueueResponse struct { BranchToken []byte `protobuf:"bytes,14,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` StartedTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` - Queries map[string]*v12.WorkflowQuery `protobuf:"bytes,17,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Queries map[string]*v12.WorkflowQuery `protobuf:"bytes,17,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Messages []*v15.Message `protobuf:"bytes,18,rep,name=messages,proto3" json:"messages,omitempty"` // The history for this workflow, which will either be complete or partial. Partial histories // are sent to workers who have signaled that they are using a sticky queue when completing // a workflow task. Sticky query tasks will not include any history. 
- History *v16.History `protobuf:"bytes,19,opt,name=history,proto3" json:"history,omitempty"` - NextPageToken []byte `protobuf:"bytes,20,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + History *v16.History `protobuf:"bytes,19,opt,name=history,proto3" json:"history,omitempty"` + NextPageToken []byte `protobuf:"bytes,20,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + PollerScalingDecision *v14.PollerScalingDecision `protobuf:"bytes,21,opt,name=poller_scaling_decision,json=pollerScalingDecision,proto3" json:"poller_scaling_decision,omitempty"` + // Raw history bytes sent from matching service when history.sendRawHistoryBetweenInternalServices is enabled. + // Matching client will deserialize this to History when it receives the response. + RawHistory *v16.History `protobuf:"bytes,22,opt,name=raw_history,json=rawHistory,proto3" json:"raw_history,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PollWorkflowTaskQueueResponse) Reset() { *x = PollWorkflowTaskQueueResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PollWorkflowTaskQueueResponse) String() string { @@ -175,7 +165,7 @@ func (*PollWorkflowTaskQueueResponse) ProtoMessage() {} func (x *PollWorkflowTaskQueueResponse) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -323,24 +313,266 @@ func (x 
*PollWorkflowTaskQueueResponse) GetNextPageToken() []byte { return nil } -type PollActivityTaskQueueRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +func (x *PollWorkflowTaskQueueResponse) GetPollerScalingDecision() *v14.PollerScalingDecision { + if x != nil { + return x.PollerScalingDecision + } + return nil +} + +func (x *PollWorkflowTaskQueueResponse) GetRawHistory() *v16.History { + if x != nil { + return x.RawHistory + } + return nil +} + +// PollWorkflowTaskQueueResponseWithRawHistory is wire-compatible with PollWorkflowTaskQueueResponse. +// +// WIRE COMPATIBILITY PATTERN: +// This message uses the same field numbers as PollWorkflowTaskQueueResponse (1-21 are identical), +// but field 22 differs in type: `repeated bytes raw_history` vs `History raw_history`. +// This enables the following optimization: +// +// 1. Matching service serializes PollWorkflowTaskQueueResponseWithRawHistory with raw_history as [][]byte +// 2. Matching client receives the raw bytes and deserializes them as PollWorkflowTaskQueueResponse +// 3. Protobuf automatically deserializes the [][]byte into a History message because: +// - Field 22 in PollWorkflowTaskQueueResponse expects History +// - Each []byte in the repeated field is a valid proto-encoded HistoryEventBatch +// - Protobuf concatenates repeated message fields into a single message +// +// This pattern avoids deserialization in matching service, reducing CPU usage. +// The matching service passes raw history bytes through without parsing them. +// +// IMPORTANT: Field numbers and all other fields must remain identical between these two messages. +// Any change to PollWorkflowTaskQueueResponse must be mirrored here. 
+type PollWorkflowTaskQueueResponseWithRawHistory struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskToken []byte `protobuf:"bytes,1,opt,name=task_token,json=taskToken,proto3" json:"task_token,omitempty"` + WorkflowExecution *v11.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` + WorkflowType *v11.WorkflowType `protobuf:"bytes,3,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` + PreviousStartedEventId int64 `protobuf:"varint,4,opt,name=previous_started_event_id,json=previousStartedEventId,proto3" json:"previous_started_event_id,omitempty"` + StartedEventId int64 `protobuf:"varint,5,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + Attempt int32 `protobuf:"varint,6,opt,name=attempt,proto3" json:"attempt,omitempty"` + NextEventId int64 `protobuf:"varint,7,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` + BacklogCountHint int64 `protobuf:"varint,8,opt,name=backlog_count_hint,json=backlogCountHint,proto3" json:"backlog_count_hint,omitempty"` + StickyExecutionEnabled bool `protobuf:"varint,9,opt,name=sticky_execution_enabled,json=stickyExecutionEnabled,proto3" json:"sticky_execution_enabled,omitempty"` + Query *v12.WorkflowQuery `protobuf:"bytes,10,opt,name=query,proto3" json:"query,omitempty"` + TransientWorkflowTask *v13.TransientWorkflowTaskInfo `protobuf:"bytes,11,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` + WorkflowExecutionTaskQueue *v14.TaskQueue `protobuf:"bytes,12,opt,name=workflow_execution_task_queue,json=workflowExecutionTaskQueue,proto3" json:"workflow_execution_task_queue,omitempty"` + BranchToken []byte `protobuf:"bytes,14,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + ScheduledTime *timestamppb.Timestamp 
`protobuf:"bytes,15,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartedTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + Queries map[string]*v12.WorkflowQuery `protobuf:"bytes,17,rep,name=queries,proto3" json:"queries,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Messages []*v15.Message `protobuf:"bytes,18,rep,name=messages,proto3" json:"messages,omitempty"` + // The history for this workflow, which will either be complete or partial. Partial histories + // are sent to workers who have signaled that they are using a sticky queue when completing + // a workflow task. Sticky query tasks will not include any history. + History *v16.History `protobuf:"bytes,19,opt,name=history,proto3" json:"history,omitempty"` + NextPageToken []byte `protobuf:"bytes,20,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + PollerScalingDecision *v14.PollerScalingDecision `protobuf:"bytes,21,opt,name=poller_scaling_decision,json=pollerScalingDecision,proto3" json:"poller_scaling_decision,omitempty"` + // Raw history bytes. Each element is a proto-encoded batch of history events. + // When matching client deserializes this to PollWorkflowTaskQueueResponse, this field + // will be automatically deserialized to the raw_history field as History. 
+ RawHistory [][]byte `protobuf:"bytes,22,rep,name=raw_history,json=rawHistory,proto3" json:"raw_history,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) Reset() { + *x = PollWorkflowTaskQueueResponseWithRawHistory{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollWorkflowTaskQueueResponseWithRawHistory) ProtoMessage() {} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollWorkflowTaskQueueResponseWithRawHistory.ProtoReflect.Descriptor instead. 
+func (*PollWorkflowTaskQueueResponseWithRawHistory) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetTaskToken() []byte { + if x != nil { + return x.TaskToken + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetWorkflowExecution() *v11.WorkflowExecution { + if x != nil { + return x.WorkflowExecution + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetWorkflowType() *v11.WorkflowType { + if x != nil { + return x.WorkflowType + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetPreviousStartedEventId() int64 { + if x != nil { + return x.PreviousStartedEventId + } + return 0 +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetStartedEventId() int64 { + if x != nil { + return x.StartedEventId + } + return 0 +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetNextEventId() int64 { + if x != nil { + return x.NextEventId + } + return 0 +} +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetBacklogCountHint() int64 { + if x != nil { + return x.BacklogCountHint + } + return 0 +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetStickyExecutionEnabled() bool { + if x != nil { + return x.StickyExecutionEnabled + } + return false +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetQuery() *v12.WorkflowQuery { + if x != nil { + return x.Query + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetTransientWorkflowTask() *v13.TransientWorkflowTaskInfo { + if x != nil { + return x.TransientWorkflowTask + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetWorkflowExecutionTaskQueue() *v14.TaskQueue { + if x != nil { + return 
x.WorkflowExecutionTaskQueue + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetBranchToken() []byte { + if x != nil { + return x.BranchToken + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledTime + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetQueries() map[string]*v12.WorkflowQuery { + if x != nil { + return x.Queries + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetMessages() []*v15.Message { + if x != nil { + return x.Messages + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetHistory() *v16.History { + if x != nil { + return x.History + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetPollerScalingDecision() *v14.PollerScalingDecision { + if x != nil { + return x.PollerScalingDecision + } + return nil +} + +func (x *PollWorkflowTaskQueueResponseWithRawHistory) GetRawHistory() [][]byte { + if x != nil { + return x.RawHistory + } + return nil +} + +type PollActivityTaskQueueRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` PollerId string `protobuf:"bytes,2,opt,name=poller_id,json=pollerId,proto3" json:"poller_id,omitempty"` PollRequest *v1.PollActivityTaskQueueRequest `protobuf:"bytes,3,opt,name=poll_request,json=pollRequest,proto3" json:"poll_request,omitempty"` ForwardedSource string `protobuf:"bytes,4,opt,name=forwarded_source,json=forwardedSource,proto3" 
json:"forwarded_source,omitempty"` + // Extra conditions on this poll request. Only supported with new matcher. + Conditions *PollConditions `protobuf:"bytes,5,opt,name=conditions,proto3" json:"conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PollActivityTaskQueueRequest) Reset() { *x = PollActivityTaskQueueRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PollActivityTaskQueueRequest) String() string { @@ -350,8 +582,8 @@ func (x *PollActivityTaskQueueRequest) String() string { func (*PollActivityTaskQueueRequest) ProtoMessage() {} func (x *PollActivityTaskQueueRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -363,7 +595,7 @@ func (x *PollActivityTaskQueueRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PollActivityTaskQueueRequest.ProtoReflect.Descriptor instead. 
func (*PollActivityTaskQueueRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{2} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{3} } func (x *PollActivityTaskQueueRequest) GetNamespaceId() string { @@ -394,11 +626,15 @@ func (x *PollActivityTaskQueueRequest) GetForwardedSource() string { return "" } -type PollActivityTaskQueueResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *PollActivityTaskQueueRequest) GetConditions() *PollConditions { + if x != nil { + return x.Conditions + } + return nil +} +type PollActivityTaskQueueResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` TaskToken []byte `protobuf:"bytes,1,opt,name=task_token,json=taskToken,proto3" json:"task_token,omitempty"` WorkflowExecution *v11.WorkflowExecution `protobuf:"bytes,2,opt,name=workflow_execution,json=workflowExecution,proto3" json:"workflow_execution,omitempty"` ActivityId string `protobuf:"bytes,3,opt,name=activity_id,json=activityId,proto3" json:"activity_id,omitempty"` @@ -413,23 +649,28 @@ type PollActivityTaskQueueResponse struct { // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,9,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` - HeartbeatTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3" json:"heartbeat_timeout,omitempty"` - Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` - CurrentAttemptScheduledTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=current_attempt_scheduled_time,json=currentAttemptScheduledTime,proto3" json:"current_attempt_scheduled_time,omitempty"` - HeartbeatDetails *v11.Payloads `protobuf:"bytes,13,opt,name=heartbeat_details,json=heartbeatDetails,proto3" json:"heartbeat_details,omitempty"` - WorkflowType *v11.WorkflowType `protobuf:"bytes,14,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` - WorkflowNamespace string `protobuf:"bytes,15,opt,name=workflow_namespace,json=workflowNamespace,proto3" json:"workflow_namespace,omitempty"` - Header *v11.Header `protobuf:"bytes,16,opt,name=header,proto3" json:"header,omitempty"` + StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,9,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` + HeartbeatTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3" json:"heartbeat_timeout,omitempty"` + Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` + CurrentAttemptScheduledTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=current_attempt_scheduled_time,json=currentAttemptScheduledTime,proto3" json:"current_attempt_scheduled_time,omitempty"` + HeartbeatDetails *v11.Payloads `protobuf:"bytes,13,opt,name=heartbeat_details,json=heartbeatDetails,proto3" json:"heartbeat_details,omitempty"` + WorkflowType *v11.WorkflowType `protobuf:"bytes,14,opt,name=workflow_type,json=workflowType,proto3" 
json:"workflow_type,omitempty"` + WorkflowNamespace string `protobuf:"bytes,15,opt,name=workflow_namespace,json=workflowNamespace,proto3" json:"workflow_namespace,omitempty"` + Header *v11.Header `protobuf:"bytes,16,opt,name=header,proto3" json:"header,omitempty"` + PollerScalingDecision *v14.PollerScalingDecision `protobuf:"bytes,17,opt,name=poller_scaling_decision,json=pollerScalingDecision,proto3" json:"poller_scaling_decision,omitempty"` + Priority *v11.Priority `protobuf:"bytes,18,opt,name=priority,proto3" json:"priority,omitempty"` + RetryPolicy *v11.RetryPolicy `protobuf:"bytes,19,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // ID of the activity run (applicable for standalone activities only) + ActivityRunId string `protobuf:"bytes,20,opt,name=activity_run_id,json=activityRunId,proto3" json:"activity_run_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PollActivityTaskQueueResponse) Reset() { *x = PollActivityTaskQueueResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PollActivityTaskQueueResponse) String() string { @@ -439,8 +680,8 @@ func (x *PollActivityTaskQueueResponse) String() string { func (*PollActivityTaskQueueResponse) ProtoMessage() {} func (x *PollActivityTaskQueueResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() 
== nil { ms.StoreMessageInfo(mi) @@ -452,7 +693,7 @@ func (x *PollActivityTaskQueueResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PollActivityTaskQueueResponse.ProtoReflect.Descriptor instead. func (*PollActivityTaskQueueResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{3} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{4} } func (x *PollActivityTaskQueueResponse) GetTaskToken() []byte { @@ -567,34 +808,61 @@ func (x *PollActivityTaskQueueResponse) GetHeader() *v11.Header { return nil } -type AddWorkflowTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Execution *v11.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,3,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - // (-- api-linter: core::0140::prepositions=disabled - // - // aip.dev/not-precedent: "to" is used to indicate interval. --) - ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` - ForwardedSource string `protobuf:"bytes,6,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` - Source v17.TaskSource `protobuf:"varint,7,opt,name=source,proto3,enum=temporal.server.api.enums.v1.TaskSource" json:"source,omitempty"` - Clock *v18.VectorClock `protobuf:"bytes,9,opt,name=clock,proto3" json:"clock,omitempty"` - // How this task should be directed by matching. 
(Missing means the default - // for TaskVersionDirective, which is unversioned.) - VersionDirective *v19.TaskVersionDirective `protobuf:"bytes,10,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` +func (x *PollActivityTaskQueueResponse) GetPollerScalingDecision() *v14.PollerScalingDecision { + if x != nil { + return x.PollerScalingDecision + } + return nil +} + +func (x *PollActivityTaskQueueResponse) GetPriority() *v11.Priority { + if x != nil { + return x.Priority + } + return nil +} + +func (x *PollActivityTaskQueueResponse) GetRetryPolicy() *v11.RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *PollActivityTaskQueueResponse) GetActivityRunId() string { + if x != nil { + return x.ActivityRunId + } + return "" +} + +type AddWorkflowTaskRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Execution *v11.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,3,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + ScheduledEventId int64 `protobuf:"varint,4,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` + Clock *v17.VectorClock `protobuf:"bytes,9,opt,name=clock,proto3" json:"clock,omitempty"` + // How this task should be directed by matching. (Missing means the default + // for TaskVersionDirective, which is unversioned.) 
+ VersionDirective *v18.TaskVersionDirective `protobuf:"bytes,10,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + ForwardInfo *v18.TaskForwardInfo `protobuf:"bytes,11,opt,name=forward_info,json=forwardInfo,proto3" json:"forward_info,omitempty"` + Priority *v11.Priority `protobuf:"bytes,12,opt,name=priority,proto3" json:"priority,omitempty"` + // Stamp value from when the workflow task was scheduled. Used to validate the task is still relevant. + Stamp int32 `protobuf:"varint,13,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddWorkflowTaskRequest) Reset() { *x = AddWorkflowTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddWorkflowTaskRequest) String() string { @@ -604,8 +872,8 @@ func (x *AddWorkflowTaskRequest) String() string { func (*AddWorkflowTaskRequest) ProtoMessage() {} func (x *AddWorkflowTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -617,7 +885,7 @@ func (x *AddWorkflowTaskRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddWorkflowTaskRequest.ProtoReflect.Descriptor instead. 
func (*AddWorkflowTaskRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{4} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{5} } func (x *AddWorkflowTaskRequest) GetNamespaceId() string { @@ -655,47 +923,55 @@ func (x *AddWorkflowTaskRequest) GetScheduleToStartTimeout() *durationpb.Duratio return nil } -func (x *AddWorkflowTaskRequest) GetForwardedSource() string { +func (x *AddWorkflowTaskRequest) GetClock() *v17.VectorClock { if x != nil { - return x.ForwardedSource + return x.Clock } - return "" + return nil } -func (x *AddWorkflowTaskRequest) GetSource() v17.TaskSource { +func (x *AddWorkflowTaskRequest) GetVersionDirective() *v18.TaskVersionDirective { if x != nil { - return x.Source + return x.VersionDirective } - return v17.TaskSource(0) + return nil } -func (x *AddWorkflowTaskRequest) GetClock() *v18.VectorClock { +func (x *AddWorkflowTaskRequest) GetForwardInfo() *v18.TaskForwardInfo { if x != nil { - return x.Clock + return x.ForwardInfo } return nil } -func (x *AddWorkflowTaskRequest) GetVersionDirective() *v19.TaskVersionDirective { +func (x *AddWorkflowTaskRequest) GetPriority() *v11.Priority { if x != nil { - return x.VersionDirective + return x.Priority } return nil } +func (x *AddWorkflowTaskRequest) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + type AddWorkflowTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState `protogen:"open.v1"` + // When present, it means that the task is spooled to a versioned queue of this build ID + // Deprecated. 
[cleanup-old-wv] + AssignedBuildId string `protobuf:"bytes,1,opt,name=assigned_build_id,json=assignedBuildId,proto3" json:"assigned_build_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddWorkflowTaskResponse) Reset() { *x = AddWorkflowTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddWorkflowTaskResponse) String() string { @@ -705,8 +981,8 @@ func (x *AddWorkflowTaskResponse) String() string { func (*AddWorkflowTaskResponse) ProtoMessage() {} func (x *AddWorkflowTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -718,14 +994,18 @@ func (x *AddWorkflowTaskResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AddWorkflowTaskResponse.ProtoReflect.Descriptor instead. 
func (*AddWorkflowTaskResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{5} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{6} } -type AddActivityTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *AddWorkflowTaskResponse) GetAssignedBuildId() string { + if x != nil { + return x.AssignedBuildId + } + return "" +} +type AddActivityTaskRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` Execution *v11.WorkflowExecution `protobuf:"bytes,2,opt,name=execution,proto3" json:"execution,omitempty"` TaskQueue *v14.TaskQueue `protobuf:"bytes,4,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` @@ -734,21 +1014,26 @@ type AddActivityTaskRequest struct { // // aip.dev/not-precedent: "to" is used to indicate interval. --) ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` - ForwardedSource string `protobuf:"bytes,7,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` - Source v17.TaskSource `protobuf:"varint,8,opt,name=source,proto3,enum=temporal.server.api.enums.v1.TaskSource" json:"source,omitempty"` - Clock *v18.VectorClock `protobuf:"bytes,9,opt,name=clock,proto3" json:"clock,omitempty"` + Clock *v17.VectorClock `protobuf:"bytes,9,opt,name=clock,proto3" json:"clock,omitempty"` // How this task should be directed by matching. (Missing means the default // for TaskVersionDirective, which is unversioned.) 
- VersionDirective *v19.TaskVersionDirective `protobuf:"bytes,10,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + VersionDirective *v18.TaskVersionDirective `protobuf:"bytes,10,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + ForwardInfo *v18.TaskForwardInfo `protobuf:"bytes,11,opt,name=forward_info,json=forwardInfo,proto3" json:"forward_info,omitempty"` + Stamp int32 `protobuf:"varint,12,opt,name=stamp,proto3" json:"stamp,omitempty"` + Priority *v11.Priority `protobuf:"bytes,13,opt,name=priority,proto3" json:"priority,omitempty"` + // Reference to the Chasm component for activity execution (if applicable). For standalone activities, all + // necessary start information is carried within this component, obviating the need to use the fields that apply to + // embedded activities. + ComponentRef []byte `protobuf:"bytes,14,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddActivityTaskRequest) Reset() { *x = AddActivityTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddActivityTaskRequest) String() string { @@ -758,8 +1043,8 @@ func (x *AddActivityTaskRequest) String() string { func (*AddActivityTaskRequest) ProtoMessage() {} func (x *AddActivityTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[7] + if x 
!= nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -771,7 +1056,7 @@ func (x *AddActivityTaskRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AddActivityTaskRequest.ProtoReflect.Descriptor instead. func (*AddActivityTaskRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{6} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{7} } func (x *AddActivityTaskRequest) GetNamespaceId() string { @@ -809,47 +1094,62 @@ func (x *AddActivityTaskRequest) GetScheduleToStartTimeout() *durationpb.Duratio return nil } -func (x *AddActivityTaskRequest) GetForwardedSource() string { +func (x *AddActivityTaskRequest) GetClock() *v17.VectorClock { if x != nil { - return x.ForwardedSource + return x.Clock } - return "" + return nil } -func (x *AddActivityTaskRequest) GetSource() v17.TaskSource { +func (x *AddActivityTaskRequest) GetVersionDirective() *v18.TaskVersionDirective { if x != nil { - return x.Source + return x.VersionDirective } - return v17.TaskSource(0) + return nil } -func (x *AddActivityTaskRequest) GetClock() *v18.VectorClock { +func (x *AddActivityTaskRequest) GetForwardInfo() *v18.TaskForwardInfo { if x != nil { - return x.Clock + return x.ForwardInfo } return nil } -func (x *AddActivityTaskRequest) GetVersionDirective() *v19.TaskVersionDirective { +func (x *AddActivityTaskRequest) GetStamp() int32 { if x != nil { - return x.VersionDirective + return x.Stamp + } + return 0 +} + +func (x *AddActivityTaskRequest) GetPriority() *v11.Priority { + if x != nil { + return x.Priority + } + return nil +} + +func (x *AddActivityTaskRequest) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef } return nil } type AddActivityTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + 
state protoimpl.MessageState `protogen:"open.v1"` + // When present, it means that the task is spooled to a versioned queue of this build ID + // Deprecated. [cleanup-old-wv] + AssignedBuildId string `protobuf:"bytes,1,opt,name=assigned_build_id,json=assignedBuildId,proto3" json:"assigned_build_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AddActivityTaskResponse) Reset() { *x = AddActivityTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AddActivityTaskResponse) String() string { @@ -859,8 +1159,8 @@ func (x *AddActivityTaskResponse) String() string { func (*AddActivityTaskResponse) ProtoMessage() {} func (x *AddActivityTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -872,30 +1172,35 @@ func (x *AddActivityTaskResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AddActivityTaskResponse.ProtoReflect.Descriptor instead. 
func (*AddActivityTaskResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{7} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{8} } -type QueryWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *AddActivityTaskResponse) GetAssignedBuildId() string { + if x != nil { + return x.AssignedBuildId + } + return "" +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - QueryRequest *v1.QueryWorkflowRequest `protobuf:"bytes,3,opt,name=query_request,json=queryRequest,proto3" json:"query_request,omitempty"` - ForwardedSource string `protobuf:"bytes,4,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` +type QueryWorkflowRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + QueryRequest *v1.QueryWorkflowRequest `protobuf:"bytes,3,opt,name=query_request,json=queryRequest,proto3" json:"query_request,omitempty"` // How this task should be directed by matching. (Missing means the default // for TaskVersionDirective, which is unversioned.) 
- VersionDirective *v19.TaskVersionDirective `protobuf:"bytes,5,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + VersionDirective *v18.TaskVersionDirective `protobuf:"bytes,5,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + ForwardInfo *v18.TaskForwardInfo `protobuf:"bytes,6,opt,name=forward_info,json=forwardInfo,proto3" json:"forward_info,omitempty"` + Priority *v11.Priority `protobuf:"bytes,7,opt,name=priority,proto3" json:"priority,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *QueryWorkflowRequest) Reset() { *x = QueryWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueryWorkflowRequest) String() string { @@ -905,8 +1210,8 @@ func (x *QueryWorkflowRequest) String() string { func (*QueryWorkflowRequest) ProtoMessage() {} func (x *QueryWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -918,7 +1223,7 @@ func (x *QueryWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*QueryWorkflowRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{8} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{9} } func (x *QueryWorkflowRequest) GetNamespaceId() string { @@ -942,36 +1247,40 @@ func (x *QueryWorkflowRequest) GetQueryRequest() *v1.QueryWorkflowRequest { return nil } -func (x *QueryWorkflowRequest) GetForwardedSource() string { +func (x *QueryWorkflowRequest) GetVersionDirective() *v18.TaskVersionDirective { if x != nil { - return x.ForwardedSource + return x.VersionDirective } - return "" + return nil } -func (x *QueryWorkflowRequest) GetVersionDirective() *v19.TaskVersionDirective { +func (x *QueryWorkflowRequest) GetForwardInfo() *v18.TaskForwardInfo { if x != nil { - return x.VersionDirective + return x.ForwardInfo + } + return nil +} + +func (x *QueryWorkflowRequest) GetPriority() *v11.Priority { + if x != nil { + return x.Priority } return nil } type QueryWorkflowResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + QueryResult *v11.Payloads `protobuf:"bytes,1,opt,name=query_result,json=queryResult,proto3" json:"query_result,omitempty"` + QueryRejected *v12.QueryRejected `protobuf:"bytes,2,opt,name=query_rejected,json=queryRejected,proto3" json:"query_rejected,omitempty"` unknownFields protoimpl.UnknownFields - - QueryResult *v11.Payloads `protobuf:"bytes,1,opt,name=query_result,json=queryResult,proto3" json:"query_result,omitempty"` - QueryRejected *v12.QueryRejected `protobuf:"bytes,2,opt,name=query_rejected,json=queryRejected,proto3" json:"query_rejected,omitempty"` + sizeCache protoimpl.SizeCache } func (x *QueryWorkflowResponse) Reset() { *x = QueryWorkflowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[9] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueryWorkflowResponse) String() string { @@ -981,8 +1290,8 @@ func (x *QueryWorkflowResponse) String() string { func (*QueryWorkflowResponse) ProtoMessage() {} func (x *QueryWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[10] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -994,7 +1303,7 @@ func (x *QueryWorkflowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*QueryWorkflowResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{9} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{10} } func (x *QueryWorkflowResponse) GetQueryResult() *v11.Payloads { @@ -1012,23 +1321,20 @@ func (x *QueryWorkflowResponse) GetQueryRejected() *v12.QueryRejected { } type RespondQueryTaskCompletedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` CompletedRequest *v1.RespondQueryTaskCompletedRequest `protobuf:"bytes,4,opt,name=completed_request,json=completedRequest,proto3" json:"completed_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RespondQueryTaskCompletedRequest) Reset() { *x = RespondQueryTaskCompletedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RespondQueryTaskCompletedRequest) String() string { @@ -1038,8 +1344,8 @@ func (x *RespondQueryTaskCompletedRequest) String() string { func (*RespondQueryTaskCompletedRequest) ProtoMessage() {} func (x *RespondQueryTaskCompletedRequest) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1051,7 +1357,7 @@ func (x *RespondQueryTaskCompletedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RespondQueryTaskCompletedRequest.ProtoReflect.Descriptor instead. func (*RespondQueryTaskCompletedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{10} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{11} } func (x *RespondQueryTaskCompletedRequest) GetNamespaceId() string { @@ -1083,18 +1389,16 @@ func (x *RespondQueryTaskCompletedRequest) GetCompletedRequest() *v1.RespondQuer } type RespondQueryTaskCompletedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RespondQueryTaskCompletedResponse) Reset() { *x = RespondQueryTaskCompletedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RespondQueryTaskCompletedResponse) String() string { @@ -1104,8 +1408,8 @@ func (x *RespondQueryTaskCompletedResponse) String() string { func (*RespondQueryTaskCompletedResponse) ProtoMessage() {} func (x *RespondQueryTaskCompletedResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1117,27 +1421,24 @@ func (x *RespondQueryTaskCompletedResponse) ProtoReflect() protoreflect.Message // Deprecated: Use RespondQueryTaskCompletedResponse.ProtoReflect.Descriptor instead. func (*RespondQueryTaskCompletedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{11} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{12} } type CancelOutstandingPollRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueueType v19.TaskQueueType `protobuf:"varint,2,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,3,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + PollerId string `protobuf:"bytes,4,opt,name=poller_id,json=pollerId,proto3" json:"poller_id,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueueType v110.TaskQueueType `protobuf:"varint,2,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,3,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - PollerId string 
`protobuf:"bytes,4,opt,name=poller_id,json=pollerId,proto3" json:"poller_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CancelOutstandingPollRequest) Reset() { *x = CancelOutstandingPollRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CancelOutstandingPollRequest) String() string { @@ -1147,8 +1448,8 @@ func (x *CancelOutstandingPollRequest) String() string { func (*CancelOutstandingPollRequest) ProtoMessage() {} func (x *CancelOutstandingPollRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[13] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1160,7 +1461,7 @@ func (x *CancelOutstandingPollRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelOutstandingPollRequest.ProtoReflect.Descriptor instead. 
func (*CancelOutstandingPollRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{12} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{13} } func (x *CancelOutstandingPollRequest) GetNamespaceId() string { @@ -1170,11 +1471,11 @@ func (x *CancelOutstandingPollRequest) GetNamespaceId() string { return "" } -func (x *CancelOutstandingPollRequest) GetTaskQueueType() v110.TaskQueueType { +func (x *CancelOutstandingPollRequest) GetTaskQueueType() v19.TaskQueueType { if x != nil { return x.TaskQueueType } - return v110.TaskQueueType(0) + return v19.TaskQueueType(0) } func (x *CancelOutstandingPollRequest) GetTaskQueue() *v14.TaskQueue { @@ -1192,18 +1493,16 @@ func (x *CancelOutstandingPollRequest) GetPollerId() string { } type CancelOutstandingPollResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CancelOutstandingPollResponse) Reset() { *x = CancelOutstandingPollResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CancelOutstandingPollResponse) String() string { @@ -1213,8 +1512,8 @@ func (x *CancelOutstandingPollResponse) String() string { func (*CancelOutstandingPollResponse) ProtoMessage() {} func (x *CancelOutstandingPollResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[14] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1226,36 +1525,41 @@ func (x *CancelOutstandingPollResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelOutstandingPollResponse.ProtoReflect.Descriptor instead. func (*CancelOutstandingPollResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{13} -} - -type DescribeTaskQueueRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - DescRequest *v1.DescribeTaskQueueRequest `protobuf:"bytes,2,opt,name=desc_request,json=descRequest,proto3" json:"desc_request,omitempty"` + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{14} } -func (x *DescribeTaskQueueRequest) Reset() { - *x = DescribeTaskQueueRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +// CancelOutstandingWorkerPollsRequest cancels all outstanding polls for a given worker instance key. 
+type CancelOutstandingWorkerPollsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v19.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + WorkerInstanceKey string `protobuf:"bytes,4,opt,name=worker_instance_key,json=workerInstanceKey,proto3" json:"worker_instance_key,omitempty"` + // Worker identity string (e.g., "pid@hostname"). Used to eagerly remove the worker + // from pollerHistory so DescribeTaskQueue doesn't show stale pollers. + // Note: pollerHistory predates worker_instance_key and uses identity as its key, + // so we pass both for backward compatibility. + WorkerIdentity string `protobuf:"bytes,5,opt,name=worker_identity,json=workerIdentity,proto3" json:"worker_identity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CancelOutstandingWorkerPollsRequest) Reset() { + *x = CancelOutstandingWorkerPollsRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DescribeTaskQueueRequest) String() string { +func (x *CancelOutstandingWorkerPollsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeTaskQueueRequest) ProtoMessage() {} +func (*CancelOutstandingWorkerPollsRequest) ProtoMessage() {} -func (x *DescribeTaskQueueRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CancelOutstandingWorkerPollsRequest) ProtoReflect() 
protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[15] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1265,52 +1569,70 @@ func (x *DescribeTaskQueueRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DescribeTaskQueueRequest.ProtoReflect.Descriptor instead. -func (*DescribeTaskQueueRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{14} +// Deprecated: Use CancelOutstandingWorkerPollsRequest.ProtoReflect.Descriptor instead. +func (*CancelOutstandingWorkerPollsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{15} } -func (x *DescribeTaskQueueRequest) GetNamespaceId() string { +func (x *CancelOutstandingWorkerPollsRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *DescribeTaskQueueRequest) GetDescRequest() *v1.DescribeTaskQueueRequest { +func (x *CancelOutstandingWorkerPollsRequest) GetTaskQueue() *v14.TaskQueue { if x != nil { - return x.DescRequest + return x.TaskQueue } return nil } -type DescribeTaskQueueResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CancelOutstandingWorkerPollsRequest) GetTaskQueueType() v19.TaskQueueType { + if x != nil { + return x.TaskQueueType + } + return v19.TaskQueueType(0) +} - Pollers []*v14.PollerInfo `protobuf:"bytes,1,rep,name=pollers,proto3" json:"pollers,omitempty"` - TaskQueueStatus *v14.TaskQueueStatus `protobuf:"bytes,2,opt,name=task_queue_status,json=taskQueueStatus,proto3" json:"task_queue_status,omitempty"` +func (x *CancelOutstandingWorkerPollsRequest) GetWorkerInstanceKey() string { + if x != nil { + return x.WorkerInstanceKey + } + return "" } -func 
(x *DescribeTaskQueueResponse) Reset() { - *x = DescribeTaskQueueResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *CancelOutstandingWorkerPollsRequest) GetWorkerIdentity() string { + if x != nil { + return x.WorkerIdentity } + return "" } -func (x *DescribeTaskQueueResponse) String() string { +type CancelOutstandingWorkerPollsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Used for debugging. + CancelledCount int32 `protobuf:"varint,1,opt,name=cancelled_count,json=cancelledCount,proto3" json:"cancelled_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CancelOutstandingWorkerPollsResponse) Reset() { + *x = CancelOutstandingWorkerPollsResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CancelOutstandingWorkerPollsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DescribeTaskQueueResponse) ProtoMessage() {} +func (*CancelOutstandingWorkerPollsResponse) ProtoMessage() {} -func (x *DescribeTaskQueueResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CancelOutstandingWorkerPollsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[16] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1320,53 +1642,43 @@ func (x *DescribeTaskQueueResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
DescribeTaskQueueResponse.ProtoReflect.Descriptor instead. -func (*DescribeTaskQueueResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{15} -} - -func (x *DescribeTaskQueueResponse) GetPollers() []*v14.PollerInfo { - if x != nil { - return x.Pollers - } - return nil +// Deprecated: Use CancelOutstandingWorkerPollsResponse.ProtoReflect.Descriptor instead. +func (*CancelOutstandingWorkerPollsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{16} } -func (x *DescribeTaskQueueResponse) GetTaskQueueStatus() *v14.TaskQueueStatus { +func (x *CancelOutstandingWorkerPollsResponse) GetCancelledCount() int32 { if x != nil { - return x.TaskQueueStatus + return x.CancelledCount } - return nil + return 0 } -type ListTaskQueuePartitionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DescribeTaskQueueRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + DescRequest *v1.DescribeTaskQueueRequest `protobuf:"bytes,2,opt,name=desc_request,json=descRequest,proto3" json:"desc_request,omitempty"` + Version *v110.WorkerDeploymentVersion `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` - NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` +func (x *DescribeTaskQueueRequest) Reset() { + *x = DescribeTaskQueueRequest{} + mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ListTaskQueuePartitionsRequest) Reset() { - *x = ListTaskQueuePartitionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListTaskQueuePartitionsRequest) String() string { +func (x *DescribeTaskQueueRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListTaskQueuePartitionsRequest) ProtoMessage() {} +func (*DescribeTaskQueueRequest) ProtoMessage() {} -func (x *ListTaskQueuePartitionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeTaskQueueRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[17] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1376,59 +1688,55 @@ func (x *ListTaskQueuePartitionsRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListTaskQueuePartitionsRequest.ProtoReflect.Descriptor instead. -func (*ListTaskQueuePartitionsRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{16} +// Deprecated: Use DescribeTaskQueueRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeTaskQueueRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{17} } -func (x *ListTaskQueuePartitionsRequest) GetNamespace() string { +func (x *DescribeTaskQueueRequest) GetNamespaceId() string { if x != nil { - return x.Namespace + return x.NamespaceId } return "" } -func (x *ListTaskQueuePartitionsRequest) GetNamespaceId() string { +func (x *DescribeTaskQueueRequest) GetDescRequest() *v1.DescribeTaskQueueRequest { if x != nil { - return x.NamespaceId + return x.DescRequest } - return "" + return nil } -func (x *ListTaskQueuePartitionsRequest) GetTaskQueue() *v14.TaskQueue { +func (x *DescribeTaskQueueRequest) GetVersion() *v110.WorkerDeploymentVersion { if x != nil { - return x.TaskQueue + return x.Version } return nil } -type ListTaskQueuePartitionsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DescribeTaskQueueResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + DescResponse *v1.DescribeTaskQueueResponse `protobuf:"bytes,3,opt,name=desc_response,json=descResponse,proto3" json:"desc_response,omitempty"` unknownFields protoimpl.UnknownFields - - ActivityTaskQueuePartitions []*v14.TaskQueuePartitionMetadata `protobuf:"bytes,1,rep,name=activity_task_queue_partitions,json=activityTaskQueuePartitions,proto3" json:"activity_task_queue_partitions,omitempty"` - WorkflowTaskQueuePartitions []*v14.TaskQueuePartitionMetadata `protobuf:"bytes,2,rep,name=workflow_task_queue_partitions,json=workflowTaskQueuePartitions,proto3" json:"workflow_task_queue_partitions,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ListTaskQueuePartitionsResponse) Reset() { - *x = ListTaskQueuePartitionsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x 
*DescribeTaskQueueResponse) Reset() { + *x = DescribeTaskQueueResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ListTaskQueuePartitionsResponse) String() string { +func (x *DescribeTaskQueueResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListTaskQueuePartitionsResponse) ProtoMessage() {} +func (*DescribeTaskQueueResponse) ProtoMessage() {} -func (x *ListTaskQueuePartitionsResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeTaskQueueResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[18] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1438,65 +1746,47 @@ func (x *ListTaskQueuePartitionsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListTaskQueuePartitionsResponse.ProtoReflect.Descriptor instead. -func (*ListTaskQueuePartitionsResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{17} -} - -func (x *ListTaskQueuePartitionsResponse) GetActivityTaskQueuePartitions() []*v14.TaskQueuePartitionMetadata { - if x != nil { - return x.ActivityTaskQueuePartitions - } - return nil +// Deprecated: Use DescribeTaskQueueResponse.ProtoReflect.Descriptor instead. 
+func (*DescribeTaskQueueResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{18} } -func (x *ListTaskQueuePartitionsResponse) GetWorkflowTaskQueuePartitions() []*v14.TaskQueuePartitionMetadata { +func (x *DescribeTaskQueueResponse) GetDescResponse() *v1.DescribeTaskQueueResponse { if x != nil { - return x.WorkflowTaskQueuePartitions + return x.DescResponse } return nil } -// (-- api-linter: core::0134::request-mask-required=disabled -// -// aip.dev/not-precedent: UpdateWorkerBuildIdCompatibilityRequest doesn't follow Google API format --) -// -// (-- api-linter: core::0134::request-resource-required=disabled -// -// aip.dev/not-precedent: UpdateWorkerBuildIdCompatibilityRequest RPC doesn't follow Google API format. --) -type UpdateWorkerBuildIdCompatibilityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - // Types that are assignable to Operation: - // - // *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_ - // *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_ - // *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId - Operation isUpdateWorkerBuildIdCompatibilityRequest_Operation `protobuf_oneof:"operation"` +type DescribeVersionedTaskQueuesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // This task queue is for routing purposes. 
+ TaskQueueType v19.TaskQueueType `protobuf:"varint,2,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,3,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + Version *v110.WorkerDeploymentVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // List of task queues to describe. + VersionTaskQueues []*DescribeVersionedTaskQueuesRequest_VersionTaskQueue `protobuf:"bytes,5,rep,name=version_task_queues,json=versionTaskQueues,proto3" json:"version_task_queues,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *UpdateWorkerBuildIdCompatibilityRequest) Reset() { - *x = UpdateWorkerBuildIdCompatibilityRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeVersionedTaskQueuesRequest) Reset() { + *x = DescribeVersionedTaskQueuesRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateWorkerBuildIdCompatibilityRequest) String() string { +func (x *DescribeVersionedTaskQueuesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateWorkerBuildIdCompatibilityRequest) ProtoMessage() {} +func (*DescribeVersionedTaskQueuesRequest) ProtoMessage() {} -func (x *UpdateWorkerBuildIdCompatibilityRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeVersionedTaskQueuesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[19] 
+ if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1506,102 +1796,69 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest) ProtoReflect() protoreflect.Me return mi.MessageOf(x) } -// Deprecated: Use UpdateWorkerBuildIdCompatibilityRequest.ProtoReflect.Descriptor instead. -func (*UpdateWorkerBuildIdCompatibilityRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{18} +// Deprecated: Use DescribeVersionedTaskQueuesRequest.ProtoReflect.Descriptor instead. +func (*DescribeVersionedTaskQueuesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{19} } -func (x *UpdateWorkerBuildIdCompatibilityRequest) GetNamespaceId() string { +func (x *DescribeVersionedTaskQueuesRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *UpdateWorkerBuildIdCompatibilityRequest) GetTaskQueue() string { +func (x *DescribeVersionedTaskQueuesRequest) GetTaskQueueType() v19.TaskQueueType { if x != nil { - return x.TaskQueue + return x.TaskQueueType } - return "" + return v19.TaskQueueType(0) } -func (m *UpdateWorkerBuildIdCompatibilityRequest) GetOperation() isUpdateWorkerBuildIdCompatibilityRequest_Operation { - if m != nil { - return m.Operation +func (x *DescribeVersionedTaskQueuesRequest) GetTaskQueue() *v14.TaskQueue { + if x != nil { + return x.TaskQueue } return nil } -func (x *UpdateWorkerBuildIdCompatibilityRequest) GetApplyPublicRequest() *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest { - if x, ok := x.GetOperation().(*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_); ok { - return x.ApplyPublicRequest +func (x *DescribeVersionedTaskQueuesRequest) GetVersion() *v110.WorkerDeploymentVersion { + if x != nil { + return x.Version } return nil } -func (x 
*UpdateWorkerBuildIdCompatibilityRequest) GetRemoveBuildIds() *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds { - if x, ok := x.GetOperation().(*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_); ok { - return x.RemoveBuildIds +func (x *DescribeVersionedTaskQueuesRequest) GetVersionTaskQueues() []*DescribeVersionedTaskQueuesRequest_VersionTaskQueue { + if x != nil { + return x.VersionTaskQueues } return nil } -func (x *UpdateWorkerBuildIdCompatibilityRequest) GetPersistUnknownBuildId() string { - if x, ok := x.GetOperation().(*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId); ok { - return x.PersistUnknownBuildId - } - return "" -} - -type isUpdateWorkerBuildIdCompatibilityRequest_Operation interface { - isUpdateWorkerBuildIdCompatibilityRequest_Operation() -} - -type UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_ struct { - ApplyPublicRequest *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest `protobuf:"bytes,3,opt,name=apply_public_request,json=applyPublicRequest,proto3,oneof"` -} - -type UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_ struct { - RemoveBuildIds *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds `protobuf:"bytes,4,opt,name=remove_build_ids,json=removeBuildIds,proto3,oneof"` -} - -type UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId struct { - PersistUnknownBuildId string `protobuf:"bytes,5,opt,name=persist_unknown_build_id,json=persistUnknownBuildId,proto3,oneof"` -} - -func (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { -} - -func (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { -} - -func (*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { -} - -type UpdateWorkerBuildIdCompatibilityResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - 
unknownFields protoimpl.UnknownFields +type DescribeVersionedTaskQueuesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + VersionTaskQueues []*DescribeVersionedTaskQueuesResponse_VersionTaskQueue `protobuf:"bytes,1,rep,name=version_task_queues,json=versionTaskQueues,proto3" json:"version_task_queues,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *UpdateWorkerBuildIdCompatibilityResponse) Reset() { - *x = UpdateWorkerBuildIdCompatibilityResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeVersionedTaskQueuesResponse) Reset() { + *x = DescribeVersionedTaskQueuesResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateWorkerBuildIdCompatibilityResponse) String() string { +func (x *DescribeVersionedTaskQueuesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateWorkerBuildIdCompatibilityResponse) ProtoMessage() {} +func (*DescribeVersionedTaskQueuesResponse) ProtoMessage() {} -func (x *UpdateWorkerBuildIdCompatibilityResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeVersionedTaskQueuesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[20] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1611,38 +1868,48 @@ func (x *UpdateWorkerBuildIdCompatibilityResponse) ProtoReflect() protoreflect.M return mi.MessageOf(x) } -// Deprecated: Use 
UpdateWorkerBuildIdCompatibilityResponse.ProtoReflect.Descriptor instead. -func (*UpdateWorkerBuildIdCompatibilityResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{19} +// Deprecated: Use DescribeVersionedTaskQueuesResponse.ProtoReflect.Descriptor instead. +func (*DescribeVersionedTaskQueuesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{20} } -type GetWorkerBuildIdCompatibilityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DescribeVersionedTaskQueuesResponse) GetVersionTaskQueues() []*DescribeVersionedTaskQueuesResponse_VersionTaskQueue { + if x != nil { + return x.VersionTaskQueues + } + return nil +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Request *v1.GetWorkerBuildIdCompatibilityRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` +type DescribeTaskQueuePartitionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueuePartition *v18.TaskQueuePartition `protobuf:"bytes,2,opt,name=task_queue_partition,json=taskQueuePartition,proto3" json:"task_queue_partition,omitempty"` + Versions *v14.TaskQueueVersionSelection `protobuf:"bytes,3,opt,name=versions,proto3" json:"versions,omitempty"` + // Report task queue stats for the requested task queue types and versions + ReportStats bool `protobuf:"varint,4,opt,name=report_stats,json=reportStats,proto3" json:"report_stats,omitempty"` + // Report list of pollers for requested task queue types and versions + ReportPollers bool `protobuf:"varint,5,opt,name=report_pollers,json=reportPollers,proto3" json:"report_pollers,omitempty"` 
+ ReportInternalTaskQueueStatus bool `protobuf:"varint,6,opt,name=report_internal_task_queue_status,json=reportInternalTaskQueueStatus,proto3" json:"report_internal_task_queue_status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetWorkerBuildIdCompatibilityRequest) Reset() { - *x = GetWorkerBuildIdCompatibilityRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeTaskQueuePartitionRequest) Reset() { + *x = DescribeTaskQueuePartitionRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetWorkerBuildIdCompatibilityRequest) String() string { +func (x *DescribeTaskQueuePartitionRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkerBuildIdCompatibilityRequest) ProtoMessage() {} +func (*DescribeTaskQueuePartitionRequest) ProtoMessage() {} -func (x *GetWorkerBuildIdCompatibilityRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeTaskQueuePartitionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[21] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1652,51 +1919,76 @@ func (x *GetWorkerBuildIdCompatibilityRequest) ProtoReflect() protoreflect.Messa return mi.MessageOf(x) } -// Deprecated: Use GetWorkerBuildIdCompatibilityRequest.ProtoReflect.Descriptor instead. 
-func (*GetWorkerBuildIdCompatibilityRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{20} +// Deprecated: Use DescribeTaskQueuePartitionRequest.ProtoReflect.Descriptor instead. +func (*DescribeTaskQueuePartitionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{21} } -func (x *GetWorkerBuildIdCompatibilityRequest) GetNamespaceId() string { +func (x *DescribeTaskQueuePartitionRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetWorkerBuildIdCompatibilityRequest) GetRequest() *v1.GetWorkerBuildIdCompatibilityRequest { +func (x *DescribeTaskQueuePartitionRequest) GetTaskQueuePartition() *v18.TaskQueuePartition { if x != nil { - return x.Request + return x.TaskQueuePartition } return nil } -type GetWorkerBuildIdCompatibilityResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DescribeTaskQueuePartitionRequest) GetVersions() *v14.TaskQueueVersionSelection { + if x != nil { + return x.Versions + } + return nil +} - Response *v1.GetWorkerBuildIdCompatibilityResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` +func (x *DescribeTaskQueuePartitionRequest) GetReportStats() bool { + if x != nil { + return x.ReportStats + } + return false } -func (x *GetWorkerBuildIdCompatibilityResponse) Reset() { - *x = GetWorkerBuildIdCompatibilityResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *DescribeTaskQueuePartitionRequest) GetReportPollers() bool { + if x != nil { + return x.ReportPollers } + return false } -func (x *GetWorkerBuildIdCompatibilityResponse) String() string { +func (x 
*DescribeTaskQueuePartitionRequest) GetReportInternalTaskQueueStatus() bool { + if x != nil { + return x.ReportInternalTaskQueueStatus + } + return false +} + +type DescribeTaskQueuePartitionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + VersionsInfoInternal map[string]*v18.TaskQueueVersionInfoInternal `protobuf:"bytes,1,rep,name=versions_info_internal,json=versionsInfoInternal,proto3" json:"versions_info_internal,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeTaskQueuePartitionResponse) Reset() { + *x = DescribeTaskQueuePartitionResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeTaskQueuePartitionResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetWorkerBuildIdCompatibilityResponse) ProtoMessage() {} +func (*DescribeTaskQueuePartitionResponse) ProtoMessage() {} -func (x *GetWorkerBuildIdCompatibilityResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeTaskQueuePartitionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[22] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1706,57 +1998,43 @@ func (x *GetWorkerBuildIdCompatibilityResponse) ProtoReflect() protoreflect.Mess return mi.MessageOf(x) } -// Deprecated: Use GetWorkerBuildIdCompatibilityResponse.ProtoReflect.Descriptor instead. 
-func (*GetWorkerBuildIdCompatibilityResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{21} +// Deprecated: Use DescribeTaskQueuePartitionResponse.ProtoReflect.Descriptor instead. +func (*DescribeTaskQueuePartitionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{22} } -func (x *GetWorkerBuildIdCompatibilityResponse) GetResponse() *v1.GetWorkerBuildIdCompatibilityResponse { +func (x *DescribeTaskQueuePartitionResponse) GetVersionsInfoInternal() map[string]*v18.TaskQueueVersionInfoInternal { if x != nil { - return x.Response + return x.VersionsInfoInternal } return nil } -type GetTaskQueueUserDataRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListTaskQueuePartitionsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + NamespaceId string `protobuf:"bytes,3,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - // The task queue to fetch data from. The task queue is always considered as a normal - // queue, since sticky queues have no user data. - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - // Normally task queue type should always be TASK_QUEUE_TYPE_WORKFLOW here, but querying - // activity task queues is useful for testing. 
- TaskQueueType v110.TaskQueueType `protobuf:"varint,5,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` - // The value of the last known user data version. - // If the requester has no data, it should set this to 0. - // This value must not be set to a negative number (note that our linter suggests avoiding uint64). - LastKnownUserDataVersion int64 `protobuf:"varint,3,opt,name=last_known_user_data_version,json=lastKnownUserDataVersion,proto3" json:"last_known_user_data_version,omitempty"` - // If set and last_known_user_data_version is the current version, block until new data is - // available (or timeout). - WaitNewData bool `protobuf:"varint,4,opt,name=wait_new_data,json=waitNewData,proto3" json:"wait_new_data,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetTaskQueueUserDataRequest) Reset() { - *x = GetTaskQueueUserDataRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListTaskQueuePartitionsRequest) Reset() { + *x = ListTaskQueuePartitionsRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetTaskQueueUserDataRequest) String() string { +func (x *ListTaskQueuePartitionsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTaskQueueUserDataRequest) ProtoMessage() {} +func (*ListTaskQueuePartitionsRequest) ProtoMessage() {} -func (x *GetTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListTaskQueuePartitionsRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[23] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1766,74 +2044,2070 @@ func (x *GetTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. -func (*GetTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{22} -} +// Deprecated: Use ListTaskQueuePartitionsRequest.ProtoReflect.Descriptor instead. +func (*ListTaskQueuePartitionsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{23} +} + +func (x *ListTaskQueuePartitionsRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ListTaskQueuePartitionsRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ListTaskQueuePartitionsRequest) GetTaskQueue() *v14.TaskQueue { + if x != nil { + return x.TaskQueue + } + return nil +} + +type ListTaskQueuePartitionsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + ActivityTaskQueuePartitions []*v14.TaskQueuePartitionMetadata `protobuf:"bytes,1,rep,name=activity_task_queue_partitions,json=activityTaskQueuePartitions,proto3" json:"activity_task_queue_partitions,omitempty"` + WorkflowTaskQueuePartitions []*v14.TaskQueuePartitionMetadata `protobuf:"bytes,2,rep,name=workflow_task_queue_partitions,json=workflowTaskQueuePartitions,proto3" json:"workflow_task_queue_partitions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListTaskQueuePartitionsResponse) Reset() { + *x = ListTaskQueuePartitionsResponse{} + mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListTaskQueuePartitionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTaskQueuePartitionsResponse) ProtoMessage() {} + +func (x *ListTaskQueuePartitionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTaskQueuePartitionsResponse.ProtoReflect.Descriptor instead. +func (*ListTaskQueuePartitionsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{24} +} + +func (x *ListTaskQueuePartitionsResponse) GetActivityTaskQueuePartitions() []*v14.TaskQueuePartitionMetadata { + if x != nil { + return x.ActivityTaskQueuePartitions + } + return nil +} + +func (x *ListTaskQueuePartitionsResponse) GetWorkflowTaskQueuePartitions() []*v14.TaskQueuePartitionMetadata { + if x != nil { + return x.WorkflowTaskQueuePartitions + } + return nil +} + +// (-- api-linter: core::0134::request-mask-required=disabled +// +// aip.dev/not-precedent: UpdateWorkerBuildIdCompatibilityRequest doesn't follow Google API format --) +// +// (-- api-linter: core::0134::request-resource-required=disabled +// +// aip.dev/not-precedent: UpdateWorkerBuildIdCompatibilityRequest RPC doesn't follow Google API format. 
--) +type UpdateWorkerBuildIdCompatibilityRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Types that are valid to be assigned to Operation: + // + // *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_ + // *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_ + // *UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId + Operation isUpdateWorkerBuildIdCompatibilityRequest_Operation `protobuf_oneof:"operation"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) Reset() { + *x = UpdateWorkerBuildIdCompatibilityRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateWorkerBuildIdCompatibilityRequest) ProtoMessage() {} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[25] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateWorkerBuildIdCompatibilityRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateWorkerBuildIdCompatibilityRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{25} +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) GetOperation() isUpdateWorkerBuildIdCompatibilityRequest_Operation { + if x != nil { + return x.Operation + } + return nil +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) GetApplyPublicRequest() *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest { + if x != nil { + if x, ok := x.Operation.(*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_); ok { + return x.ApplyPublicRequest + } + } + return nil +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) GetRemoveBuildIds() *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds { + if x != nil { + if x, ok := x.Operation.(*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_); ok { + return x.RemoveBuildIds + } + } + return nil +} + +func (x *UpdateWorkerBuildIdCompatibilityRequest) GetPersistUnknownBuildId() string { + if x != nil { + if x, ok := x.Operation.(*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId); ok { + return x.PersistUnknownBuildId + } + } + return "" +} + +type isUpdateWorkerBuildIdCompatibilityRequest_Operation interface { + isUpdateWorkerBuildIdCompatibilityRequest_Operation() +} + +type UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_ struct { + ApplyPublicRequest *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest `protobuf:"bytes,3,opt,name=apply_public_request,json=applyPublicRequest,proto3,oneof"` +} + +type UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_ struct { + RemoveBuildIds 
*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds `protobuf:"bytes,4,opt,name=remove_build_ids,json=removeBuildIds,proto3,oneof"` +} + +type UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId struct { + PersistUnknownBuildId string `protobuf:"bytes,5,opt,name=persist_unknown_build_id,json=persistUnknownBuildId,proto3,oneof"` +} + +func (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { +} + +func (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { +} + +func (*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId) isUpdateWorkerBuildIdCompatibilityRequest_Operation() { +} + +type UpdateWorkerBuildIdCompatibilityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateWorkerBuildIdCompatibilityResponse) Reset() { + *x = UpdateWorkerBuildIdCompatibilityResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateWorkerBuildIdCompatibilityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateWorkerBuildIdCompatibilityResponse) ProtoMessage() {} + +func (x *UpdateWorkerBuildIdCompatibilityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[26] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateWorkerBuildIdCompatibilityResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateWorkerBuildIdCompatibilityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{26} +} + +type GetWorkerVersioningRulesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Types that are valid to be assigned to Command: + // + // *GetWorkerVersioningRulesRequest_Request + Command isGetWorkerVersioningRulesRequest_Command `protobuf_oneof:"command"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetWorkerVersioningRulesRequest) Reset() { + *x = GetWorkerVersioningRulesRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetWorkerVersioningRulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWorkerVersioningRulesRequest) ProtoMessage() {} + +func (x *GetWorkerVersioningRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[27] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWorkerVersioningRulesRequest.ProtoReflect.Descriptor instead. 
+func (*GetWorkerVersioningRulesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{27} +} + +func (x *GetWorkerVersioningRulesRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *GetWorkerVersioningRulesRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *GetWorkerVersioningRulesRequest) GetCommand() isGetWorkerVersioningRulesRequest_Command { + if x != nil { + return x.Command + } + return nil +} + +func (x *GetWorkerVersioningRulesRequest) GetRequest() *v1.GetWorkerVersioningRulesRequest { + if x != nil { + if x, ok := x.Command.(*GetWorkerVersioningRulesRequest_Request); ok { + return x.Request + } + } + return nil +} + +type isGetWorkerVersioningRulesRequest_Command interface { + isGetWorkerVersioningRulesRequest_Command() +} + +type GetWorkerVersioningRulesRequest_Request struct { + Request *v1.GetWorkerVersioningRulesRequest `protobuf:"bytes,3,opt,name=request,proto3,oneof"` +} + +func (*GetWorkerVersioningRulesRequest_Request) isGetWorkerVersioningRulesRequest_Command() {} + +type GetWorkerVersioningRulesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.GetWorkerVersioningRulesResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetWorkerVersioningRulesResponse) Reset() { + *x = GetWorkerVersioningRulesResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetWorkerVersioningRulesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWorkerVersioningRulesResponse) ProtoMessage() {} + +func (x *GetWorkerVersioningRulesResponse) ProtoReflect() 
protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[28] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWorkerVersioningRulesResponse.ProtoReflect.Descriptor instead. +func (*GetWorkerVersioningRulesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{28} +} + +func (x *GetWorkerVersioningRulesResponse) GetResponse() *v1.GetWorkerVersioningRulesResponse { + if x != nil { + return x.Response + } + return nil +} + +// (-- api-linter: core::0134::request-mask-required=disabled +// +// aip.dev/not-precedent: UpdateWorkerVersioningRulesRequest doesn't follow Google API format --) +// +// (-- api-linter: core::0134::request-resource-required=disabled +// +// aip.dev/not-precedent: UpdateWorkerVersioningRulesRequest RPC doesn't follow Google API format. 
--) +type UpdateWorkerVersioningRulesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Types that are valid to be assigned to Command: + // + // *UpdateWorkerVersioningRulesRequest_Request + Command isUpdateWorkerVersioningRulesRequest_Command `protobuf_oneof:"command"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateWorkerVersioningRulesRequest) Reset() { + *x = UpdateWorkerVersioningRulesRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateWorkerVersioningRulesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateWorkerVersioningRulesRequest) ProtoMessage() {} + +func (x *UpdateWorkerVersioningRulesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[29] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateWorkerVersioningRulesRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateWorkerVersioningRulesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{29} +} + +func (x *UpdateWorkerVersioningRulesRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *UpdateWorkerVersioningRulesRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *UpdateWorkerVersioningRulesRequest) GetCommand() isUpdateWorkerVersioningRulesRequest_Command { + if x != nil { + return x.Command + } + return nil +} + +func (x *UpdateWorkerVersioningRulesRequest) GetRequest() *v1.UpdateWorkerVersioningRulesRequest { + if x != nil { + if x, ok := x.Command.(*UpdateWorkerVersioningRulesRequest_Request); ok { + return x.Request + } + } + return nil +} + +type isUpdateWorkerVersioningRulesRequest_Command interface { + isUpdateWorkerVersioningRulesRequest_Command() +} + +type UpdateWorkerVersioningRulesRequest_Request struct { + Request *v1.UpdateWorkerVersioningRulesRequest `protobuf:"bytes,3,opt,name=request,proto3,oneof"` +} + +func (*UpdateWorkerVersioningRulesRequest_Request) isUpdateWorkerVersioningRulesRequest_Command() {} + +type UpdateWorkerVersioningRulesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.UpdateWorkerVersioningRulesResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateWorkerVersioningRulesResponse) Reset() { + *x = UpdateWorkerVersioningRulesResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateWorkerVersioningRulesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateWorkerVersioningRulesResponse) ProtoMessage() {} + +func (x 
*UpdateWorkerVersioningRulesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[30] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateWorkerVersioningRulesResponse.ProtoReflect.Descriptor instead. +func (*UpdateWorkerVersioningRulesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{30} +} + +func (x *UpdateWorkerVersioningRulesResponse) GetResponse() *v1.UpdateWorkerVersioningRulesResponse { + if x != nil { + return x.Response + } + return nil +} + +type GetWorkerBuildIdCompatibilityRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.GetWorkerBuildIdCompatibilityRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetWorkerBuildIdCompatibilityRequest) Reset() { + *x = GetWorkerBuildIdCompatibilityRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetWorkerBuildIdCompatibilityRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWorkerBuildIdCompatibilityRequest) ProtoMessage() {} + +func (x *GetWorkerBuildIdCompatibilityRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[31] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use GetWorkerBuildIdCompatibilityRequest.ProtoReflect.Descriptor instead. +func (*GetWorkerBuildIdCompatibilityRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{31} +} + +func (x *GetWorkerBuildIdCompatibilityRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *GetWorkerBuildIdCompatibilityRequest) GetRequest() *v1.GetWorkerBuildIdCompatibilityRequest { + if x != nil { + return x.Request + } + return nil +} + +type GetWorkerBuildIdCompatibilityResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Response *v1.GetWorkerBuildIdCompatibilityResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetWorkerBuildIdCompatibilityResponse) Reset() { + *x = GetWorkerBuildIdCompatibilityResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetWorkerBuildIdCompatibilityResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetWorkerBuildIdCompatibilityResponse) ProtoMessage() {} + +func (x *GetWorkerBuildIdCompatibilityResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[32] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetWorkerBuildIdCompatibilityResponse.ProtoReflect.Descriptor instead. 
+func (*GetWorkerBuildIdCompatibilityResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{32} +} + +func (x *GetWorkerBuildIdCompatibilityResponse) GetResponse() *v1.GetWorkerBuildIdCompatibilityResponse { + if x != nil { + return x.Response + } + return nil +} + +type GetTaskQueueUserDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // The task queue to fetch data from. The task queue is always considered as a normal + // queue, since sticky queues have no user data. + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v19.TaskQueueType `protobuf:"varint,5,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + // The value of the last known user data version. + // If the requester has no data, it should set this to 0. + // This value must not be set to a negative number (note that our linter suggests avoiding uint64). + LastKnownUserDataVersion int64 `protobuf:"varint,3,opt,name=last_known_user_data_version,json=lastKnownUserDataVersion,proto3" json:"last_known_user_data_version,omitempty"` + // The value of the last known ephemeral data version. + // If the requester has no data yet, it should use 0. + // If the requester doesn't want ephemeral data (i.e. it's root of an activity/nexus + // queue which have separate ephemeral data), it should use -1 (noEphemeralDataVersion). 
+ LastKnownEphemeralDataVersion int64 `protobuf:"varint,7,opt,name=last_known_ephemeral_data_version,json=lastKnownEphemeralDataVersion,proto3" json:"last_known_ephemeral_data_version,omitempty"` + // If set and last_known_{user_data,ephemeral_data}_version is the current version, + // block until new data is available (or timeout). + WaitNewData bool `protobuf:"varint,4,opt,name=wait_new_data,json=waitNewData,proto3" json:"wait_new_data,omitempty"` + // If set, do not load task queue if unloaded. (Returns FailedPrecondition error in that case.) + OnlyIfLoaded bool `protobuf:"varint,6,opt,name=only_if_loaded,json=onlyIfLoaded,proto3" json:"only_if_loaded,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetTaskQueueUserDataRequest) Reset() { + *x = GetTaskQueueUserDataRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetTaskQueueUserDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTaskQueueUserDataRequest) ProtoMessage() {} + +func (x *GetTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[33] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. 
+func (*GetTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{33} +} + +func (x *GetTaskQueueUserDataRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *GetTaskQueueUserDataRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *GetTaskQueueUserDataRequest) GetTaskQueueType() v19.TaskQueueType { + if x != nil { + return x.TaskQueueType + } + return v19.TaskQueueType(0) +} + +func (x *GetTaskQueueUserDataRequest) GetLastKnownUserDataVersion() int64 { + if x != nil { + return x.LastKnownUserDataVersion + } + return 0 +} + +func (x *GetTaskQueueUserDataRequest) GetLastKnownEphemeralDataVersion() int64 { + if x != nil { + return x.LastKnownEphemeralDataVersion + } + return 0 +} + +func (x *GetTaskQueueUserDataRequest) GetWaitNewData() bool { + if x != nil { + return x.WaitNewData + } + return false +} + +func (x *GetTaskQueueUserDataRequest) GetOnlyIfLoaded() bool { + if x != nil { + return x.OnlyIfLoaded + } + return false +} + +type GetTaskQueueUserDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Versioned user data, set if the task queue has user data and the request's last_known_user_data_version is less + // than the version cached in the root partition. 
+ UserData *v111.VersionedTaskQueueUserData `protobuf:"bytes,2,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + EphemeralData *v18.VersionedEphemeralData `protobuf:"bytes,3,opt,name=ephemeral_data,json=ephemeralData,proto3" json:"ephemeral_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetTaskQueueUserDataResponse) Reset() { + *x = GetTaskQueueUserDataResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetTaskQueueUserDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTaskQueueUserDataResponse) ProtoMessage() {} + +func (x *GetTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[34] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. 
+func (*GetTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{34} +} + +func (x *GetTaskQueueUserDataResponse) GetUserData() *v111.VersionedTaskQueueUserData { + if x != nil { + return x.UserData + } + return nil +} + +func (x *GetTaskQueueUserDataResponse) GetEphemeralData() *v18.VersionedEphemeralData { + if x != nil { + return x.EphemeralData + } + return nil +} + +type SyncDeploymentUserDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Required, unless deprecated fields are used. + // (-- api-linter: core::0203::required=disabled + // + // aip.dev/not-precedent: Not following Google API format --) + DeploymentName string `protobuf:"bytes,9,opt,name=deployment_name,json=deploymentName,proto3" json:"deployment_name,omitempty"` + TaskQueueTypes []v19.TaskQueueType `protobuf:"varint,8,rep,packed,name=task_queue_types,json=taskQueueTypes,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_types,omitempty"` + // Types that are valid to be assigned to Operation: + // + // *SyncDeploymentUserDataRequest_UpdateVersionData + // *SyncDeploymentUserDataRequest_ForgetVersion + Operation isSyncDeploymentUserDataRequest_Operation `protobuf_oneof:"operation"` + // Absent means no change. + // Ignored by the task queue if new revision number is not greater that what it has. + UpdateRoutingConfig *v112.RoutingConfig `protobuf:"bytes,10,opt,name=update_routing_config,json=updateRoutingConfig,proto3" json:"update_routing_config,omitempty"` + // Optional map of build id to upsert version data. 
+ // (-- api-linter: core::0203::required=disabled + // + // aip.dev/not-precedent: Not following Google API format --) + UpsertVersionsData map[string]*v110.WorkerDeploymentVersionData `protobuf:"bytes,11,rep,name=upsert_versions_data,json=upsertVersionsData,proto3" json:"upsert_versions_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // List of build ids to forget from task queue. + ForgetVersions []string `protobuf:"bytes,12,rep,name=forget_versions,json=forgetVersions,proto3" json:"forget_versions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDeploymentUserDataRequest) Reset() { + *x = SyncDeploymentUserDataRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDeploymentUserDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDeploymentUserDataRequest) ProtoMessage() {} + +func (x *SyncDeploymentUserDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDeploymentUserDataRequest.ProtoReflect.Descriptor instead. 
+func (*SyncDeploymentUserDataRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{35} +} + +func (x *SyncDeploymentUserDataRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SyncDeploymentUserDataRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *SyncDeploymentUserDataRequest) GetDeploymentName() string { + if x != nil { + return x.DeploymentName + } + return "" +} + +func (x *SyncDeploymentUserDataRequest) GetTaskQueueTypes() []v19.TaskQueueType { + if x != nil { + return x.TaskQueueTypes + } + return nil +} + +func (x *SyncDeploymentUserDataRequest) GetOperation() isSyncDeploymentUserDataRequest_Operation { + if x != nil { + return x.Operation + } + return nil +} + +// Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. +func (x *SyncDeploymentUserDataRequest) GetUpdateVersionData() *v110.DeploymentVersionData { + if x != nil { + if x, ok := x.Operation.(*SyncDeploymentUserDataRequest_UpdateVersionData); ok { + return x.UpdateVersionData + } + } + return nil +} + +// Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. 
+func (x *SyncDeploymentUserDataRequest) GetForgetVersion() *v110.WorkerDeploymentVersion { + if x != nil { + if x, ok := x.Operation.(*SyncDeploymentUserDataRequest_ForgetVersion); ok { + return x.ForgetVersion + } + } + return nil +} + +func (x *SyncDeploymentUserDataRequest) GetUpdateRoutingConfig() *v112.RoutingConfig { + if x != nil { + return x.UpdateRoutingConfig + } + return nil +} + +func (x *SyncDeploymentUserDataRequest) GetUpsertVersionsData() map[string]*v110.WorkerDeploymentVersionData { + if x != nil { + return x.UpsertVersionsData + } + return nil +} + +func (x *SyncDeploymentUserDataRequest) GetForgetVersions() []string { + if x != nil { + return x.ForgetVersions + } + return nil +} + +type isSyncDeploymentUserDataRequest_Operation interface { + isSyncDeploymentUserDataRequest_Operation() +} + +type SyncDeploymentUserDataRequest_UpdateVersionData struct { + // The deployment version and its data that is being updated. + // + // Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. + UpdateVersionData *v110.DeploymentVersionData `protobuf:"bytes,6,opt,name=update_version_data,json=updateVersionData,proto3,oneof"` +} + +type SyncDeploymentUserDataRequest_ForgetVersion struct { + // The version whose data should be cleaned from the task queue. + // + // Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. + ForgetVersion *v110.WorkerDeploymentVersion `protobuf:"bytes,7,opt,name=forget_version,json=forgetVersion,proto3,oneof"` +} + +func (*SyncDeploymentUserDataRequest_UpdateVersionData) isSyncDeploymentUserDataRequest_Operation() {} + +func (*SyncDeploymentUserDataRequest_ForgetVersion) isSyncDeploymentUserDataRequest_Operation() {} + +type SyncDeploymentUserDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // New task queue user data version. Can be used to wait for propagation. 
+ Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // If the routing config changed after applying this operation. Compared base on revision number. + // Deprecated. using this is not totaly safe in case of retries. + // + // Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. + RoutingConfigChanged bool `protobuf:"varint,2,opt,name=routing_config_changed,json=routingConfigChanged,proto3" json:"routing_config_changed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncDeploymentUserDataResponse) Reset() { + *x = SyncDeploymentUserDataResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncDeploymentUserDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncDeploymentUserDataResponse) ProtoMessage() {} + +func (x *SyncDeploymentUserDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[36] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncDeploymentUserDataResponse.ProtoReflect.Descriptor instead. +func (*SyncDeploymentUserDataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{36} +} + +func (x *SyncDeploymentUserDataResponse) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +// Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. 
+func (x *SyncDeploymentUserDataResponse) GetRoutingConfigChanged() bool { + if x != nil { + return x.RoutingConfigChanged + } + return false +} + +type ApplyTaskQueueUserDataReplicationEventRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + UserData *v111.TaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ApplyTaskQueueUserDataReplicationEventRequest) Reset() { + *x = ApplyTaskQueueUserDataReplicationEventRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ApplyTaskQueueUserDataReplicationEventRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyTaskQueueUserDataReplicationEventRequest) ProtoMessage() {} + +func (x *ApplyTaskQueueUserDataReplicationEventRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[37] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyTaskQueueUserDataReplicationEventRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyTaskQueueUserDataReplicationEventRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{37} +} + +func (x *ApplyTaskQueueUserDataReplicationEventRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ApplyTaskQueueUserDataReplicationEventRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *ApplyTaskQueueUserDataReplicationEventRequest) GetUserData() *v111.TaskQueueUserData { + if x != nil { + return x.UserData + } + return nil +} + +type ApplyTaskQueueUserDataReplicationEventResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ApplyTaskQueueUserDataReplicationEventResponse) Reset() { + *x = ApplyTaskQueueUserDataReplicationEventResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ApplyTaskQueueUserDataReplicationEventResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApplyTaskQueueUserDataReplicationEventResponse) ProtoMessage() {} + +func (x *ApplyTaskQueueUserDataReplicationEventResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[38] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApplyTaskQueueUserDataReplicationEventResponse.ProtoReflect.Descriptor instead. 
+func (*ApplyTaskQueueUserDataReplicationEventResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{38} +} + +type GetBuildIdTaskQueueMappingRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetBuildIdTaskQueueMappingRequest) Reset() { + *x = GetBuildIdTaskQueueMappingRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetBuildIdTaskQueueMappingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBuildIdTaskQueueMappingRequest) ProtoMessage() {} + +func (x *GetBuildIdTaskQueueMappingRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[39] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBuildIdTaskQueueMappingRequest.ProtoReflect.Descriptor instead. 
+func (*GetBuildIdTaskQueueMappingRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{39} +} + +func (x *GetBuildIdTaskQueueMappingRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *GetBuildIdTaskQueueMappingRequest) GetBuildId() string { + if x != nil { + return x.BuildId + } + return "" +} + +type GetBuildIdTaskQueueMappingResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskQueues []string `protobuf:"bytes,1,rep,name=task_queues,json=taskQueues,proto3" json:"task_queues,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetBuildIdTaskQueueMappingResponse) Reset() { + *x = GetBuildIdTaskQueueMappingResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetBuildIdTaskQueueMappingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBuildIdTaskQueueMappingResponse) ProtoMessage() {} + +func (x *GetBuildIdTaskQueueMappingResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[40] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBuildIdTaskQueueMappingResponse.ProtoReflect.Descriptor instead. 
+func (*GetBuildIdTaskQueueMappingResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{40} +} + +func (x *GetBuildIdTaskQueueMappingResponse) GetTaskQueues() []string { + if x != nil { + return x.TaskQueues + } + return nil +} + +type ForceLoadTaskQueuePartitionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueuePartition *v18.TaskQueuePartition `protobuf:"bytes,2,opt,name=task_queue_partition,json=taskQueuePartition,proto3" json:"task_queue_partition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceLoadTaskQueuePartitionRequest) Reset() { + *x = ForceLoadTaskQueuePartitionRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceLoadTaskQueuePartitionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceLoadTaskQueuePartitionRequest) ProtoMessage() {} + +func (x *ForceLoadTaskQueuePartitionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[41] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceLoadTaskQueuePartitionRequest.ProtoReflect.Descriptor instead. 
+func (*ForceLoadTaskQueuePartitionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{41} +} + +func (x *ForceLoadTaskQueuePartitionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ForceLoadTaskQueuePartitionRequest) GetTaskQueuePartition() *v18.TaskQueuePartition { + if x != nil { + return x.TaskQueuePartition + } + return nil +} + +type ForceLoadTaskQueuePartitionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + WasUnloaded bool `protobuf:"varint,1,opt,name=was_unloaded,json=wasUnloaded,proto3" json:"was_unloaded,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceLoadTaskQueuePartitionResponse) Reset() { + *x = ForceLoadTaskQueuePartitionResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceLoadTaskQueuePartitionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceLoadTaskQueuePartitionResponse) ProtoMessage() {} + +func (x *ForceLoadTaskQueuePartitionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[42] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceLoadTaskQueuePartitionResponse.ProtoReflect.Descriptor instead. 
+func (*ForceLoadTaskQueuePartitionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{42} +} + +func (x *ForceLoadTaskQueuePartitionResponse) GetWasUnloaded() bool { + if x != nil { + return x.WasUnloaded + } + return false +} + +// TODO Shivam - Please remove this in 123 +type ForceUnloadTaskQueueRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v19.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceUnloadTaskQueueRequest) Reset() { + *x = ForceUnloadTaskQueueRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceUnloadTaskQueueRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceUnloadTaskQueueRequest) ProtoMessage() {} + +func (x *ForceUnloadTaskQueueRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[43] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceUnloadTaskQueueRequest.ProtoReflect.Descriptor instead. 
+func (*ForceUnloadTaskQueueRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{43} +} + +func (x *ForceUnloadTaskQueueRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ForceUnloadTaskQueueRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *ForceUnloadTaskQueueRequest) GetTaskQueueType() v19.TaskQueueType { + if x != nil { + return x.TaskQueueType + } + return v19.TaskQueueType(0) +} + +// TODO Shivam - Please remove this in 123 +type ForceUnloadTaskQueueResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + WasLoaded bool `protobuf:"varint,1,opt,name=was_loaded,json=wasLoaded,proto3" json:"was_loaded,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceUnloadTaskQueueResponse) Reset() { + *x = ForceUnloadTaskQueueResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceUnloadTaskQueueResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceUnloadTaskQueueResponse) ProtoMessage() {} + +func (x *ForceUnloadTaskQueueResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[44] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceUnloadTaskQueueResponse.ProtoReflect.Descriptor instead. 
+func (*ForceUnloadTaskQueueResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{44} +} + +func (x *ForceUnloadTaskQueueResponse) GetWasLoaded() bool { + if x != nil { + return x.WasLoaded + } + return false +} + +type ForceUnloadTaskQueuePartitionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueuePartition *v18.TaskQueuePartition `protobuf:"bytes,2,opt,name=task_queue_partition,json=taskQueuePartition,proto3" json:"task_queue_partition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceUnloadTaskQueuePartitionRequest) Reset() { + *x = ForceUnloadTaskQueuePartitionRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceUnloadTaskQueuePartitionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceUnloadTaskQueuePartitionRequest) ProtoMessage() {} + +func (x *ForceUnloadTaskQueuePartitionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[45] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceUnloadTaskQueuePartitionRequest.ProtoReflect.Descriptor instead. 
+func (*ForceUnloadTaskQueuePartitionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{45} +} + +func (x *ForceUnloadTaskQueuePartitionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ForceUnloadTaskQueuePartitionRequest) GetTaskQueuePartition() *v18.TaskQueuePartition { + if x != nil { + return x.TaskQueuePartition + } + return nil +} + +type ForceUnloadTaskQueuePartitionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + WasLoaded bool `protobuf:"varint,1,opt,name=was_loaded,json=wasLoaded,proto3" json:"was_loaded,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ForceUnloadTaskQueuePartitionResponse) Reset() { + *x = ForceUnloadTaskQueuePartitionResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ForceUnloadTaskQueuePartitionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ForceUnloadTaskQueuePartitionResponse) ProtoMessage() {} + +func (x *ForceUnloadTaskQueuePartitionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[46] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ForceUnloadTaskQueuePartitionResponse.ProtoReflect.Descriptor instead. 
+func (*ForceUnloadTaskQueuePartitionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{46} +} + +func (x *ForceUnloadTaskQueuePartitionResponse) GetWasLoaded() bool { + if x != nil { + return x.WasLoaded + } + return false +} + +// (-- api-linter: core::0134::request-mask-required=disabled +// +// aip.dev/not-precedent: UpdateTaskQueueUserDataRequest doesn't follow Google API format --) +// +// (-- api-linter: core::0134::request-resource-required=disabled +// +// aip.dev/not-precedent: UpdateTaskQueueUserDataRequest RPC doesn't follow Google API format. --) +type UpdateTaskQueueUserDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Versioned user data, set if the task queue has user data and the request's last_known_user_data_version is less + // than the version cached in the root partition. 
+ UserData *v111.VersionedTaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + // List of added build ids + BuildIdsAdded []string `protobuf:"bytes,4,rep,name=build_ids_added,json=buildIdsAdded,proto3" json:"build_ids_added,omitempty"` + // List of removed build ids + BuildIdsRemoved []string `protobuf:"bytes,5,rep,name=build_ids_removed,json=buildIdsRemoved,proto3" json:"build_ids_removed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateTaskQueueUserDataRequest) Reset() { + *x = UpdateTaskQueueUserDataRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateTaskQueueUserDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTaskQueueUserDataRequest) ProtoMessage() {} + +func (x *UpdateTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[47] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{47} +} + +func (x *UpdateTaskQueueUserDataRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *UpdateTaskQueueUserDataRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *UpdateTaskQueueUserDataRequest) GetUserData() *v111.VersionedTaskQueueUserData { + if x != nil { + return x.UserData + } + return nil +} + +func (x *UpdateTaskQueueUserDataRequest) GetBuildIdsAdded() []string { + if x != nil { + return x.BuildIdsAdded + } + return nil +} + +func (x *UpdateTaskQueueUserDataRequest) GetBuildIdsRemoved() []string { + if x != nil { + return x.BuildIdsRemoved + } + return nil +} + +type UpdateTaskQueueUserDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateTaskQueueUserDataResponse) Reset() { + *x = UpdateTaskQueueUserDataResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateTaskQueueUserDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTaskQueueUserDataResponse) ProtoMessage() {} + +func (x *UpdateTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[48] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{48} +} + +type ReplicateTaskQueueUserDataRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + UserData *v111.TaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicateTaskQueueUserDataRequest) Reset() { + *x = ReplicateTaskQueueUserDataRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicateTaskQueueUserDataRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateTaskQueueUserDataRequest) ProtoMessage() {} + +func (x *ReplicateTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[49] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicateTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. 
+func (*ReplicateTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{49} +} + +func (x *ReplicateTaskQueueUserDataRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ReplicateTaskQueueUserDataRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *ReplicateTaskQueueUserDataRequest) GetUserData() *v111.TaskQueueUserData { + if x != nil { + return x.UserData + } + return nil +} + +type ReplicateTaskQueueUserDataResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicateTaskQueueUserDataResponse) Reset() { + *x = ReplicateTaskQueueUserDataResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicateTaskQueueUserDataResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicateTaskQueueUserDataResponse) ProtoMessage() {} + +func (x *ReplicateTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[50] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicateTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. 
+func (*ReplicateTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{50} +} + +type CheckTaskQueueUserDataPropagationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + Version int64 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CheckTaskQueueUserDataPropagationRequest) Reset() { + *x = CheckTaskQueueUserDataPropagationRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CheckTaskQueueUserDataPropagationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckTaskQueueUserDataPropagationRequest) ProtoMessage() {} + +func (x *CheckTaskQueueUserDataPropagationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[51] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckTaskQueueUserDataPropagationRequest.ProtoReflect.Descriptor instead. 
+func (*CheckTaskQueueUserDataPropagationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{51} +} + +func (x *CheckTaskQueueUserDataPropagationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CheckTaskQueueUserDataPropagationRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *CheckTaskQueueUserDataPropagationRequest) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +type CheckTaskQueueUserDataPropagationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CheckTaskQueueUserDataPropagationResponse) Reset() { + *x = CheckTaskQueueUserDataPropagationResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CheckTaskQueueUserDataPropagationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckTaskQueueUserDataPropagationResponse) ProtoMessage() {} + +func (x *CheckTaskQueueUserDataPropagationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[52] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckTaskQueueUserDataPropagationResponse.ProtoReflect.Descriptor instead. 
+func (*CheckTaskQueueUserDataPropagationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{52} +} + +type DispatchNexusTaskRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Nexus request extracted by the frontend and translated into Temporal API format. + Request *v113.Request `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` + ForwardInfo *v18.TaskForwardInfo `protobuf:"bytes,4,opt,name=forward_info,json=forwardInfo,proto3" json:"forward_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DispatchNexusTaskRequest) Reset() { + *x = DispatchNexusTaskRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DispatchNexusTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DispatchNexusTaskRequest) ProtoMessage() {} + +func (x *DispatchNexusTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[53] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DispatchNexusTaskRequest.ProtoReflect.Descriptor instead. 
+func (*DispatchNexusTaskRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{53} +} + +func (x *DispatchNexusTaskRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DispatchNexusTaskRequest) GetTaskQueue() *v14.TaskQueue { + if x != nil { + return x.TaskQueue + } + return nil +} + +func (x *DispatchNexusTaskRequest) GetRequest() *v113.Request { + if x != nil { + return x.Request + } + return nil +} + +func (x *DispatchNexusTaskRequest) GetForwardInfo() *v18.TaskForwardInfo { + if x != nil { + return x.ForwardInfo + } + return nil +} + +type DispatchNexusTaskResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Outcome: + // + // *DispatchNexusTaskResponse_HandlerError + // *DispatchNexusTaskResponse_Response + // *DispatchNexusTaskResponse_RequestTimeout + // *DispatchNexusTaskResponse_Failure + Outcome isDispatchNexusTaskResponse_Outcome `protobuf_oneof:"outcome"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DispatchNexusTaskResponse) Reset() { + *x = DispatchNexusTaskResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DispatchNexusTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DispatchNexusTaskResponse) ProtoMessage() {} + +func (x *DispatchNexusTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[54] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DispatchNexusTaskResponse.ProtoReflect.Descriptor instead. 
+func (*DispatchNexusTaskResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{54} +} + +func (x *DispatchNexusTaskResponse) GetOutcome() isDispatchNexusTaskResponse_Outcome { + if x != nil { + return x.Outcome + } + return nil +} + +// Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. +func (x *DispatchNexusTaskResponse) GetHandlerError() *v113.HandlerError { + if x != nil { + if x, ok := x.Outcome.(*DispatchNexusTaskResponse_HandlerError); ok { + return x.HandlerError + } + } + return nil +} -func (x *GetTaskQueueUserDataRequest) GetNamespaceId() string { +func (x *DispatchNexusTaskResponse) GetResponse() *v113.Response { + if x != nil { + if x, ok := x.Outcome.(*DispatchNexusTaskResponse_Response); ok { + return x.Response + } + } + return nil +} + +func (x *DispatchNexusTaskResponse) GetRequestTimeout() *DispatchNexusTaskResponse_Timeout { + if x != nil { + if x, ok := x.Outcome.(*DispatchNexusTaskResponse_RequestTimeout); ok { + return x.RequestTimeout + } + } + return nil +} + +func (x *DispatchNexusTaskResponse) GetFailure() *v114.Failure { + if x != nil { + if x, ok := x.Outcome.(*DispatchNexusTaskResponse_Failure); ok { + return x.Failure + } + } + return nil +} + +type isDispatchNexusTaskResponse_Outcome interface { + isDispatchNexusTaskResponse_Outcome() +} + +type DispatchNexusTaskResponse_HandlerError struct { + // Deprecated. Use failure field instead. + // + // Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. + HandlerError *v113.HandlerError `protobuf:"bytes,1,opt,name=handler_error,json=handlerError,proto3,oneof"` +} + +type DispatchNexusTaskResponse_Response struct { + // Set if the worker's handler responded successfully to the nexus task. 
+ Response *v113.Response `protobuf:"bytes,2,opt,name=response,proto3,oneof"` +} + +type DispatchNexusTaskResponse_RequestTimeout struct { + RequestTimeout *DispatchNexusTaskResponse_Timeout `protobuf:"bytes,3,opt,name=request_timeout,json=requestTimeout,proto3,oneof"` +} + +type DispatchNexusTaskResponse_Failure struct { + // Set if the worker's handler failed the nexus task. Must contain a NexusHandlerFailureInfo object. + Failure *v114.Failure `protobuf:"bytes,4,opt,name=failure,proto3,oneof"` +} + +func (*DispatchNexusTaskResponse_HandlerError) isDispatchNexusTaskResponse_Outcome() {} + +func (*DispatchNexusTaskResponse_Response) isDispatchNexusTaskResponse_Outcome() {} + +func (*DispatchNexusTaskResponse_RequestTimeout) isDispatchNexusTaskResponse_Outcome() {} + +func (*DispatchNexusTaskResponse_Failure) isDispatchNexusTaskResponse_Outcome() {} + +type PollNexusTaskQueueRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // A unique ID generated by the frontend for this request. + PollerId string `protobuf:"bytes,2,opt,name=poller_id,json=pollerId,proto3" json:"poller_id,omitempty"` + // Original WorkflowService poll request as received by the frontend. + Request *v1.PollNexusTaskQueueRequest `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` + // Non-empty if this poll was forwarded from a child partition. + ForwardedSource string `protobuf:"bytes,4,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` + // Extra conditions on this poll request. Only supported with new matcher. 
+ Conditions *PollConditions `protobuf:"bytes,5,opt,name=conditions,proto3" json:"conditions,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollNexusTaskQueueRequest) Reset() { + *x = PollNexusTaskQueueRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollNexusTaskQueueRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollNexusTaskQueueRequest) ProtoMessage() {} + +func (x *PollNexusTaskQueueRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[55] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollNexusTaskQueueRequest.ProtoReflect.Descriptor instead. 
+func (*PollNexusTaskQueueRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{55} +} + +func (x *PollNexusTaskQueueRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetTaskQueueUserDataRequest) GetTaskQueue() string { +func (x *PollNexusTaskQueueRequest) GetPollerId() string { if x != nil { - return x.TaskQueue + return x.PollerId } return "" } -func (x *GetTaskQueueUserDataRequest) GetTaskQueueType() v110.TaskQueueType { +func (x *PollNexusTaskQueueRequest) GetRequest() *v1.PollNexusTaskQueueRequest { if x != nil { - return x.TaskQueueType + return x.Request } - return v110.TaskQueueType(0) + return nil } -func (x *GetTaskQueueUserDataRequest) GetLastKnownUserDataVersion() int64 { +func (x *PollNexusTaskQueueRequest) GetForwardedSource() string { if x != nil { - return x.LastKnownUserDataVersion + return x.ForwardedSource } - return 0 + return "" } -func (x *GetTaskQueueUserDataRequest) GetWaitNewData() bool { +func (x *PollNexusTaskQueueRequest) GetConditions() *PollConditions { if x != nil { - return x.WaitNewData + return x.Conditions } - return false + return nil } -type GetTaskQueueUserDataResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type PollNexusTaskQueueResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Response that should be delivered to the worker containing a request from DispatchNexusTaskRequest. + Response *v1.PollNexusTaskQueueResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` unknownFields protoimpl.UnknownFields - - // Versioned user data, set if the task queue has user data and the request's last_known_user_data_version is less - // than the version cached in the root partition. 
- UserData *v111.VersionedTaskQueueUserData `protobuf:"bytes,2,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetTaskQueueUserDataResponse) Reset() { - *x = GetTaskQueueUserDataResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *PollNexusTaskQueueResponse) Reset() { + *x = PollNexusTaskQueueResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetTaskQueueUserDataResponse) String() string { +func (x *PollNexusTaskQueueResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetTaskQueueUserDataResponse) ProtoMessage() {} +func (*PollNexusTaskQueueResponse) ProtoMessage() {} -func (x *GetTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PollNexusTaskQueueResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[56] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1843,46 +4117,46 @@ func (x *GetTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. -func (*GetTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{23} +// Deprecated: Use PollNexusTaskQueueResponse.ProtoReflect.Descriptor instead. 
+func (*PollNexusTaskQueueResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{56} } -func (x *GetTaskQueueUserDataResponse) GetUserData() *v111.VersionedTaskQueueUserData { +func (x *PollNexusTaskQueueResponse) GetResponse() *v1.PollNexusTaskQueueResponse { if x != nil { - return x.UserData + return x.Response } return nil } -type ApplyTaskQueueUserDataReplicationEventRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondNexusTaskCompletedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // A unique ID for this task generated by the matching engine. Decoded from the incoming request's task token. + TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Original completion as received by the frontend. 
+ Request *v1.RespondNexusTaskCompletedRequest `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - UserData *v111.TaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ApplyTaskQueueUserDataReplicationEventRequest) Reset() { - *x = ApplyTaskQueueUserDataReplicationEventRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondNexusTaskCompletedRequest) Reset() { + *x = RespondNexusTaskCompletedRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ApplyTaskQueueUserDataReplicationEventRequest) String() string { +func (x *RespondNexusTaskCompletedRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyTaskQueueUserDataReplicationEventRequest) ProtoMessage() {} +func (*RespondNexusTaskCompletedRequest) ProtoMessage() {} -func (x *ApplyTaskQueueUserDataReplicationEventRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondNexusTaskCompletedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[57] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1892,56 +4166,61 
@@ func (x *ApplyTaskQueueUserDataReplicationEventRequest) ProtoReflect() protorefl return mi.MessageOf(x) } -// Deprecated: Use ApplyTaskQueueUserDataReplicationEventRequest.ProtoReflect.Descriptor instead. -func (*ApplyTaskQueueUserDataReplicationEventRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{24} +// Deprecated: Use RespondNexusTaskCompletedRequest.ProtoReflect.Descriptor instead. +func (*RespondNexusTaskCompletedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{57} } -func (x *ApplyTaskQueueUserDataReplicationEventRequest) GetNamespaceId() string { +func (x *RespondNexusTaskCompletedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *ApplyTaskQueueUserDataReplicationEventRequest) GetTaskQueue() string { +func (x *RespondNexusTaskCompletedRequest) GetTaskQueue() *v14.TaskQueue { if x != nil { return x.TaskQueue } + return nil +} + +func (x *RespondNexusTaskCompletedRequest) GetTaskId() string { + if x != nil { + return x.TaskId + } return "" } -func (x *ApplyTaskQueueUserDataReplicationEventRequest) GetUserData() *v111.TaskQueueUserData { +func (x *RespondNexusTaskCompletedRequest) GetRequest() *v1.RespondNexusTaskCompletedRequest { if x != nil { - return x.UserData + return x.Request } return nil } -type ApplyTaskQueueUserDataReplicationEventResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondNexusTaskCompletedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ApplyTaskQueueUserDataReplicationEventResponse) Reset() { - *x = ApplyTaskQueueUserDataReplicationEventResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[25] - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondNexusTaskCompletedResponse) Reset() { + *x = RespondNexusTaskCompletedResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ApplyTaskQueueUserDataReplicationEventResponse) String() string { +func (x *RespondNexusTaskCompletedResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ApplyTaskQueueUserDataReplicationEventResponse) ProtoMessage() {} +func (*RespondNexusTaskCompletedResponse) ProtoMessage() {} -func (x *ApplyTaskQueueUserDataReplicationEventResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondNexusTaskCompletedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[58] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1951,38 +4230,39 @@ func (x *ApplyTaskQueueUserDataReplicationEventResponse) ProtoReflect() protoref return mi.MessageOf(x) } -// Deprecated: Use ApplyTaskQueueUserDataReplicationEventResponse.ProtoReflect.Descriptor instead. -func (*ApplyTaskQueueUserDataReplicationEventResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{25} +// Deprecated: Use RespondNexusTaskCompletedResponse.ProtoReflect.Descriptor instead. 
+func (*RespondNexusTaskCompletedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{58} } -type GetBuildIdTaskQueueMappingRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RespondNexusTaskFailedRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // A unique ID for this task generated by the matching engine. Decoded from the incoming request's task token. + TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // Original failure as received by the frontend. + Request *v1.RespondNexusTaskFailedRequest `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *GetBuildIdTaskQueueMappingRequest) Reset() { - *x = GetBuildIdTaskQueueMappingRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondNexusTaskFailedRequest) Reset() { + *x = RespondNexusTaskFailedRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetBuildIdTaskQueueMappingRequest) String() string { +func (x *RespondNexusTaskFailedRequest) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*GetBuildIdTaskQueueMappingRequest) ProtoMessage() {} +func (*RespondNexusTaskFailedRequest) ProtoMessage() {} -func (x *GetBuildIdTaskQueueMappingRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondNexusTaskFailedRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[59] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1992,51 +4272,61 @@ func (x *GetBuildIdTaskQueueMappingRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GetBuildIdTaskQueueMappingRequest.ProtoReflect.Descriptor instead. -func (*GetBuildIdTaskQueueMappingRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{26} +// Deprecated: Use RespondNexusTaskFailedRequest.ProtoReflect.Descriptor instead. 
+func (*RespondNexusTaskFailedRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{59} } -func (x *GetBuildIdTaskQueueMappingRequest) GetNamespaceId() string { +func (x *RespondNexusTaskFailedRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *GetBuildIdTaskQueueMappingRequest) GetBuildId() string { +func (x *RespondNexusTaskFailedRequest) GetTaskQueue() *v14.TaskQueue { if x != nil { - return x.BuildId + return x.TaskQueue + } + return nil +} + +func (x *RespondNexusTaskFailedRequest) GetTaskId() string { + if x != nil { + return x.TaskId } return "" } -type GetBuildIdTaskQueueMappingResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *RespondNexusTaskFailedRequest) GetRequest() *v1.RespondNexusTaskFailedRequest { + if x != nil { + return x.Request + } + return nil +} - TaskQueues []string `protobuf:"bytes,1,rep,name=task_queues,json=taskQueues,proto3" json:"task_queues,omitempty"` +type RespondNexusTaskFailedResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GetBuildIdTaskQueueMappingResponse) Reset() { - *x = GetBuildIdTaskQueueMappingResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RespondNexusTaskFailedResponse) Reset() { + *x = RespondNexusTaskFailedResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[60] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *GetBuildIdTaskQueueMappingResponse) String() string { +func (x *RespondNexusTaskFailedResponse) String() string { return 
protoimpl.X.MessageStringOf(x) } -func (*GetBuildIdTaskQueueMappingResponse) ProtoMessage() {} +func (*RespondNexusTaskFailedResponse) ProtoMessage() {} -func (x *GetBuildIdTaskQueueMappingResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RespondNexusTaskFailedResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[60] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2046,46 +4336,45 @@ func (x *GetBuildIdTaskQueueMappingResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GetBuildIdTaskQueueMappingResponse.ProtoReflect.Descriptor instead. -func (*GetBuildIdTaskQueueMappingResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{27} -} - -func (x *GetBuildIdTaskQueueMappingResponse) GetTaskQueues() []string { - if x != nil { - return x.TaskQueues - } - return nil +// Deprecated: Use RespondNexusTaskFailedResponse.ProtoReflect.Descriptor instead. +func (*RespondNexusTaskFailedResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{60} } -type ForceUnloadTaskQueueRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// (-- api-linter: core::0133::request-unknown-fields=disabled +// +// aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) +// +// (-- api-linter: core::0133::request-resource-field=disabled +// +// aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. 
--) +// +// (-- api-linter: core::0133::request-parent-required=disabled +// +// aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) +type CreateNexusEndpointRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Spec *v111.NexusEndpointSpec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - TaskQueueType v110.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ForceUnloadTaskQueueRequest) Reset() { - *x = ForceUnloadTaskQueueRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CreateNexusEndpointRequest) Reset() { + *x = CreateNexusEndpointRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[61] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ForceUnloadTaskQueueRequest) String() string { +func (x *CreateNexusEndpointRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ForceUnloadTaskQueueRequest) ProtoMessage() {} +func (*CreateNexusEndpointRequest) ProtoMessage() {} -func (x *ForceUnloadTaskQueueRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CreateNexusEndpointRequest) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[61] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2095,58 +4384,41 @@ func (x *ForceUnloadTaskQueueRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ForceUnloadTaskQueueRequest.ProtoReflect.Descriptor instead. -func (*ForceUnloadTaskQueueRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{28} -} - -func (x *ForceUnloadTaskQueueRequest) GetNamespaceId() string { - if x != nil { - return x.NamespaceId - } - return "" -} - -func (x *ForceUnloadTaskQueueRequest) GetTaskQueue() string { - if x != nil { - return x.TaskQueue - } - return "" +// Deprecated: Use CreateNexusEndpointRequest.ProtoReflect.Descriptor instead. +func (*CreateNexusEndpointRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{61} } -func (x *ForceUnloadTaskQueueRequest) GetTaskQueueType() v110.TaskQueueType { +func (x *CreateNexusEndpointRequest) GetSpec() *v111.NexusEndpointSpec { if x != nil { - return x.TaskQueueType + return x.Spec } - return v110.TaskQueueType(0) + return nil } -type ForceUnloadTaskQueueResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CreateNexusEndpointResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Entry *v111.NexusEndpointEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` unknownFields protoimpl.UnknownFields - - WasLoaded bool `protobuf:"varint,1,opt,name=was_loaded,json=wasLoaded,proto3" json:"was_loaded,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ForceUnloadTaskQueueResponse) Reset() { - *x = ForceUnloadTaskQueueResponse{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CreateNexusEndpointResponse) Reset() { + *x = CreateNexusEndpointResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[62] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ForceUnloadTaskQueueResponse) String() string { +func (x *CreateNexusEndpointResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ForceUnloadTaskQueueResponse) ProtoMessage() {} +func (*CreateNexusEndpointResponse) ProtoMessage() {} -func (x *ForceUnloadTaskQueueResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CreateNexusEndpointResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[62] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2156,59 +4428,53 @@ func (x *ForceUnloadTaskQueueResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ForceUnloadTaskQueueResponse.ProtoReflect.Descriptor instead. -func (*ForceUnloadTaskQueueResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{29} +// Deprecated: Use CreateNexusEndpointResponse.ProtoReflect.Descriptor instead. 
+func (*CreateNexusEndpointResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{62} } -func (x *ForceUnloadTaskQueueResponse) GetWasLoaded() bool { +func (x *CreateNexusEndpointResponse) GetEntry() *v111.NexusEndpointEntry { if x != nil { - return x.WasLoaded + return x.Entry } - return false + return nil } -// (-- api-linter: core::0134::request-mask-required=disabled +// (-- api-linter: core::0134::request-resource-required=disabled // -// aip.dev/not-precedent: UpdateTaskQueueUserDataRequest doesn't follow Google API format --) +// aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) // -// (-- api-linter: core::0134::request-resource-required=disabled +// (-- api-linter: core::0134::request-mask-required=disabled // -// aip.dev/not-precedent: UpdateTaskQueueUserDataRequest RPC doesn't follow Google API format. --) -type UpdateTaskQueueUserDataRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) +type UpdateNexusEndpointRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // ID of the endpoint to update. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Version of the endpoint, used for optimistic concurrency. Must match current version in persistence or the + // request will fail a FAILED_PRECONDITION error. 
+ Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Spec *v111.NexusEndpointSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - // Versioned user data, set if the task queue has user data and the request's last_known_user_data_version is less - // than the version cached in the root partition. - UserData *v111.VersionedTaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` - // List of added build ids - BuildIdsAdded []string `protobuf:"bytes,4,rep,name=build_ids_added,json=buildIdsAdded,proto3" json:"build_ids_added,omitempty"` - // List of removed build ids - BuildIdsRemoved []string `protobuf:"bytes,5,rep,name=build_ids_removed,json=buildIdsRemoved,proto3" json:"build_ids_removed,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *UpdateTaskQueueUserDataRequest) Reset() { - *x = UpdateTaskQueueUserDataRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateNexusEndpointRequest) Reset() { + *x = UpdateNexusEndpointRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateTaskQueueUserDataRequest) String() string { +func (x *UpdateNexusEndpointRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateTaskQueueUserDataRequest) ProtoMessage() {} +func (*UpdateNexusEndpointRequest) ProtoMessage() {} -func (x *UpdateTaskQueueUserDataRequest) ProtoReflect() 
protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateNexusEndpointRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[63] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2218,70 +4484,55 @@ func (x *UpdateTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. -func (*UpdateTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{30} -} - -func (x *UpdateTaskQueueUserDataRequest) GetNamespaceId() string { - if x != nil { - return x.NamespaceId - } - return "" +// Deprecated: Use UpdateNexusEndpointRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateNexusEndpointRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{63} } -func (x *UpdateTaskQueueUserDataRequest) GetTaskQueue() string { +func (x *UpdateNexusEndpointRequest) GetId() string { if x != nil { - return x.TaskQueue + return x.Id } return "" } -func (x *UpdateTaskQueueUserDataRequest) GetUserData() *v111.VersionedTaskQueueUserData { - if x != nil { - return x.UserData - } - return nil -} - -func (x *UpdateTaskQueueUserDataRequest) GetBuildIdsAdded() []string { +func (x *UpdateNexusEndpointRequest) GetVersion() int64 { if x != nil { - return x.BuildIdsAdded + return x.Version } - return nil + return 0 } -func (x *UpdateTaskQueueUserDataRequest) GetBuildIdsRemoved() []string { +func (x *UpdateNexusEndpointRequest) GetSpec() *v111.NexusEndpointSpec { if x != nil { - return x.BuildIdsRemoved + return x.Spec } return nil } -type UpdateTaskQueueUserDataResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type UpdateNexusEndpointResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Entry *v111.NexusEndpointEntry `protobuf:"bytes,1,opt,name=entry,proto3" json:"entry,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *UpdateTaskQueueUserDataResponse) Reset() { - *x = UpdateTaskQueueUserDataResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateNexusEndpointResponse) Reset() { + *x = UpdateNexusEndpointResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateTaskQueueUserDataResponse) String() string { +func (x *UpdateNexusEndpointResponse) String() string { 
return protoimpl.X.MessageStringOf(x) } -func (*UpdateTaskQueueUserDataResponse) ProtoMessage() {} +func (*UpdateNexusEndpointResponse) ProtoMessage() {} -func (x *UpdateTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateNexusEndpointResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[64] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2291,39 +4542,49 @@ func (x *UpdateTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. -func (*UpdateTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{31} +// Deprecated: Use UpdateNexusEndpointResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateNexusEndpointResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{64} } -type ReplicateTaskQueueUserDataRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *UpdateNexusEndpointResponse) GetEntry() *v111.NexusEndpointEntry { + if x != nil { + return x.Entry + } + return nil +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - UserData *v111.TaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` +// (-- api-linter: core::0135::request-name-behavior=disabled +// +// aip.dev/not-precedent: DeleteNexusEndpointRequest RPC doesn't follow Google API format. --) +// +// (-- api-linter: core::0135::request-name-reference=disabled +// +// aip.dev/not-precedent: DeleteNexusEndpointRequest RPC doesn't follow Google API format. --) +type DeleteNexusEndpointRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // ID of the endpoint to delete. 
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReplicateTaskQueueUserDataRequest) Reset() { - *x = ReplicateTaskQueueUserDataRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteNexusEndpointRequest) Reset() { + *x = DeleteNexusEndpointRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[65] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReplicateTaskQueueUserDataRequest) String() string { +func (x *DeleteNexusEndpointRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplicateTaskQueueUserDataRequest) ProtoMessage() {} +func (*DeleteNexusEndpointRequest) ProtoMessage() {} -func (x *ReplicateTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteNexusEndpointRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[65] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2333,56 +4594,40 @@ func (x *ReplicateTaskQueueUserDataRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ReplicateTaskQueueUserDataRequest.ProtoReflect.Descriptor instead. 
-func (*ReplicateTaskQueueUserDataRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{32} -} - -func (x *ReplicateTaskQueueUserDataRequest) GetNamespaceId() string { - if x != nil { - return x.NamespaceId - } - return "" +// Deprecated: Use DeleteNexusEndpointRequest.ProtoReflect.Descriptor instead. +func (*DeleteNexusEndpointRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{65} } -func (x *ReplicateTaskQueueUserDataRequest) GetTaskQueue() string { +func (x *DeleteNexusEndpointRequest) GetId() string { if x != nil { - return x.TaskQueue + return x.Id } return "" } -func (x *ReplicateTaskQueueUserDataRequest) GetUserData() *v111.TaskQueueUserData { - if x != nil { - return x.UserData - } - return nil -} - -type ReplicateTaskQueueUserDataResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DeleteNexusEndpointResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReplicateTaskQueueUserDataResponse) Reset() { - *x = ReplicateTaskQueueUserDataResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DeleteNexusEndpointResponse) Reset() { + *x = DeleteNexusEndpointResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReplicateTaskQueueUserDataResponse) String() string { +func (x *DeleteNexusEndpointResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplicateTaskQueueUserDataResponse) ProtoMessage() {} +func (*DeleteNexusEndpointResponse) 
ProtoMessage() {} -func (x *ReplicateTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DeleteNexusEndpointResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[66] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2392,42 +4637,48 @@ func (x *ReplicateTaskQueueUserDataResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use ReplicateTaskQueueUserDataResponse.ProtoReflect.Descriptor instead. -func (*ReplicateTaskQueueUserDataResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{33} +// Deprecated: Use DeleteNexusEndpointResponse.ProtoReflect.Descriptor instead. +func (*DeleteNexusEndpointResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{66} } -type DispatchNexusTaskRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListNexusEndpointsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // To get the next page, pass in `ListNexusEndpointsResponse.next_page_token` from the previous page's response. The + // token will be empty if there's no other page. + // Note: the last page may be empty if the total number of services registered is a multiple of the page size. + // Mutually exclusive with wait. Specifying both will result in an invalid argument error. 
+ NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The nexus_endpoints table has a monotonically increasing version number that is incremented on every change to + // the table. This field can be used to provide the last known table version in conjuction with the `wait` field to + // long poll on changes to the table. + // If next_page_token is not empty and the current table version does not match this field, this request will fail + // with a failed precondition error. + LastKnownTableVersion int64 `protobuf:"varint,3,opt,name=last_known_table_version,json=lastKnownTableVersion,proto3" json:"last_known_table_version,omitempty"` + // If true, this request becomes a long poll and will be unblocked once the DB version is incremented. + // Mutually exclusive with next_page_token. Specifying both will result in an invalid argument error. + Wait bool `protobuf:"varint,4,opt,name=wait,proto3" json:"wait,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - // Nexus request extracted by the frontend and translated into Temporal API format. - Request *v112.Request `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` - // Non-empty if this task was forwarded from a child partition. 
- ForwardedSource string `protobuf:"bytes,4,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *DispatchNexusTaskRequest) Reset() { - *x = DispatchNexusTaskRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListNexusEndpointsRequest) Reset() { + *x = ListNexusEndpointsRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DispatchNexusTaskRequest) String() string { +func (x *ListNexusEndpointsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DispatchNexusTaskRequest) ProtoMessage() {} +func (*ListNexusEndpointsRequest) ProtoMessage() {} -func (x *DispatchNexusTaskRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListNexusEndpointsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[67] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2437,69 +4688,65 @@ func (x *DispatchNexusTaskRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DispatchNexusTaskRequest.ProtoReflect.Descriptor instead. -func (*DispatchNexusTaskRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{34} +// Deprecated: Use ListNexusEndpointsRequest.ProtoReflect.Descriptor instead. 
+func (*ListNexusEndpointsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{67} } -func (x *DispatchNexusTaskRequest) GetNamespaceId() string { +func (x *ListNexusEndpointsRequest) GetNextPageToken() []byte { if x != nil { - return x.NamespaceId + return x.NextPageToken } - return "" + return nil } -func (x *DispatchNexusTaskRequest) GetTaskQueue() *v14.TaskQueue { +func (x *ListNexusEndpointsRequest) GetPageSize() int32 { if x != nil { - return x.TaskQueue + return x.PageSize } - return nil + return 0 } -func (x *DispatchNexusTaskRequest) GetRequest() *v112.Request { +func (x *ListNexusEndpointsRequest) GetLastKnownTableVersion() int64 { if x != nil { - return x.Request + return x.LastKnownTableVersion } - return nil + return 0 } -func (x *DispatchNexusTaskRequest) GetForwardedSource() string { +func (x *ListNexusEndpointsRequest) GetWait() bool { if x != nil { - return x.ForwardedSource + return x.Wait } - return "" -} - -type DispatchNexusTaskResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields + return false +} - // Types that are assignable to Outcome: - // - // *DispatchNexusTaskResponse_HandlerError - // *DispatchNexusTaskResponse_Response - Outcome isDispatchNexusTaskResponse_Outcome `protobuf_oneof:"outcome"` +type ListNexusEndpointsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Token for getting the next page. 
+ NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + TableVersion int64 `protobuf:"varint,2,opt,name=table_version,json=tableVersion,proto3" json:"table_version,omitempty"` + Entries []*v111.NexusEndpointEntry `protobuf:"bytes,3,rep,name=entries,proto3" json:"entries,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *DispatchNexusTaskResponse) Reset() { - *x = DispatchNexusTaskResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListNexusEndpointsResponse) Reset() { + *x = ListNexusEndpointsResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[68] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DispatchNexusTaskResponse) String() string { +func (x *ListNexusEndpointsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DispatchNexusTaskResponse) ProtoMessage() {} +func (*ListNexusEndpointsResponse) ProtoMessage() {} -func (x *DispatchNexusTaskResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListNexusEndpointsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[68] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2509,82 +4756,56 @@ func (x *DispatchNexusTaskResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DispatchNexusTaskResponse.ProtoReflect.Descriptor instead. 
-func (*DispatchNexusTaskResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{35} +// Deprecated: Use ListNexusEndpointsResponse.ProtoReflect.Descriptor instead. +func (*ListNexusEndpointsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{68} } -func (m *DispatchNexusTaskResponse) GetOutcome() isDispatchNexusTaskResponse_Outcome { - if m != nil { - return m.Outcome +func (x *ListNexusEndpointsResponse) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken } return nil } -func (x *DispatchNexusTaskResponse) GetHandlerError() *v112.HandlerError { - if x, ok := x.GetOutcome().(*DispatchNexusTaskResponse_HandlerError); ok { - return x.HandlerError +func (x *ListNexusEndpointsResponse) GetTableVersion() int64 { + if x != nil { + return x.TableVersion } - return nil + return 0 } -func (x *DispatchNexusTaskResponse) GetResponse() *v112.Response { - if x, ok := x.GetOutcome().(*DispatchNexusTaskResponse_Response); ok { - return x.Response +func (x *ListNexusEndpointsResponse) GetEntries() []*v111.NexusEndpointEntry { + if x != nil { + return x.Entries } return nil } -type isDispatchNexusTaskResponse_Outcome interface { - isDispatchNexusTaskResponse_Outcome() -} - -type DispatchNexusTaskResponse_HandlerError struct { - // Set if the worker's handler failed the nexus task. - HandlerError *v112.HandlerError `protobuf:"bytes,1,opt,name=handler_error,json=handlerError,proto3,oneof"` -} - -type DispatchNexusTaskResponse_Response struct { - // Set if the worker's handler responded successfully to the nexus task. 
- Response *v112.Response `protobuf:"bytes,2,opt,name=response,proto3,oneof"` -} - -func (*DispatchNexusTaskResponse_HandlerError) isDispatchNexusTaskResponse_Outcome() {} - -func (*DispatchNexusTaskResponse_Response) isDispatchNexusTaskResponse_Outcome() {} - -type PollNexusTaskQueueRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - // A unique ID generated by the frontend for this request. - PollerId string `protobuf:"bytes,2,opt,name=poller_id,json=pollerId,proto3" json:"poller_id,omitempty"` - // Original WorkflowService poll request as received by the frontend. - Request *v1.PollNexusTaskQueueRequest `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` - // Non-empty if this poll was forwarded from a child partition. - ForwardedSource string `protobuf:"bytes,4,opt,name=forwarded_source,json=forwardedSource,proto3" json:"forwarded_source,omitempty"` +type RecordWorkerHeartbeatRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + HeartbeartRequest *v1.RecordWorkerHeartbeatRequest `protobuf:"bytes,2,opt,name=heartbeart_request,json=heartbeartRequest,proto3" json:"heartbeart_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *PollNexusTaskQueueRequest) Reset() { - *x = PollNexusTaskQueueRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordWorkerHeartbeatRequest) Reset() { + *x = RecordWorkerHeartbeatRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[69] + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *PollNexusTaskQueueRequest) String() string { +func (x *RecordWorkerHeartbeatRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PollNexusTaskQueueRequest) ProtoMessage() {} +func (*RecordWorkerHeartbeatRequest) ProtoMessage() {} -func (x *PollNexusTaskQueueRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordWorkerHeartbeatRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[69] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2594,66 +4815,47 @@ func (x *PollNexusTaskQueueRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PollNexusTaskQueueRequest.ProtoReflect.Descriptor instead. -func (*PollNexusTaskQueueRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{36} +// Deprecated: Use RecordWorkerHeartbeatRequest.ProtoReflect.Descriptor instead. 
+func (*RecordWorkerHeartbeatRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{69} } -func (x *PollNexusTaskQueueRequest) GetNamespaceId() string { +func (x *RecordWorkerHeartbeatRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *PollNexusTaskQueueRequest) GetPollerId() string { - if x != nil { - return x.PollerId - } - return "" -} - -func (x *PollNexusTaskQueueRequest) GetRequest() *v1.PollNexusTaskQueueRequest { +func (x *RecordWorkerHeartbeatRequest) GetHeartbeartRequest() *v1.RecordWorkerHeartbeatRequest { if x != nil { - return x.Request + return x.HeartbeartRequest } return nil } -func (x *PollNexusTaskQueueRequest) GetForwardedSource() string { - if x != nil { - return x.ForwardedSource - } - return "" -} - -type PollNexusTaskQueueResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RecordWorkerHeartbeatResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields - - // Response that should be delivered to the worker containing a request from DispatchNexusTaskRequest. 
- Response *v1.PollNexusTaskQueueResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *PollNexusTaskQueueResponse) Reset() { - *x = PollNexusTaskQueueResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RecordWorkerHeartbeatResponse) Reset() { + *x = RecordWorkerHeartbeatResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[70] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *PollNexusTaskQueueResponse) String() string { +func (x *RecordWorkerHeartbeatResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PollNexusTaskQueueResponse) ProtoMessage() {} +func (*RecordWorkerHeartbeatResponse) ProtoMessage() {} -func (x *PollNexusTaskQueueResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { +func (x *RecordWorkerHeartbeatResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[70] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2663,49 +4865,35 @@ func (x *PollNexusTaskQueueResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PollNexusTaskQueueResponse.ProtoReflect.Descriptor instead. 
-func (*PollNexusTaskQueueResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{37} -} - -func (x *PollNexusTaskQueueResponse) GetResponse() *v1.PollNexusTaskQueueResponse { - if x != nil { - return x.Response - } - return nil +// Deprecated: Use RecordWorkerHeartbeatResponse.ProtoReflect.Descriptor instead. +func (*RecordWorkerHeartbeatResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{70} } -type RespondNexusTaskCompletedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListWorkersRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ListRequest *v1.ListWorkersRequest `protobuf:"bytes,2,opt,name=list_request,json=listRequest,proto3" json:"list_request,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - // A unique ID for this task generated by the matching engine. Decoded from the incoming request's task token. - TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Original completion as received by the frontend. 
- Request *v1.RespondNexusTaskCompletedRequest `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *RespondNexusTaskCompletedRequest) Reset() { - *x = RespondNexusTaskCompletedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListWorkersRequest) Reset() { + *x = ListWorkersRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondNexusTaskCompletedRequest) String() string { +func (x *ListWorkersRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondNexusTaskCompletedRequest) ProtoMessage() {} +func (*ListWorkersRequest) ProtoMessage() {} -func (x *RespondNexusTaskCompletedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListWorkersRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[71] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2715,63 +4903,54 @@ func (x *RespondNexusTaskCompletedRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RespondNexusTaskCompletedRequest.ProtoReflect.Descriptor instead. -func (*RespondNexusTaskCompletedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{38} +// Deprecated: Use ListWorkersRequest.ProtoReflect.Descriptor instead. 
+func (*ListWorkersRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{71} } -func (x *RespondNexusTaskCompletedRequest) GetNamespaceId() string { +func (x *ListWorkersRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RespondNexusTaskCompletedRequest) GetTaskQueue() *v14.TaskQueue { - if x != nil { - return x.TaskQueue - } - return nil -} - -func (x *RespondNexusTaskCompletedRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *RespondNexusTaskCompletedRequest) GetRequest() *v1.RespondNexusTaskCompletedRequest { +func (x *ListWorkersRequest) GetListRequest() *v1.ListWorkersRequest { if x != nil { - return x.Request + return x.ListRequest } return nil } -type RespondNexusTaskCompletedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type ListWorkersResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Deprecated: Use workers instead. This field returns full WorkerInfo which + // includes expensive runtime metrics. We will stop populating this field in the future. + // + // Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. 
+ WorkersInfo []*v115.WorkerInfo `protobuf:"bytes,1,rep,name=workers_info,json=workersInfo,proto3" json:"workers_info,omitempty"` + NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + Workers []*v115.WorkerListInfo `protobuf:"bytes,3,rep,name=workers,proto3" json:"workers,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondNexusTaskCompletedResponse) Reset() { - *x = RespondNexusTaskCompletedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ListWorkersResponse) Reset() { + *x = ListWorkersResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondNexusTaskCompletedResponse) String() string { +func (x *ListWorkersResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondNexusTaskCompletedResponse) ProtoMessage() {} +func (*ListWorkersResponse) ProtoMessage() {} -func (x *RespondNexusTaskCompletedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ListWorkersResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[72] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2781,42 +4960,68 @@ func (x *RespondNexusTaskCompletedResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use RespondNexusTaskCompletedResponse.ProtoReflect.Descriptor instead. 
-func (*RespondNexusTaskCompletedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{39} +// Deprecated: Use ListWorkersResponse.ProtoReflect.Descriptor instead. +func (*ListWorkersResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{72} } -type RespondNexusTaskFailedRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +// Deprecated: Marked as deprecated in temporal/server/api/matchingservice/v1/request_response.proto. +func (x *ListWorkersResponse) GetWorkersInfo() []*v115.WorkerInfo { + if x != nil { + return x.WorkersInfo + } + return nil +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue *v14.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - // A unique ID for this task generated by the matching engine. Decoded from the incoming request's task token. - TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - // Original failure as received by the frontend. 
- Request *v1.RespondNexusTaskFailedRequest `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"` +func (x *ListWorkersResponse) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil } -func (x *RespondNexusTaskFailedRequest) Reset() { - *x = RespondNexusTaskFailedRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *ListWorkersResponse) GetWorkers() []*v115.WorkerListInfo { + if x != nil { + return x.Workers } + return nil } -func (x *RespondNexusTaskFailedRequest) String() string { +// (-- api-linter: core::0134::request-resource-required=disabled +// +// aip.dev/not-precedent: UpdateTaskQueueConfigRequest RPC doesn't follow Google API format. --) +// +// (-- api-linter: core::0134::request-mask-required=disabled +// +// aip.dev/not-precedent: UpdateTaskQueueConfigRequest RPC doesn't follow Google API format. --) +// +// (-- api-linter: core::0134::method-signature=disabled +// +// aip.dev/not-precedent: UpdateTaskQueueConfigRequest RPC doesn't follow Google API format. 
--) +type UpdateTaskQueueConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + UpdateTaskqueueConfig *v1.UpdateTaskQueueConfigRequest `protobuf:"bytes,3,opt,name=update_taskqueue_config,json=updateTaskqueueConfig,proto3" json:"update_taskqueue_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateTaskQueueConfigRequest) Reset() { + *x = UpdateTaskQueueConfigRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[73] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateTaskQueueConfigRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondNexusTaskFailedRequest) ProtoMessage() {} +func (*UpdateTaskQueueConfigRequest) ProtoMessage() {} -func (x *RespondNexusTaskFailedRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateTaskQueueConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[73] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2826,63 +5031,48 @@ func (x *RespondNexusTaskFailedRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RespondNexusTaskFailedRequest.ProtoReflect.Descriptor instead. -func (*RespondNexusTaskFailedRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{40} +// Deprecated: Use UpdateTaskQueueConfigRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateTaskQueueConfigRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{73} } -func (x *RespondNexusTaskFailedRequest) GetNamespaceId() string { +func (x *UpdateTaskQueueConfigRequest) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *RespondNexusTaskFailedRequest) GetTaskQueue() *v14.TaskQueue { - if x != nil { - return x.TaskQueue - } - return nil -} - -func (x *RespondNexusTaskFailedRequest) GetTaskId() string { - if x != nil { - return x.TaskId - } - return "" -} - -func (x *RespondNexusTaskFailedRequest) GetRequest() *v1.RespondNexusTaskFailedRequest { +func (x *UpdateTaskQueueConfigRequest) GetUpdateTaskqueueConfig() *v1.UpdateTaskQueueConfigRequest { if x != nil { - return x.Request + return x.UpdateTaskqueueConfig } return nil } -type RespondNexusTaskFailedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type UpdateTaskQueueConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + UpdatedTaskqueueConfig *v14.TaskQueueConfig `protobuf:"bytes,1,opt,name=updated_taskqueue_config,json=updatedTaskqueueConfig,proto3" json:"updated_taskqueue_config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *RespondNexusTaskFailedResponse) Reset() { - *x = RespondNexusTaskFailedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateTaskQueueConfigResponse) Reset() { + *x = UpdateTaskQueueConfigResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *RespondNexusTaskFailedResponse) String() string { 
+func (x *UpdateTaskQueueConfigResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RespondNexusTaskFailedResponse) ProtoMessage() {} +func (*UpdateTaskQueueConfigResponse) ProtoMessage() {} -func (x *RespondNexusTaskFailedResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateTaskQueueConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[74] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2892,48 +5082,42 @@ func (x *RespondNexusTaskFailedResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RespondNexusTaskFailedResponse.ProtoReflect.Descriptor instead. -func (*RespondNexusTaskFailedResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{41} +// Deprecated: Use UpdateTaskQueueConfigResponse.ProtoReflect.Descriptor instead. +func (*UpdateTaskQueueConfigResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{74} } -// (-- api-linter: core::0133::request-unknown-fields=disabled -// -// aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) -// -// (-- api-linter: core::0133::request-resource-field=disabled -// -// aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) -// -// (-- api-linter: core::0133::request-parent-required=disabled -// -// aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. 
--) -type CreateNexusIncomingServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *UpdateTaskQueueConfigResponse) GetUpdatedTaskqueueConfig() *v14.TaskQueueConfig { + if x != nil { + return x.UpdatedTaskqueueConfig + } + return nil +} - Spec *v112.IncomingServiceSpec `protobuf:"bytes,1,opt,name=spec,proto3" json:"spec,omitempty"` +type DescribeWorkerRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Request *v1.DescribeWorkerRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *CreateNexusIncomingServiceRequest) Reset() { - *x = CreateNexusIncomingServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeWorkerRequest) Reset() { + *x = DescribeWorkerRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CreateNexusIncomingServiceRequest) String() string { +func (x *DescribeWorkerRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateNexusIncomingServiceRequest) ProtoMessage() {} +func (*DescribeWorkerRequest) ProtoMessage() {} -func (x *CreateNexusIncomingServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeWorkerRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[75] + if 
x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2943,44 +5127,48 @@ func (x *CreateNexusIncomingServiceRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use CreateNexusIncomingServiceRequest.ProtoReflect.Descriptor instead. -func (*CreateNexusIncomingServiceRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{42} +// Deprecated: Use DescribeWorkerRequest.ProtoReflect.Descriptor instead. +func (*DescribeWorkerRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{75} } -func (x *CreateNexusIncomingServiceRequest) GetSpec() *v112.IncomingServiceSpec { +func (x *DescribeWorkerRequest) GetNamespaceId() string { if x != nil { - return x.Spec + return x.NamespaceId + } + return "" +} + +func (x *DescribeWorkerRequest) GetRequest() *v1.DescribeWorkerRequest { + if x != nil { + return x.Request } return nil } -type CreateNexusIncomingServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type DescribeWorkerResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + WorkerInfo *v115.WorkerInfo `protobuf:"bytes,1,opt,name=worker_info,json=workerInfo,proto3" json:"worker_info,omitempty"` unknownFields protoimpl.UnknownFields - - Service *v112.IncomingService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *CreateNexusIncomingServiceResponse) Reset() { - *x = CreateNexusIncomingServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeWorkerResponse) Reset() { + *x = DescribeWorkerResponse{} + mi := 
&file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[76] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CreateNexusIncomingServiceResponse) String() string { +func (x *DescribeWorkerResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateNexusIncomingServiceResponse) ProtoMessage() {} +func (*DescribeWorkerResponse) ProtoMessage() {} -func (x *CreateNexusIncomingServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeWorkerResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[76] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2990,56 +5178,55 @@ func (x *CreateNexusIncomingServiceResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use CreateNexusIncomingServiceResponse.ProtoReflect.Descriptor instead. -func (*CreateNexusIncomingServiceResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{43} +// Deprecated: Use DescribeWorkerResponse.ProtoReflect.Descriptor instead. +func (*DescribeWorkerResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{76} } -func (x *CreateNexusIncomingServiceResponse) GetService() *v112.IncomingService { +func (x *DescribeWorkerResponse) GetWorkerInfo() *v115.WorkerInfo { if x != nil { - return x.Service + return x.WorkerInfo } return nil } // (-- api-linter: core::0134::request-resource-required=disabled // -// aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. 
--) +// aip.dev/not-precedent: UpdateFairnessStateRequest RPC doesn't follow Google API format. --) // // (-- api-linter: core::0134::request-mask-required=disabled // -// aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) -type UpdateNexusIncomingServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// aip.dev/not-precedent: UpdateFairnessStateRequest RPC doesn't follow Google API format. --) +// +// (-- api-linter: core::0134::method-signature=disabled +// +// aip.dev/not-precedent: UpdateFairnessStateRequest RPC doesn't follow Google API format. --) +type UpdateFairnessStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v19.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + FairnessState v116.FairnessState `protobuf:"varint,4,opt,name=fairness_state,json=fairnessState,proto3,enum=temporal.server.api.enums.v1.FairnessState" json:"fairness_state,omitempty"` unknownFields protoimpl.UnknownFields - - // ID of the service to update. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // Version of the service, used for optimistic concurrency. Must match current version in persistence or the request - // will fail a FAILED_PRECONDITION error. 
- Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - Spec *v112.IncomingServiceSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *UpdateNexusIncomingServiceRequest) Reset() { - *x = UpdateNexusIncomingServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *UpdateFairnessStateRequest) Reset() { + *x = UpdateFairnessStateRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[77] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateNexusIncomingServiceRequest) String() string { +func (x *UpdateFairnessStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateNexusIncomingServiceRequest) ProtoMessage() {} +func (*UpdateFairnessStateRequest) ProtoMessage() {} -func (x *UpdateNexusIncomingServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateFairnessStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[77] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3049,58 +5236,61 @@ func (x *UpdateNexusIncomingServiceRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use UpdateNexusIncomingServiceRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateNexusIncomingServiceRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{44} +// Deprecated: Use UpdateFairnessStateRequest.ProtoReflect.Descriptor instead. +func (*UpdateFairnessStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{77} } -func (x *UpdateNexusIncomingServiceRequest) GetId() string { +func (x *UpdateFairnessStateRequest) GetNamespaceId() string { if x != nil { - return x.Id + return x.NamespaceId } return "" } -func (x *UpdateNexusIncomingServiceRequest) GetVersion() int64 { +func (x *UpdateFairnessStateRequest) GetTaskQueue() string { if x != nil { - return x.Version + return x.TaskQueue } - return 0 + return "" } -func (x *UpdateNexusIncomingServiceRequest) GetSpec() *v112.IncomingServiceSpec { +func (x *UpdateFairnessStateRequest) GetTaskQueueType() v19.TaskQueueType { if x != nil { - return x.Spec + return x.TaskQueueType } - return nil + return v19.TaskQueueType(0) } -type UpdateNexusIncomingServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *UpdateFairnessStateRequest) GetFairnessState() v116.FairnessState { + if x != nil { + return x.FairnessState + } + return v116.FairnessState(0) +} - Service *v112.IncomingService `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +type UpdateFairnessStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *UpdateNexusIncomingServiceResponse) Reset() { - *x = UpdateNexusIncomingServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x 
*UpdateFairnessStateResponse) Reset() { + *x = UpdateFairnessStateResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[78] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *UpdateNexusIncomingServiceResponse) String() string { +func (x *UpdateFairnessStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateNexusIncomingServiceResponse) ProtoMessage() {} +func (*UpdateFairnessStateResponse) ProtoMessage() {} -func (x *UpdateNexusIncomingServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { +func (x *UpdateFairnessStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[78] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3110,52 +5300,37 @@ func (x *UpdateNexusIncomingServiceResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use UpdateNexusIncomingServiceResponse.ProtoReflect.Descriptor instead. -func (*UpdateNexusIncomingServiceResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{45} -} - -func (x *UpdateNexusIncomingServiceResponse) GetService() *v112.IncomingService { - if x != nil { - return x.Service - } - return nil +// Deprecated: Use UpdateFairnessStateResponse.ProtoReflect.Descriptor instead. +func (*UpdateFairnessStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{78} } -// (-- api-linter: core::0135::request-name-behavior=disabled -// -// aip.dev/not-precedent: DeleteNexusIncomingServiceRequest RPC doesn't follow Google API format. 
--) -// -// (-- api-linter: core::0135::request-name-reference=disabled -// -// aip.dev/not-precedent: DeleteNexusIncomingServiceRequest RPC doesn't follow Google API format. --) -type DeleteNexusIncomingServiceRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CheckTaskQueueVersionMembershipRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v19.TaskQueueType `protobuf:"varint,3,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + Version *v110.WorkerDeploymentVersion `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields - - // ID of the service to delete. - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *DeleteNexusIncomingServiceRequest) Reset() { - *x = DeleteNexusIncomingServiceRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CheckTaskQueueVersionMembershipRequest) Reset() { + *x = CheckTaskQueueVersionMembershipRequest{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[79] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *DeleteNexusIncomingServiceRequest) String() string { +func (x *CheckTaskQueueVersionMembershipRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteNexusIncomingServiceRequest) ProtoMessage() {} +func (*CheckTaskQueueVersionMembershipRequest) ProtoMessage() {} -func (x 
*DeleteNexusIncomingServiceRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CheckTaskQueueVersionMembershipRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[79] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3165,42 +5340,77 @@ func (x *DeleteNexusIncomingServiceRequest) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use DeleteNexusIncomingServiceRequest.ProtoReflect.Descriptor instead. -func (*DeleteNexusIncomingServiceRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{46} +// Deprecated: Use CheckTaskQueueVersionMembershipRequest.ProtoReflect.Descriptor instead. +func (*CheckTaskQueueVersionMembershipRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{79} } -func (x *DeleteNexusIncomingServiceRequest) GetId() string { +func (x *CheckTaskQueueVersionMembershipRequest) GetNamespaceId() string { if x != nil { - return x.Id + return x.NamespaceId } return "" } -type DeleteNexusIncomingServiceResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CheckTaskQueueVersionMembershipRequest) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" } -func (x *DeleteNexusIncomingServiceResponse) Reset() { - *x = DeleteNexusIncomingServiceResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x 
*CheckTaskQueueVersionMembershipRequest) GetTaskQueueType() v19.TaskQueueType { + if x != nil { + return x.TaskQueueType + } + return v19.TaskQueueType(0) +} + +func (x *CheckTaskQueueVersionMembershipRequest) GetVersion() *v110.WorkerDeploymentVersion { + if x != nil { + return x.Version } + return nil } -func (x *DeleteNexusIncomingServiceResponse) String() string { +type CheckTaskQueueVersionMembershipResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + IsMember bool `protobuf:"varint,1,opt,name=is_member,json=isMember,proto3" json:"is_member,omitempty"` + // True when a reactivation signal to this version would be redundant — i.e., matching + // determined the version is already in a state where it does not need to be reactivated + // (today: CURRENT, RAMPING, or DRAINING). History uses this to suppress such signals. + // The zero value (false) is the safe default; it applies when matching has no definitive + // answer (version not present in matching's deployment data, or old matching servers + // that do not set this field) and tells history to send the signal. + ShouldSkipReactivation bool `protobuf:"varint,2,opt,name=should_skip_reactivation,json=shouldSkipReactivation,proto3" json:"should_skip_reactivation,omitempty"` + // revision_number is the version's current revision as tracked in matching's per-TQ + // deployment data. It is returned so history can compose a stable, cluster-wide-deterministic + // RequestId on the reactivation signal. All history pods querying the same version at the + // same point in time converge on the same revision_number, so Temporal's built-in + // SignalRequestedIds dedup (see signalworkflow/api.go) collapses the N-pod signal fan-out + // into exactly one event on the version workflow. Zero when unknown (old matching server or + // legacy DeploymentVersionData format that does not carry revision_number). 
+ RevisionNumber int64 `protobuf:"varint,3,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CheckTaskQueueVersionMembershipResponse) Reset() { + *x = CheckTaskQueueVersionMembershipResponse{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[80] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CheckTaskQueueVersionMembershipResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeleteNexusIncomingServiceResponse) ProtoMessage() {} +func (*CheckTaskQueueVersionMembershipResponse) ProtoMessage() {} -func (x *DeleteNexusIncomingServiceResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CheckTaskQueueVersionMembershipResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[80] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3210,51 +5420,63 @@ func (x *DeleteNexusIncomingServiceResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use DeleteNexusIncomingServiceResponse.ProtoReflect.Descriptor instead. -func (*DeleteNexusIncomingServiceResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{47} +// Deprecated: Use CheckTaskQueueVersionMembershipResponse.ProtoReflect.Descriptor instead. 
+func (*CheckTaskQueueVersionMembershipResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{80} } -type ListNexusIncomingServicesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *CheckTaskQueueVersionMembershipResponse) GetIsMember() bool { + if x != nil { + return x.IsMember + } + return false +} - // To get the next page, pass in `ListServicesResponse.next_page_token` from the previous page's response. The token - // will be empty if there's no other page. - // Note: the last page may be empty if the total number of services registered is a multiple of the page size. - // Mutually exclusive with wait. Specifying both will result in an invalid argument error. - NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // The services table has a monotonically increasing version number that is incremented on every change to the - // table. This field can be used to provide the last known table version in conjuction with the `wait` field to long - // poll on changes to the table. - // If next_page_token is not empty and the current table version does not match this field, this request will fail - // with a failed precondition error. - LastKnownTableVersion int64 `protobuf:"varint,3,opt,name=last_known_table_version,json=lastKnownTableVersion,proto3" json:"last_known_table_version,omitempty"` - // If true, this request becomes a long poll and will be unblocked once the DB version is incremented. - // Mutually exclusive with next_page_token. Specifying both will result in an invalid argument error. 
- Wait bool `protobuf:"varint,4,opt,name=wait,proto3" json:"wait,omitempty"` +func (x *CheckTaskQueueVersionMembershipResponse) GetShouldSkipReactivation() bool { + if x != nil { + return x.ShouldSkipReactivation + } + return false } -func (x *ListNexusIncomingServicesRequest) Reset() { - *x = ListNexusIncomingServicesRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *CheckTaskQueueVersionMembershipResponse) GetRevisionNumber() int64 { + if x != nil { + return x.RevisionNumber } + return 0 +} + +// PollConditions are extra conditions to set on the poll. Only supported with new matcher. +type PollConditions struct { + state protoimpl.MessageState `protogen:"open.v1"` + // If set (non-zero), this poll will not match a task with lower priority than this value. + // Note that "min" priority is "max" numeric value, e.g. "min_priority: 3" means to match + // tasks with priority 1, 2, or 3. + MinPriority int32 `protobuf:"varint,1,opt,name=min_priority,json=minPriority,proto3" json:"min_priority,omitempty"` + // If true, don't block waiting for a task, just return a task immediately or an empty + // response. This is most useful combined with min_priority, to poll for task at a specific + // priority level on a partition that you think is there. 
+ NoWait bool `protobuf:"varint,2,opt,name=no_wait,json=noWait,proto3" json:"no_wait,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ListNexusIncomingServicesRequest) String() string { +func (x *PollConditions) Reset() { + *x = PollConditions{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[81] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollConditions) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListNexusIncomingServicesRequest) ProtoMessage() {} +func (*PollConditions) ProtoMessage() {} -func (x *ListNexusIncomingServicesRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { +func (x *PollConditions) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[81] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3264,68 +5486,108 @@ func (x *ListNexusIncomingServicesRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ListNexusIncomingServicesRequest.ProtoReflect.Descriptor instead. -func (*ListNexusIncomingServicesRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{48} +// Deprecated: Use PollConditions.ProtoReflect.Descriptor instead. 
+func (*PollConditions) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{81} } -func (x *ListNexusIncomingServicesRequest) GetNextPageToken() []byte { +func (x *PollConditions) GetMinPriority() int32 { if x != nil { - return x.NextPageToken + return x.MinPriority } - return nil + return 0 } -func (x *ListNexusIncomingServicesRequest) GetPageSize() int32 { +func (x *PollConditions) GetNoWait() bool { if x != nil { - return x.PageSize + return x.NoWait } - return 0 + return false +} + +// (-- api-linter: core::0123::resource-annotation=disabled --) +type DescribeVersionedTaskQueuesRequest_VersionTaskQueue struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type v19.TaskQueueType `protobuf:"varint,2,opt,name=type,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeVersionedTaskQueuesRequest_VersionTaskQueue) Reset() { + *x = DescribeVersionedTaskQueuesRequest_VersionTaskQueue{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[84] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeVersionedTaskQueuesRequest_VersionTaskQueue) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *ListNexusIncomingServicesRequest) GetLastKnownTableVersion() int64 { +func (*DescribeVersionedTaskQueuesRequest_VersionTaskQueue) ProtoMessage() {} + +func (x *DescribeVersionedTaskQueuesRequest_VersionTaskQueue) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[84] if x != nil { - return x.LastKnownTableVersion + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return 
ms } - return 0 + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeVersionedTaskQueuesRequest_VersionTaskQueue.ProtoReflect.Descriptor instead. +func (*DescribeVersionedTaskQueuesRequest_VersionTaskQueue) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{19, 0} } -func (x *ListNexusIncomingServicesRequest) GetWait() bool { +func (x *DescribeVersionedTaskQueuesRequest_VersionTaskQueue) GetName() string { if x != nil { - return x.Wait + return x.Name } - return false + return "" } -type ListNexusIncomingServicesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DescribeVersionedTaskQueuesRequest_VersionTaskQueue) GetType() v19.TaskQueueType { + if x != nil { + return x.Type + } + return v19.TaskQueueType(0) +} - // Token for getting the next page. - NextPageToken []byte `protobuf:"bytes,1,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - TableVersion int64 `protobuf:"varint,2,opt,name=table_version,json=tableVersion,proto3" json:"table_version,omitempty"` - Services []*v112.IncomingService `protobuf:"bytes,3,rep,name=services,proto3" json:"services,omitempty"` +// (-- api-linter: core::0123::resource-annotation=disabled --) +type DescribeVersionedTaskQueuesResponse_VersionTaskQueue struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type v19.TaskQueueType `protobuf:"varint,2,opt,name=type,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"type,omitempty"` + Stats *v14.TaskQueueStats `protobuf:"bytes,3,opt,name=stats,proto3" json:"stats,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "by" is used to clarify the key. 
--) + StatsByPriorityKey map[int32]*v14.TaskQueueStats `protobuf:"bytes,4,rep,name=stats_by_priority_key,json=statsByPriorityKey,proto3" json:"stats_by_priority_key,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ListNexusIncomingServicesResponse) Reset() { - *x = ListNexusIncomingServicesResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) Reset() { + *x = DescribeVersionedTaskQueuesResponse_VersionTaskQueue{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[85] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ListNexusIncomingServicesResponse) String() string { +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ListNexusIncomingServicesResponse) ProtoMessage() {} +func (*DescribeVersionedTaskQueuesResponse_VersionTaskQueue) ProtoMessage() {} -func (x *ListNexusIncomingServicesResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[85] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3335,48 +5597,52 @@ func (x *ListNexusIncomingServicesResponse) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use 
ListNexusIncomingServicesResponse.ProtoReflect.Descriptor instead. -func (*ListNexusIncomingServicesResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{49} +// Deprecated: Use DescribeVersionedTaskQueuesResponse_VersionTaskQueue.ProtoReflect.Descriptor instead. +func (*DescribeVersionedTaskQueuesResponse_VersionTaskQueue) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{20, 0} } -func (x *ListNexusIncomingServicesResponse) GetNextPageToken() []byte { +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) GetName() string { if x != nil { - return x.NextPageToken + return x.Name } - return nil + return "" } -func (x *ListNexusIncomingServicesResponse) GetTableVersion() int64 { +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) GetType() v19.TaskQueueType { if x != nil { - return x.TableVersion + return x.Type } - return 0 + return v19.TaskQueueType(0) +} + +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) GetStats() *v14.TaskQueueStats { + if x != nil { + return x.Stats + } + return nil } -func (x *ListNexusIncomingServicesResponse) GetServices() []*v112.IncomingService { +func (x *DescribeVersionedTaskQueuesResponse_VersionTaskQueue) GetStatsByPriorityKey() map[int32]*v14.TaskQueueStats { if x != nil { - return x.Services + return x.StatsByPriorityKey } return nil } // Apply request from public API. 
type UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Request *v1.UpdateWorkerBuildIdCompatibilityRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` unknownFields protoimpl.UnknownFields - - Request *v1.UpdateWorkerBuildIdCompatibilityRequest `protobuf:"bytes,1,opt,name=request,proto3" json:"request,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) Reset() { *x = UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[88] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) String() string { @@ -3386,8 +5652,8 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) String() st func (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) ProtoMessage() {} func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[88] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3399,7 +5665,7 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) ProtoReflec // Deprecated: Use UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest.ProtoReflect.Descriptor instead. 
func (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{18, 0} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{25, 0} } func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) GetRequest() *v1.UpdateWorkerBuildIdCompatibilityRequest { @@ -3411,23 +5677,20 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest) GetRequest( // Remove build ids (internal only) type UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The last known user data version, used to prevent concurrent updates. KnownUserDataVersion int64 `protobuf:"varint,1,opt,name=known_user_data_version,json=knownUserDataVersion,proto3" json:"known_user_data_version,omitempty"` // List of build ids to remove. 
- BuildIds []string `protobuf:"bytes,2,rep,name=build_ids,json=buildIds,proto3" json:"build_ids,omitempty"` + BuildIds []string `protobuf:"bytes,2,rep,name=build_ids,json=buildIds,proto3" json:"build_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) Reset() { *x = UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[89] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) String() string { @@ -3437,8 +5700,8 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) String() string func (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) ProtoMessage() {} func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[89] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3450,7 +5713,7 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) ProtoReflect() // Deprecated: Use UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds.ProtoReflect.Descriptor instead. 
func (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) Descriptor() ([]byte, []int) { - return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{18, 1} + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{25, 1} } func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) GetKnownUserDataVersion() int64 { @@ -3467,939 +5730,796 @@ func (x *UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds) GetBuildIds() [ return nil } -var File_temporal_server_api_matchingservice_v1_request_response_proto protoreflect.FileDescriptor +type DispatchNexusTaskResponse_Timeout struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DispatchNexusTaskResponse_Timeout) Reset() { + *x = DispatchNexusTaskResponse_Timeout{} + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[91] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DispatchNexusTaskResponse_Timeout) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DispatchNexusTaskResponse_Timeout) ProtoMessage() {} + +func (x *DispatchNexusTaskResponse_Timeout) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[91] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} -var file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc = []byte{ - 0x0a, 0x3d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 
0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x61, 0x73, 0x6b, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x61, 0x70, 
0x69, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, - 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x34, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 
0x61, 0x70, 0x69, 0x2f, 0x6e, - 0x65, 0x78, 0x75, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfb, 0x01, 0x0a, 0x1c, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x6c, - 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 0x0a, 0x0c, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x70, 0x6f, 0x6c, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xf8, 0x0a, 0x0a, 0x1d, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x6f, - 0x6b, 
0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x19, 0x70, - 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x70, 0x72, - 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, 0x74, 
0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x26, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x6c, 0x6f, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x48, 0x69, 0x6e, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x18, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x5f, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x3e, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x75, 0x0a, 0x17, - 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x69, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 
0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6b, 0x0a, - 0x1d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x72, 0x61, 0x6e, - 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, - 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x10, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x70, 0x0a, - 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x52, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 
0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x71, - 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x07, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, - 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, - 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x1a, 0x68, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 
0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x0d, 0x10, 0x0e, 0x22, 0xfb, 0x01, 0x0a, 0x1c, 0x50, - 0x6f, 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1f, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, - 0x0a, 0x0c, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, - 0x0b, 0x70, 0x6f, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x2d, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, - 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xe8, 0x08, 0x0a, 0x1d, - 0x50, 0x6f, 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, - 
0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0a, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x09, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, 0x0a, - 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0d, 0x61, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3a, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x73, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, - 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 
0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x58, 0x0a, 0x19, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, - 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x6f, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x52, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x6f, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, - 0x61, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0a, 0x20, 0x01, 
0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x68, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, - 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, - 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x63, 0x0a, 0x1e, 0x63, 0x75, - 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x5f, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x1b, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x51, 0x0a, 0x11, 0x68, 0x65, - 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x10, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x54, 0x79, 0x70, 0x65, 
0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3a, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x22, 0x84, - 0x05, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, - 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, - 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 
0x65, 0x75, 0x65, 0x52, 0x09, 0x74, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, 0x0a, 0x19, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, - 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x44, 0x0a, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, - 
0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x67, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x64, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8a, - 0x05, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, - 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, 0x0a, 0x19, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, - 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x44, 0x0a, - 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 
0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x67, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x19, 0x0a, 0x17, 0x41, 0x64, - 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xfe, 0x02, 0x0a, 0x14, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5e, 0x0a, 0x0d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, - 0x72, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x67, - 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, - 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x10, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xb1, 0x01, 0x0a, 0x15, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, - 0x52, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 
0x75, 0x6c, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4f, 0x0a, 0x0e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x0d, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa3, 0x02, - 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, - 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x72, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 
- 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, - 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x81, 0x02, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, - 0x65, 0x6c, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x50, 0x0a, 0x0f, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1f, 0x0a, 0x09, 0x70, 0x6f, 0x6c, 
0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x70, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x1f, - 0x0a, 0x1d, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, - 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xa3, - 0x01, 0x0a, 0x18, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x60, - 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x73, 0x6b, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbc, 0x01, 0x0a, 0x19, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x07, 0x70, 0x6f, 0x6c, 0x6c, - 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07, 0x70, - 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5a, 0x0a, 
0x11, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x74, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x68, - 0x00, 0x22, 0xb2, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, - 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, - 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa1, 0x02, - 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7e, 0x0a, 0x1e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x71, 0x75, 
0x65, 0x75, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x1b, 0x61, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x7e, 0x0a, 0x1e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, - 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x52, 0x1b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xd9, 0x05, 0x0a, 0x27, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 
0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x9a, 0x01, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x70, 0x75, 0x62, - 0x6c, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x62, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, - 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x70, - 0x70, 0x6c, 0x79, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, - 0x00, 0x52, 0x12, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x8e, 0x01, 0x0a, 0x10, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x5e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, - 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x73, 0x48, 0x00, 0x52, 0x0e, - 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x73, 0x42, 0x02, 
0x68, - 0x00, 0x12, 0x3d, 0x0a, 0x18, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x5f, 0x75, 0x6e, 0x6b, - 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x15, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x55, 0x6e, 0x6b, 0x6e, - 0x6f, 0x77, 0x6e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x7c, 0x0a, - 0x12, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x66, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x6c, 0x0a, 0x0e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x49, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x17, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x75, - 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x55, 0x73, 0x65, 0x72, - 0x44, 0x61, 0x74, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, - 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0b, 0x0a, - 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2a, 0x0a, 0x28, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x57, 0x6f, 
0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb2, 0x01, 0x0a, 0x24, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x63, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x8f, 0x01, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 
0x70, 0x61, 0x74, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa5, 0x02, 0x0a, 0x1b, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, - 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x50, 0x0a, 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x74, 0x61, 0x73, 0x6b, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x42, 0x0a, - 0x1c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, - 0x64, 0x61, 0x74, 0x61, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x18, 0x6c, 0x61, 0x73, 0x74, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x55, 0x73, 0x65, 0x72, 0x44, - 0x61, 0x74, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, - 0x0d, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0b, 0x77, 0x61, 0x69, 0x74, 0x4e, 0x65, 0x77, 0x44, 0x61, 0x74, 0x61, - 0x42, 0x02, 
0x68, 0x00, 0x22, 0x85, 0x01, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x5f, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0xd1, 0x01, 0x0a, 0x2d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x56, 0x0a, 0x09, 0x75, 0x73, - 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 
0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x22, 0x30, - 0x0a, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, - 0x21, 0x47, 0x65, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x49, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb9, 0x01, 0x0a, 0x1b, 0x46, - 0x6f, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 
0x21, - 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x50, 0x0a, 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x74, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x41, 0x0a, - 0x1c, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0a, 0x77, - 0x61, 0x73, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x77, 0x61, 0x73, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa7, 0x02, 0x0a, - 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, - 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, - 0x61, 0x74, 0x61, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2a, 0x0a, 0x0f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x5f, 0x61, 0x64, - 0x64, 0x65, 0x64, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, - 0x64, 0x73, 0x41, 0x64, 0x64, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x73, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x21, 0x0a, 0x1f, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc5, 0x01, 0x0a, 0x21, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, - 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x56, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 
0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, - 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x22, 0x24, 0x0a, 0x22, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, - 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xf7, 0x01, 0x0a, 0x18, - 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, - 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x3c, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, - 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, - 0x04, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb9, 0x01, 0x0a, 0x19, 0x44, 0x69, 0x73, - 0x70, 0x61, 0x74, 0x63, 0x68, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, - 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, - 0x52, 0x0c, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x41, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x42, 0x09, 0x0a, 0x07, 0x6f, 0x75, 0x74, 0x63, 0x6f, 0x6d, 0x65, 0x22, 0xec, 0x01, 0x0a, 0x19, - 0x50, 0x6f, 0x6c, 0x6c, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, - 0x09, 0x70, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, 0x0a, 0x07, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x6c, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x64, 0x5f, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x79, - 0x0a, 0x1a, 0x50, 0x6f, 0x6c, 0x6c, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x6c, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x90, 0x02, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, - 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 
0x0a, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, - 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x07, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x23, 0x0a, 0x21, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8a, 0x02, 0x0a, 0x1d, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x46, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0c, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x47, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x5c, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, - 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, - 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x20, 0x0a, 0x1e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x67, 0x0a, 0x21, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, - 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, - 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x6a, 0x0a, 0x22, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x65, 
0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, - 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6f, 0x6d, - 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x99, 0x01, 0x0a, 0x21, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x42, 0x0a, 0x04, 0x73, 0x70, 0x65, - 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, - 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, - 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x42, 0x02, 0x68, 0x00, 0x22, 0x6a, 0x0a, 0x22, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, - 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, - 
0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x22, 0x37, 0x0a, 0x21, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, - 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x24, 0x0a, 0x22, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xc4, 0x01, 0x0a, 0x20, 0x4c, - 0x69, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, - 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6c, 0x61, 0x73, 0x74, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x16, 0x0a, 0x04, 0x77, 0x61, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, - 0x77, 0x61, 0x69, 0x74, 0x42, 0x02, 0x68, 
0x00, 0x22, 0xc0, 0x01, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x0f, - 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, 0x0d, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x46, 0x0a, 0x08, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, - 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x42, 0x02, 0x68, - 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +// Deprecated: Use DispatchNexusTaskResponse_Timeout.ProtoReflect.Descriptor instead. 
+func (*DispatchNexusTaskResponse_Timeout) Descriptor() ([]byte, []int) { + return file_temporal_server_api_matchingservice_v1_request_response_proto_rawDescGZIP(), []int{54, 0} } +var File_temporal_server_api_matchingservice_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc = "" + + "\n" + + "=temporal/server/api/matchingservice/v1/request_response.proto\x12&temporal.server.api.matchingservice.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a(temporal/api/deployment/v1/message.proto\x1a&temporal/api/enums/v1/task_queue.proto\x1a%temporal/api/failure/v1/message.proto\x1a%temporal/api/history/v1/message.proto\x1a#temporal/api/nexus/v1/message.proto\x1a&temporal/api/protocol/v1/message.proto\x1a#temporal/api/query/v1/message.proto\x1a'temporal/api/taskqueue/v1/message.proto\x1a$temporal/api/worker/v1/message.proto\x1a6temporal/api/workflowservice/v1/request_response.proto\x1a*temporal/server/api/clock/v1/message.proto\x1a/temporal/server/api/deployment/v1/message.proto\x1a1temporal/server/api/enums/v1/fairness_state.proto\x1a,temporal/server/api/history/v1/message.proto\x1a.temporal/server/api/persistence/v1/nexus.proto\x1a4temporal/server/api/persistence/v1/task_queues.proto\x1a.temporal/server/api/taskqueue/v1/message.proto\"\xc3\x02\n" + + "\x1cPollWorkflowTaskQueueRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1b\n" + + "\tpoller_id\x18\x02 \x01(\tR\bpollerId\x12`\n" + + "\fpoll_request\x18\x03 \x01(\v2=.temporal.api.workflowservice.v1.PollWorkflowTaskQueueRequestR\vpollRequest\x12)\n" + + "\x10forwarded_source\x18\x04 \x01(\tR\x0fforwardedSource\x12V\n" + + "\n" + + "conditions\x18\x05 \x01(\v26.temporal.server.api.matchingservice.v1.PollConditionsR\n" + + "conditions\"\xd1\v\n" + + "\x1dPollWorkflowTaskQueueResponse\x12\x1d\n" + + "\n" + + "task_token\x18\x01 
\x01(\fR\ttaskToken\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12I\n" + + "\rworkflow_type\x18\x03 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x129\n" + + "\x19previous_started_event_id\x18\x04 \x01(\x03R\x16previousStartedEventId\x12(\n" + + "\x10started_event_id\x18\x05 \x01(\x03R\x0estartedEventId\x12\x18\n" + + "\aattempt\x18\x06 \x01(\x05R\aattempt\x12\"\n" + + "\rnext_event_id\x18\a \x01(\x03R\vnextEventId\x12,\n" + + "\x12backlog_count_hint\x18\b \x01(\x03R\x10backlogCountHint\x128\n" + + "\x18sticky_execution_enabled\x18\t \x01(\bR\x16stickyExecutionEnabled\x12:\n" + + "\x05query\x18\n" + + " \x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05query\x12q\n" + + "\x17transient_workflow_task\x18\v \x01(\v29.temporal.server.api.history.v1.TransientWorkflowTaskInfoR\x15transientWorkflowTask\x12g\n" + + "\x1dworkflow_execution_task_queue\x18\f \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\x1aworkflowExecutionTaskQueue\x12!\n" + + "\fbranch_token\x18\x0e \x01(\fR\vbranchToken\x12A\n" + + "\x0escheduled_time\x18\x0f \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12=\n" + + "\fstarted_time\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12l\n" + + "\aqueries\x18\x11 \x03(\v2R.temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.QueriesEntryR\aqueries\x12=\n" + + "\bmessages\x18\x12 \x03(\v2!.temporal.api.protocol.v1.MessageR\bmessages\x12:\n" + + "\ahistory\x18\x13 \x01(\v2 .temporal.api.history.v1.HistoryR\ahistory\x12&\n" + + "\x0fnext_page_token\x18\x14 \x01(\fR\rnextPageToken\x12h\n" + + "\x17poller_scaling_decision\x18\x15 \x01(\v20.temporal.api.taskqueue.v1.PollerScalingDecisionR\x15pollerScalingDecision\x12A\n" + + "\vraw_history\x18\x16 \x01(\v2 .temporal.api.history.v1.HistoryR\n" + + "rawHistory\x1a`\n" + + "\fQueriesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12:\n" + + "\x05value\x18\x02 
\x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05value:\x028\x01J\x04\b\r\x10\x0e\"\xcb\v\n" + + "+PollWorkflowTaskQueueResponseWithRawHistory\x12\x1d\n" + + "\n" + + "task_token\x18\x01 \x01(\fR\ttaskToken\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12I\n" + + "\rworkflow_type\x18\x03 \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x129\n" + + "\x19previous_started_event_id\x18\x04 \x01(\x03R\x16previousStartedEventId\x12(\n" + + "\x10started_event_id\x18\x05 \x01(\x03R\x0estartedEventId\x12\x18\n" + + "\aattempt\x18\x06 \x01(\x05R\aattempt\x12\"\n" + + "\rnext_event_id\x18\a \x01(\x03R\vnextEventId\x12,\n" + + "\x12backlog_count_hint\x18\b \x01(\x03R\x10backlogCountHint\x128\n" + + "\x18sticky_execution_enabled\x18\t \x01(\bR\x16stickyExecutionEnabled\x12:\n" + + "\x05query\x18\n" + + " \x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05query\x12q\n" + + "\x17transient_workflow_task\x18\v \x01(\v29.temporal.server.api.history.v1.TransientWorkflowTaskInfoR\x15transientWorkflowTask\x12g\n" + + "\x1dworkflow_execution_task_queue\x18\f \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\x1aworkflowExecutionTaskQueue\x12!\n" + + "\fbranch_token\x18\x0e \x01(\fR\vbranchToken\x12A\n" + + "\x0escheduled_time\x18\x0f \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12=\n" + + "\fstarted_time\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12z\n" + + "\aqueries\x18\x11 \x03(\v2`.temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.QueriesEntryR\aqueries\x12=\n" + + "\bmessages\x18\x12 \x03(\v2!.temporal.api.protocol.v1.MessageR\bmessages\x12:\n" + + "\ahistory\x18\x13 \x01(\v2 .temporal.api.history.v1.HistoryR\ahistory\x12&\n" + + "\x0fnext_page_token\x18\x14 \x01(\fR\rnextPageToken\x12h\n" + + "\x17poller_scaling_decision\x18\x15 \x01(\v20.temporal.api.taskqueue.v1.PollerScalingDecisionR\x15pollerScalingDecision\x12\x1f\n" + + 
"\vraw_history\x18\x16 \x03(\fR\n" + + "rawHistory\x1a`\n" + + "\fQueriesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12:\n" + + "\x05value\x18\x02 \x01(\v2$.temporal.api.query.v1.WorkflowQueryR\x05value:\x028\x01J\x04\b\r\x10\x0e\"\xc3\x02\n" + + "\x1cPollActivityTaskQueueRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1b\n" + + "\tpoller_id\x18\x02 \x01(\tR\bpollerId\x12`\n" + + "\fpoll_request\x18\x03 \x01(\v2=.temporal.api.workflowservice.v1.PollActivityTaskQueueRequestR\vpollRequest\x12)\n" + + "\x10forwarded_source\x18\x04 \x01(\tR\x0fforwardedSource\x12V\n" + + "\n" + + "conditions\x18\x05 \x01(\v26.temporal.server.api.matchingservice.v1.PollConditionsR\n" + + "conditions\"\xc0\n" + + "\n" + + "\x1dPollActivityTaskQueueResponse\x12\x1d\n" + + "\n" + + "task_token\x18\x01 \x01(\fR\ttaskToken\x12X\n" + + "\x12workflow_execution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\x11workflowExecution\x12\x1f\n" + + "\vactivity_id\x18\x03 \x01(\tR\n" + + "activityId\x12I\n" + + "\ractivity_type\x18\x04 \x01(\v2$.temporal.api.common.v1.ActivityTypeR\factivityType\x126\n" + + "\x05input\x18\x05 \x01(\v2 .temporal.api.common.v1.PayloadsR\x05input\x12A\n" + + "\x0escheduled_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12T\n" + + "\x19schedule_to_close_timeout\x18\a \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToCloseTimeout\x12=\n" + + "\fstarted_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12N\n" + + "\x16start_to_close_timeout\x18\t \x01(\v2\x19.google.protobuf.DurationR\x13startToCloseTimeout\x12F\n" + + "\x11heartbeat_timeout\x18\n" + + " \x01(\v2\x19.google.protobuf.DurationR\x10heartbeatTimeout\x12\x18\n" + + "\aattempt\x18\v \x01(\x05R\aattempt\x12_\n" + + "\x1ecurrent_attempt_scheduled_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\x1bcurrentAttemptScheduledTime\x12M\n" + + "\x11heartbeat_details\x18\r \x01(\v2 
.temporal.api.common.v1.PayloadsR\x10heartbeatDetails\x12I\n" + + "\rworkflow_type\x18\x0e \x01(\v2$.temporal.api.common.v1.WorkflowTypeR\fworkflowType\x12-\n" + + "\x12workflow_namespace\x18\x0f \x01(\tR\x11workflowNamespace\x126\n" + + "\x06header\x18\x10 \x01(\v2\x1e.temporal.api.common.v1.HeaderR\x06header\x12h\n" + + "\x17poller_scaling_decision\x18\x11 \x01(\v20.temporal.api.taskqueue.v1.PollerScalingDecisionR\x15pollerScalingDecision\x12<\n" + + "\bpriority\x18\x12 \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12F\n" + + "\fretry_policy\x18\x13 \x01(\v2#.temporal.api.common.v1.RetryPolicyR\vretryPolicy\x12&\n" + + "\x0factivity_run_id\x18\x14 \x01(\tR\ractivityRunId\"\x9d\x05\n" + + "\x16AddWorkflowTaskRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12C\n" + + "\n" + + "task_queue\x18\x03 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12,\n" + + "\x12scheduled_event_id\x18\x04 \x01(\x03R\x10scheduledEventId\x12T\n" + + "\x19schedule_to_start_timeout\x18\x05 \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12?\n" + + "\x05clock\x18\t \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12c\n" + + "\x11version_directive\x18\n" + + " \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12T\n" + + "\fforward_info\x18\v \x01(\v21.temporal.server.api.taskqueue.v1.TaskForwardInfoR\vforwardInfo\x12<\n" + + "\bpriority\x18\f \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12\x14\n" + + "\x05stamp\x18\r \x01(\x05R\x05stamp\"E\n" + + "\x17AddWorkflowTaskResponse\x12*\n" + + "\x11assigned_build_id\x18\x01 \x01(\tR\x0fassignedBuildId\"\xc8\x05\n" + + "\x16AddActivityTaskRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12G\n" + + "\texecution\x18\x02 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12C\n" + + "\n" + + "task_queue\x18\x04 
\x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12,\n" + + "\x12scheduled_event_id\x18\x05 \x01(\x03R\x10scheduledEventId\x12T\n" + + "\x19schedule_to_start_timeout\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12?\n" + + "\x05clock\x18\t \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12c\n" + + "\x11version_directive\x18\n" + + " \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12T\n" + + "\fforward_info\x18\v \x01(\v21.temporal.server.api.taskqueue.v1.TaskForwardInfoR\vforwardInfo\x12\x14\n" + + "\x05stamp\x18\f \x01(\x05R\x05stamp\x12<\n" + + "\bpriority\x18\r \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12#\n" + + "\rcomponent_ref\x18\x0e \x01(\fR\fcomponentRefJ\x04\b\x03\x10\x04\"E\n" + + "\x17AddActivityTaskResponse\x12*\n" + + "\x11assigned_build_id\x18\x01 \x01(\tR\x0fassignedBuildId\"\xd3\x03\n" + + "\x14QueryWorkflowRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12Z\n" + + "\rquery_request\x18\x03 \x01(\v25.temporal.api.workflowservice.v1.QueryWorkflowRequestR\fqueryRequest\x12c\n" + + "\x11version_directive\x18\x05 \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12T\n" + + "\fforward_info\x18\x06 \x01(\v21.temporal.server.api.taskqueue.v1.TaskForwardInfoR\vforwardInfo\x12<\n" + + "\bpriority\x18\a \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\"\xa9\x01\n" + + "\x15QueryWorkflowResponse\x12C\n" + + "\fquery_result\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\vqueryResult\x12K\n" + + "\x0equery_rejected\x18\x02 \x01(\v2$.temporal.api.query.v1.QueryRejectedR\rqueryRejected\"\x93\x02\n" + + " RespondQueryTaskCompletedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12\x17\n" + + 
"\atask_id\x18\x03 \x01(\tR\x06taskId\x12n\n" + + "\x11completed_request\x18\x04 \x01(\v2A.temporal.api.workflowservice.v1.RespondQueryTaskCompletedRequestR\x10completedRequest\"#\n" + + "!RespondQueryTaskCompletedResponse\"\xf1\x01\n" + + "\x1cCancelOutstandingPollRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12L\n" + + "\x0ftask_queue_type\x18\x02 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12C\n" + + "\n" + + "task_queue\x18\x03 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12\x1b\n" + + "\tpoller_id\x18\x04 \x01(\tR\bpollerId\"\x1f\n" + + "\x1dCancelOutstandingPollResponse\"\xb4\x02\n" + + "#CancelOutstandingWorkerPollsRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12.\n" + + "\x13worker_instance_key\x18\x04 \x01(\tR\x11workerInstanceKey\x12'\n" + + "\x0fworker_identity\x18\x05 \x01(\tR\x0eworkerIdentity\"O\n" + + "$CancelOutstandingWorkerPollsResponse\x12'\n" + + "\x0fcancelled_count\x18\x01 \x01(\x05R\x0ecancelledCount\"\xf1\x01\n" + + "\x18DescribeTaskQueueRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\\\n" + + "\fdesc_request\x18\x02 \x01(\v29.temporal.api.workflowservice.v1.DescribeTaskQueueRequestR\vdescRequest\x12T\n" + + "\aversion\x18\x03 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\"\x82\x01\n" + + "\x19DescribeTaskQueueResponse\x12_\n" + + "\rdesc_response\x18\x03 \x01(\v2:.temporal.api.workflowservice.v1.DescribeTaskQueueResponseR\fdescResponseJ\x04\b\x01\x10\x03\"\xa0\x04\n" + + "\"DescribeVersionedTaskQueuesRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12L\n" + + "\x0ftask_queue_type\x18\x02 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12C\n" + + "\n" + + "task_queue\x18\x03 
\x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12T\n" + + "\aversion\x18\x04 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\x12\x8b\x01\n" + + "\x13version_task_queues\x18\x05 \x03(\v2[.temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.VersionTaskQueueR\x11versionTaskQueues\x1a`\n" + + "\x10VersionTaskQueue\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x128\n" + + "\x04type\x18\x02 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\x04type\"\xf4\x04\n" + + "#DescribeVersionedTaskQueuesResponse\x12\x8c\x01\n" + + "\x13version_task_queues\x18\x01 \x03(\v2\\.temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueueR\x11versionTaskQueues\x1a\xbd\x03\n" + + "\x10VersionTaskQueue\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x128\n" + + "\x04type\x18\x02 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\x04type\x12?\n" + + "\x05stats\x18\x03 \x01(\v2).temporal.api.taskqueue.v1.TaskQueueStatsR\x05stats\x12\xa7\x01\n" + + "\x15stats_by_priority_key\x18\x04 \x03(\v2t.temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue.StatsByPriorityKeyEntryR\x12statsByPriorityKey\x1ap\n" + + "\x17StatsByPriorityKeyEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12?\n" + + "\x05value\x18\x02 \x01(\v2).temporal.api.taskqueue.v1.TaskQueueStatsR\x05value:\x028\x01\"\x94\x03\n" + + "!DescribeTaskQueuePartitionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12f\n" + + "\x14task_queue_partition\x18\x02 \x01(\v24.temporal.server.api.taskqueue.v1.TaskQueuePartitionR\x12taskQueuePartition\x12P\n" + + "\bversions\x18\x03 \x01(\v24.temporal.api.taskqueue.v1.TaskQueueVersionSelectionR\bversions\x12!\n" + + "\freport_stats\x18\x04 \x01(\bR\vreportStats\x12%\n" + + "\x0ereport_pollers\x18\x05 \x01(\bR\rreportPollers\x12H\n" + + "!report_internal_task_queue_status\x18\x06 \x01(\bR\x1dreportInternalTaskQueueStatus\"\xcb\x02\n" + + 
"\"DescribeTaskQueuePartitionResponse\x12\x9a\x01\n" + + "\x16versions_info_internal\x18\x01 \x03(\v2d.temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntryR\x14versionsInfoInternal\x1a\x87\x01\n" + + "\x19VersionsInfoInternalEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12T\n" + + "\x05value\x18\x02 \x01(\v2>.temporal.server.api.taskqueue.v1.TaskQueueVersionInfoInternalR\x05value:\x028\x01\"\xa6\x01\n" + + "\x1eListTaskQueuePartitionsRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12!\n" + + "\fnamespace_id\x18\x03 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\"\x99\x02\n" + + "\x1fListTaskQueuePartitionsResponse\x12z\n" + + "\x1eactivity_task_queue_partitions\x18\x01 \x03(\v25.temporal.api.taskqueue.v1.TaskQueuePartitionMetadataR\x1bactivityTaskQueuePartitions\x12z\n" + + "\x1eworkflow_task_queue_partitions\x18\x02 \x03(\v25.temporal.api.taskqueue.v1.TaskQueuePartitionMetadataR\x1bworkflowTaskQueuePartitions\"\xb9\x05\n" + + "'UpdateWorkerBuildIdCompatibilityRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12\x96\x01\n" + + "\x14apply_public_request\x18\x03 \x01(\v2b.temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.ApplyPublicRequestH\x00R\x12applyPublicRequest\x12\x8a\x01\n" + + "\x10remove_build_ids\x18\x04 \x01(\v2^.temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.RemoveBuildIdsH\x00R\x0eremoveBuildIds\x129\n" + + "\x18persist_unknown_build_id\x18\x05 \x01(\tH\x00R\x15persistUnknownBuildId\x1ax\n" + + "\x12ApplyPublicRequest\x12b\n" + + "\arequest\x18\x01 \x01(\v2H.temporal.api.workflowservice.v1.UpdateWorkerBuildIdCompatibilityRequestR\arequest\x1ad\n" + + "\x0eRemoveBuildIds\x125\n" + + "\x17known_user_data_version\x18\x01 \x01(\x03R\x14knownUserDataVersion\x12\x1b\n" + + 
"\tbuild_ids\x18\x02 \x03(\tR\bbuildIdsB\v\n" + + "\toperation\"*\n" + + "(UpdateWorkerBuildIdCompatibilityResponse\"\xcc\x01\n" + + "\x1fGetWorkerVersioningRulesRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12\\\n" + + "\arequest\x18\x03 \x01(\v2@.temporal.api.workflowservice.v1.GetWorkerVersioningRulesRequestH\x00R\arequestB\t\n" + + "\acommand\"\x81\x01\n" + + " GetWorkerVersioningRulesResponse\x12]\n" + + "\bresponse\x18\x01 \x01(\v2A.temporal.api.workflowservice.v1.GetWorkerVersioningRulesResponseR\bresponse\"\xd2\x01\n" + + "\"UpdateWorkerVersioningRulesRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12_\n" + + "\arequest\x18\x03 \x01(\v2C.temporal.api.workflowservice.v1.UpdateWorkerVersioningRulesRequestH\x00R\arequestB\t\n" + + "\acommand\"\x87\x01\n" + + "#UpdateWorkerVersioningRulesResponse\x12`\n" + + "\bresponse\x18\x01 \x01(\v2D.temporal.api.workflowservice.v1.UpdateWorkerVersioningRulesResponseR\bresponse\"\xaa\x01\n" + + "$GetWorkerBuildIdCompatibilityRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12_\n" + + "\arequest\x18\x02 \x01(\v2E.temporal.api.workflowservice.v1.GetWorkerBuildIdCompatibilityRequestR\arequest\"\x8b\x01\n" + + "%GetWorkerBuildIdCompatibilityResponse\x12b\n" + + "\bresponse\x18\x01 \x01(\v2F.temporal.api.workflowservice.v1.GetWorkerBuildIdCompatibilityResponseR\bresponse\"\x81\x03\n" + + "\x1bGetTaskQueueUserDataRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x05 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12>\n" + + "\x1clast_known_user_data_version\x18\x03 \x01(\x03R\x18lastKnownUserDataVersion\x12H\n" + + "!last_known_ephemeral_data_version\x18\a \x01(\x03R\x1dlastKnownEphemeralDataVersion\x12\"\n" + + 
"\rwait_new_data\x18\x04 \x01(\bR\vwaitNewData\x12$\n" + + "\x0eonly_if_loaded\x18\x06 \x01(\bR\fonlyIfLoaded\"\xe2\x01\n" + + "\x1cGetTaskQueueUserDataResponse\x12[\n" + + "\tuser_data\x18\x02 \x01(\v2>.temporal.server.api.persistence.v1.VersionedTaskQueueUserDataR\buserData\x12_\n" + + "\x0eephemeral_data\x18\x03 \x01(\v28.temporal.server.api.taskqueue.v1.VersionedEphemeralDataR\rephemeralDataJ\x04\b\x01\x10\x02\"\xf4\x06\n" + + "\x1dSyncDeploymentUserDataRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12'\n" + + "\x0fdeployment_name\x18\t \x01(\tR\x0edeploymentName\x12N\n" + + "\x10task_queue_types\x18\b \x03(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\x0etaskQueueTypes\x12n\n" + + "\x13update_version_data\x18\x06 \x01(\v28.temporal.server.api.deployment.v1.DeploymentVersionDataB\x02\x18\x01H\x00R\x11updateVersionData\x12g\n" + + "\x0eforget_version\x18\a \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionB\x02\x18\x01H\x00R\rforgetVersion\x12]\n" + + "\x15update_routing_config\x18\n" + + " \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\x13updateRoutingConfig\x12\x8f\x01\n" + + "\x14upsert_versions_data\x18\v \x03(\v2].temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.UpsertVersionsDataEntryR\x12upsertVersionsData\x12'\n" + + "\x0fforget_versions\x18\f \x03(\tR\x0eforgetVersions\x1a\x85\x01\n" + + "\x17UpsertVersionsDataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12T\n" + + "\x05value\x18\x02 \x01(\v2>.temporal.server.api.deployment.v1.WorkerDeploymentVersionDataR\x05value:\x028\x01B\v\n" + + "\toperationJ\x04\b\x03\x10\x04J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06\"t\n" + + "\x1eSyncDeploymentUserDataResponse\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x128\n" + + "\x16routing_config_changed\x18\x02 \x01(\bB\x02\x18\x01R\x14routingConfigChanged\"\xc5\x01\n" + + "-ApplyTaskQueueUserDataReplicationEventRequest\x12!\n" + + 
"\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12R\n" + + "\tuser_data\x18\x03 \x01(\v25.temporal.server.api.persistence.v1.TaskQueueUserDataR\buserData\"0\n" + + ".ApplyTaskQueueUserDataReplicationEventResponse\"a\n" + + "!GetBuildIdTaskQueueMappingRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x19\n" + + "\bbuild_id\x18\x02 \x01(\tR\abuildId\"E\n" + + "\"GetBuildIdTaskQueueMappingResponse\x12\x1f\n" + + "\vtask_queues\x18\x01 \x03(\tR\n" + + "taskQueues\"\xaf\x01\n" + + "\"ForceLoadTaskQueuePartitionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12f\n" + + "\x14task_queue_partition\x18\x02 \x01(\v24.temporal.server.api.taskqueue.v1.TaskQueuePartitionR\x12taskQueuePartition\"H\n" + + "#ForceLoadTaskQueuePartitionResponse\x12!\n" + + "\fwas_unloaded\x18\x01 \x01(\bR\vwasUnloaded\"\xad\x01\n" + + "\x1bForceUnloadTaskQueueRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\"=\n" + + "\x1cForceUnloadTaskQueueResponse\x12\x1d\n" + + "\n" + + "was_loaded\x18\x01 \x01(\bR\twasLoaded\"\xb1\x01\n" + + "$ForceUnloadTaskQueuePartitionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12f\n" + + "\x14task_queue_partition\x18\x02 \x01(\v24.temporal.server.api.taskqueue.v1.TaskQueuePartitionR\x12taskQueuePartition\"F\n" + + "%ForceUnloadTaskQueuePartitionResponse\x12\x1d\n" + + "\n" + + "was_loaded\x18\x01 \x01(\bR\twasLoaded\"\x93\x02\n" + + "\x1eUpdateTaskQueueUserDataRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12[\n" + + "\tuser_data\x18\x03 \x01(\v2>.temporal.server.api.persistence.v1.VersionedTaskQueueUserDataR\buserData\x12&\n" + + "\x0fbuild_ids_added\x18\x04 
\x03(\tR\rbuildIdsAdded\x12*\n" + + "\x11build_ids_removed\x18\x05 \x03(\tR\x0fbuildIdsRemoved\"!\n" + + "\x1fUpdateTaskQueueUserDataResponse\"\xb9\x01\n" + + "!ReplicateTaskQueueUserDataRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12R\n" + + "\tuser_data\x18\x03 \x01(\v25.temporal.server.api.persistence.v1.TaskQueueUserDataR\buserData\"$\n" + + "\"ReplicateTaskQueueUserDataResponse\"\x86\x01\n" + + "(CheckTaskQueueUserDataPropagationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12\x18\n" + + "\aversion\x18\x03 \x01(\x03R\aversion\"+\n" + + ")CheckTaskQueueUserDataPropagationResponse\"\x92\x02\n" + + "\x18DispatchNexusTaskRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x128\n" + + "\arequest\x18\x03 \x01(\v2\x1e.temporal.api.nexus.v1.RequestR\arequest\x12T\n" + + "\fforward_info\x18\x04 \x01(\v21.temporal.server.api.taskqueue.v1.TaskForwardInfoR\vforwardInfo\"\xf4\x02\n" + + "\x19DispatchNexusTaskResponse\x12N\n" + + "\rhandler_error\x18\x01 \x01(\v2#.temporal.api.nexus.v1.HandlerErrorB\x02\x18\x01H\x00R\fhandlerError\x12=\n" + + "\bresponse\x18\x02 \x01(\v2\x1f.temporal.api.nexus.v1.ResponseH\x00R\bresponse\x12t\n" + + "\x0frequest_timeout\x18\x03 \x01(\v2I.temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.TimeoutH\x00R\x0erequestTimeout\x12<\n" + + "\afailure\x18\x04 \x01(\v2 .temporal.api.failure.v1.FailureH\x00R\afailure\x1a\t\n" + + "\aTimeoutB\t\n" + + "\aoutcome\"\xb4\x02\n" + + "\x19PollNexusTaskQueueRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1b\n" + + "\tpoller_id\x18\x02 \x01(\tR\bpollerId\x12T\n" + + "\arequest\x18\x03 \x01(\v2:.temporal.api.workflowservice.v1.PollNexusTaskQueueRequestR\arequest\x12)\n" + + 
"\x10forwarded_source\x18\x04 \x01(\tR\x0fforwardedSource\x12V\n" + + "\n" + + "conditions\x18\x05 \x01(\v26.temporal.server.api.matchingservice.v1.PollConditionsR\n" + + "conditions\"u\n" + + "\x1aPollNexusTaskQueueResponse\x12W\n" + + "\bresponse\x18\x01 \x01(\v2;.temporal.api.workflowservice.v1.PollNexusTaskQueueResponseR\bresponse\"\x80\x02\n" + + " RespondNexusTaskCompletedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12\x17\n" + + "\atask_id\x18\x03 \x01(\tR\x06taskId\x12[\n" + + "\arequest\x18\x04 \x01(\v2A.temporal.api.workflowservice.v1.RespondNexusTaskCompletedRequestR\arequest\"#\n" + + "!RespondNexusTaskCompletedResponse\"\xfa\x01\n" + + "\x1dRespondNexusTaskFailedRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12\x17\n" + + "\atask_id\x18\x03 \x01(\tR\x06taskId\x12X\n" + + "\arequest\x18\x04 \x01(\v2>.temporal.api.workflowservice.v1.RespondNexusTaskFailedRequestR\arequest\" \n" + + "\x1eRespondNexusTaskFailedResponse\"g\n" + + "\x1aCreateNexusEndpointRequest\x12I\n" + + "\x04spec\x18\x01 \x01(\v25.temporal.server.api.persistence.v1.NexusEndpointSpecR\x04spec\"k\n" + + "\x1bCreateNexusEndpointResponse\x12L\n" + + "\x05entry\x18\x01 \x01(\v26.temporal.server.api.persistence.v1.NexusEndpointEntryR\x05entry\"\x91\x01\n" + + "\x1aUpdateNexusEndpointRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x18\n" + + "\aversion\x18\x02 \x01(\x03R\aversion\x12I\n" + + "\x04spec\x18\x03 \x01(\v25.temporal.server.api.persistence.v1.NexusEndpointSpecR\x04spec\"k\n" + + "\x1bUpdateNexusEndpointResponse\x12L\n" + + "\x05entry\x18\x01 \x01(\v26.temporal.server.api.persistence.v1.NexusEndpointEntryR\x05entry\",\n" + + "\x1aDeleteNexusEndpointRequest\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\"\x1d\n" + + 
"\x1bDeleteNexusEndpointResponse\"\xad\x01\n" + + "\x19ListNexusEndpointsRequest\x12&\n" + + "\x0fnext_page_token\x18\x01 \x01(\fR\rnextPageToken\x12\x1b\n" + + "\tpage_size\x18\x02 \x01(\x05R\bpageSize\x127\n" + + "\x18last_known_table_version\x18\x03 \x01(\x03R\x15lastKnownTableVersion\x12\x12\n" + + "\x04wait\x18\x04 \x01(\bR\x04wait\"\xbb\x01\n" + + "\x1aListNexusEndpointsResponse\x12&\n" + + "\x0fnext_page_token\x18\x01 \x01(\fR\rnextPageToken\x12#\n" + + "\rtable_version\x18\x02 \x01(\x03R\ftableVersion\x12P\n" + + "\aentries\x18\x03 \x03(\v26.temporal.server.api.persistence.v1.NexusEndpointEntryR\aentries\"\xaf\x01\n" + + "\x1cRecordWorkerHeartbeatRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12l\n" + + "\x12heartbeart_request\x18\x02 \x01(\v2=.temporal.api.workflowservice.v1.RecordWorkerHeartbeatRequestR\x11heartbeartRequest\"\x1f\n" + + "\x1dRecordWorkerHeartbeatResponse\"\x8f\x01\n" + + "\x12ListWorkersRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12V\n" + + "\flist_request\x18\x02 \x01(\v23.temporal.api.workflowservice.v1.ListWorkersRequestR\vlistRequest\"\xca\x01\n" + + "\x13ListWorkersResponse\x12I\n" + + "\fworkers_info\x18\x01 \x03(\v2\".temporal.api.worker.v1.WorkerInfoB\x02\x18\x01R\vworkersInfo\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\x12@\n" + + "\aworkers\x18\x03 \x03(\v2&.temporal.api.worker.v1.WorkerListInfoR\aworkers\"\xb8\x01\n" + + "\x1cUpdateTaskQueueConfigRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12u\n" + + "\x17update_taskqueue_config\x18\x03 \x01(\v2=.temporal.api.workflowservice.v1.UpdateTaskQueueConfigRequestR\x15updateTaskqueueConfig\"\x85\x01\n" + + "\x1dUpdateTaskQueueConfigResponse\x12d\n" + + "\x18updated_taskqueue_config\x18\x01 \x01(\v2*.temporal.api.taskqueue.v1.TaskQueueConfigR\x16updatedTaskqueueConfig\"\x8c\x01\n" + + "\x15DescribeWorkerRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12P\n" + + "\arequest\x18\x02 
\x01(\v26.temporal.api.workflowservice.v1.DescribeWorkerRequestR\arequest\"]\n" + + "\x16DescribeWorkerResponse\x12C\n" + + "\vworker_info\x18\x01 \x01(\v2\".temporal.api.worker.v1.WorkerInfoR\n" + + "workerInfo\"\x80\x02\n" + + "\x1aUpdateFairnessStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12R\n" + + "\x0efairness_state\x18\x04 \x01(\x0e2+.temporal.server.api.enums.v1.FairnessStateR\rfairnessState\"\x1d\n" + + "\x1bUpdateFairnessStateResponse\"\x8e\x02\n" + + "&CheckTaskQueueVersionMembershipRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12L\n" + + "\x0ftask_queue_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\rtaskQueueType\x12T\n" + + "\aversion\x18\x04 \x01(\v2:.temporal.server.api.deployment.v1.WorkerDeploymentVersionR\aversion\"\xa9\x01\n" + + "'CheckTaskQueueVersionMembershipResponse\x12\x1b\n" + + "\tis_member\x18\x01 \x01(\bR\bisMember\x128\n" + + "\x18should_skip_reactivation\x18\x02 \x01(\bR\x16shouldSkipReactivation\x12'\n" + + "\x0frevision_number\x18\x03 \x01(\x03R\x0erevisionNumber\"L\n" + + "\x0ePollConditions\x12!\n" + + "\fmin_priority\x18\x01 \x01(\x05R\vminPriority\x12\x17\n" + + "\ano_wait\x18\x02 \x01(\bR\x06noWaitB>Z temporal.api.workflowservice.v1.PollWorkflowTaskQueueRequest - 54, // 1: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 55, // 2: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType - 56, // 3: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.query:type_name -> temporal.api.query.v1.WorkflowQuery - 57, // 4: 
temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo - 58, // 5: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.workflow_execution_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 59, // 6: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.scheduled_time:type_name -> google.protobuf.Timestamp - 59, // 7: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.started_time:type_name -> google.protobuf.Timestamp - 50, // 8: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.queries:type_name -> temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.QueriesEntry - 60, // 9: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.messages:type_name -> temporal.api.protocol.v1.Message - 61, // 10: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.history:type_name -> temporal.api.history.v1.History - 62, // 11: temporal.server.api.matchingservice.v1.PollActivityTaskQueueRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollActivityTaskQueueRequest - 54, // 12: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution - 63, // 13: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.activity_type:type_name -> temporal.api.common.v1.ActivityType - 64, // 14: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.input:type_name -> temporal.api.common.v1.Payloads - 59, // 15: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.scheduled_time:type_name -> google.protobuf.Timestamp - 65, // 16: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.schedule_to_close_timeout:type_name -> google.protobuf.Duration - 59, // 17: 
temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.started_time:type_name -> google.protobuf.Timestamp - 65, // 18: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.start_to_close_timeout:type_name -> google.protobuf.Duration - 65, // 19: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.heartbeat_timeout:type_name -> google.protobuf.Duration - 59, // 20: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.current_attempt_scheduled_time:type_name -> google.protobuf.Timestamp - 64, // 21: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.heartbeat_details:type_name -> temporal.api.common.v1.Payloads - 55, // 22: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType - 66, // 23: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.header:type_name -> temporal.api.common.v1.Header - 54, // 24: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 58, // 25: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 65, // 26: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.schedule_to_start_timeout:type_name -> google.protobuf.Duration - 67, // 27: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.source:type_name -> temporal.server.api.enums.v1.TaskSource - 68, // 28: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 69, // 29: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective - 54, // 30: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 58, // 31: 
temporal.server.api.matchingservice.v1.AddActivityTaskRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 65, // 32: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.schedule_to_start_timeout:type_name -> google.protobuf.Duration - 67, // 33: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.source:type_name -> temporal.server.api.enums.v1.TaskSource - 68, // 34: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 69, // 35: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective - 58, // 36: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 70, // 37: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.query_request:type_name -> temporal.api.workflowservice.v1.QueryWorkflowRequest - 69, // 38: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective - 64, // 39: temporal.server.api.matchingservice.v1.QueryWorkflowResponse.query_result:type_name -> temporal.api.common.v1.Payloads - 71, // 40: temporal.server.api.matchingservice.v1.QueryWorkflowResponse.query_rejected:type_name -> temporal.api.query.v1.QueryRejected - 58, // 41: temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 72, // 42: temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedRequest.completed_request:type_name -> temporal.api.workflowservice.v1.RespondQueryTaskCompletedRequest - 73, // 43: temporal.server.api.matchingservice.v1.CancelOutstandingPollRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType - 58, // 44: temporal.server.api.matchingservice.v1.CancelOutstandingPollRequest.task_queue:type_name -> 
temporal.api.taskqueue.v1.TaskQueue - 74, // 45: temporal.server.api.matchingservice.v1.DescribeTaskQueueRequest.desc_request:type_name -> temporal.api.workflowservice.v1.DescribeTaskQueueRequest - 75, // 46: temporal.server.api.matchingservice.v1.DescribeTaskQueueResponse.pollers:type_name -> temporal.api.taskqueue.v1.PollerInfo - 76, // 47: temporal.server.api.matchingservice.v1.DescribeTaskQueueResponse.task_queue_status:type_name -> temporal.api.taskqueue.v1.TaskQueueStatus - 58, // 48: temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 77, // 49: temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse.activity_task_queue_partitions:type_name -> temporal.api.taskqueue.v1.TaskQueuePartitionMetadata - 77, // 50: temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse.workflow_task_queue_partitions:type_name -> temporal.api.taskqueue.v1.TaskQueuePartitionMetadata - 51, // 51: temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.apply_public_request:type_name -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.ApplyPublicRequest - 52, // 52: temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.remove_build_ids:type_name -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.RemoveBuildIds - 78, // 53: temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkerBuildIdCompatibilityRequest - 79, // 54: temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkerBuildIdCompatibilityResponse - 73, // 55: temporal.server.api.matchingservice.v1.GetTaskQueueUserDataRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType - 80, // 56: 
temporal.server.api.matchingservice.v1.GetTaskQueueUserDataResponse.user_data:type_name -> temporal.server.api.persistence.v1.VersionedTaskQueueUserData - 81, // 57: temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventRequest.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData - 73, // 58: temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType - 80, // 59: temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataRequest.user_data:type_name -> temporal.server.api.persistence.v1.VersionedTaskQueueUserData - 81, // 60: temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataRequest.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData - 58, // 61: temporal.server.api.matchingservice.v1.DispatchNexusTaskRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 82, // 62: temporal.server.api.matchingservice.v1.DispatchNexusTaskRequest.request:type_name -> temporal.api.nexus.v1.Request - 83, // 63: temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.handler_error:type_name -> temporal.api.nexus.v1.HandlerError - 84, // 64: temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.response:type_name -> temporal.api.nexus.v1.Response - 85, // 65: temporal.server.api.matchingservice.v1.PollNexusTaskQueueRequest.request:type_name -> temporal.api.workflowservice.v1.PollNexusTaskQueueRequest - 86, // 66: temporal.server.api.matchingservice.v1.PollNexusTaskQueueResponse.response:type_name -> temporal.api.workflowservice.v1.PollNexusTaskQueueResponse - 58, // 67: temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 87, // 68: temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedRequest.request:type_name -> temporal.api.workflowservice.v1.RespondNexusTaskCompletedRequest - 58, // 69: 
temporal.server.api.matchingservice.v1.RespondNexusTaskFailedRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue - 88, // 70: temporal.server.api.matchingservice.v1.RespondNexusTaskFailedRequest.request:type_name -> temporal.api.workflowservice.v1.RespondNexusTaskFailedRequest - 89, // 71: temporal.server.api.matchingservice.v1.CreateNexusIncomingServiceRequest.spec:type_name -> temporal.api.nexus.v1.IncomingServiceSpec - 90, // 72: temporal.server.api.matchingservice.v1.CreateNexusIncomingServiceResponse.service:type_name -> temporal.api.nexus.v1.IncomingService - 89, // 73: temporal.server.api.matchingservice.v1.UpdateNexusIncomingServiceRequest.spec:type_name -> temporal.api.nexus.v1.IncomingServiceSpec - 90, // 74: temporal.server.api.matchingservice.v1.UpdateNexusIncomingServiceResponse.service:type_name -> temporal.api.nexus.v1.IncomingService - 90, // 75: temporal.server.api.matchingservice.v1.ListNexusIncomingServicesResponse.services:type_name -> temporal.api.nexus.v1.IncomingService - 56, // 76: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.QueriesEntry.value:type_name -> temporal.api.query.v1.WorkflowQuery - 91, // 77: temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.ApplyPublicRequest.request:type_name -> temporal.api.workflowservice.v1.UpdateWorkerBuildIdCompatibilityRequest - 78, // [78:78] is the sub-list for method output_type - 78, // [78:78] is the sub-list for method input_type - 78, // [78:78] is the sub-list for extension type_name - 78, // [78:78] is the sub-list for extension extendee - 0, // [0:78] is the sub-list for field type_name + 92, // 0: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollWorkflowTaskQueueRequest + 81, // 1: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueRequest.conditions:type_name -> temporal.server.api.matchingservice.v1.PollConditions + 93, // 2: 
temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 94, // 3: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 95, // 4: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.query:type_name -> temporal.api.query.v1.WorkflowQuery + 96, // 5: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo + 97, // 6: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.workflow_execution_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 98, // 7: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.scheduled_time:type_name -> google.protobuf.Timestamp + 98, // 8: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.started_time:type_name -> google.protobuf.Timestamp + 82, // 9: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.queries:type_name -> temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.QueriesEntry + 99, // 10: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.messages:type_name -> temporal.api.protocol.v1.Message + 100, // 11: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.history:type_name -> temporal.api.history.v1.History + 101, // 12: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.poller_scaling_decision:type_name -> temporal.api.taskqueue.v1.PollerScalingDecision + 100, // 13: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.raw_history:type_name -> temporal.api.history.v1.History + 93, // 14: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 94, // 15: 
temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 95, // 16: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.query:type_name -> temporal.api.query.v1.WorkflowQuery + 96, // 17: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo + 97, // 18: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.workflow_execution_task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 98, // 19: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.scheduled_time:type_name -> google.protobuf.Timestamp + 98, // 20: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.started_time:type_name -> google.protobuf.Timestamp + 83, // 21: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.queries:type_name -> temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.QueriesEntry + 99, // 22: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.messages:type_name -> temporal.api.protocol.v1.Message + 100, // 23: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.history:type_name -> temporal.api.history.v1.History + 101, // 24: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.poller_scaling_decision:type_name -> temporal.api.taskqueue.v1.PollerScalingDecision + 102, // 25: temporal.server.api.matchingservice.v1.PollActivityTaskQueueRequest.poll_request:type_name -> temporal.api.workflowservice.v1.PollActivityTaskQueueRequest + 81, // 26: temporal.server.api.matchingservice.v1.PollActivityTaskQueueRequest.conditions:type_name -> temporal.server.api.matchingservice.v1.PollConditions + 93, // 27: 
temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.workflow_execution:type_name -> temporal.api.common.v1.WorkflowExecution + 103, // 28: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.activity_type:type_name -> temporal.api.common.v1.ActivityType + 104, // 29: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.input:type_name -> temporal.api.common.v1.Payloads + 98, // 30: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.scheduled_time:type_name -> google.protobuf.Timestamp + 105, // 31: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.schedule_to_close_timeout:type_name -> google.protobuf.Duration + 98, // 32: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.started_time:type_name -> google.protobuf.Timestamp + 105, // 33: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.start_to_close_timeout:type_name -> google.protobuf.Duration + 105, // 34: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.heartbeat_timeout:type_name -> google.protobuf.Duration + 98, // 35: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.current_attempt_scheduled_time:type_name -> google.protobuf.Timestamp + 104, // 36: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.heartbeat_details:type_name -> temporal.api.common.v1.Payloads + 94, // 37: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.workflow_type:type_name -> temporal.api.common.v1.WorkflowType + 106, // 38: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.header:type_name -> temporal.api.common.v1.Header + 101, // 39: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.poller_scaling_decision:type_name -> temporal.api.taskqueue.v1.PollerScalingDecision + 107, // 40: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.priority:type_name -> 
temporal.api.common.v1.Priority + 108, // 41: temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse.retry_policy:type_name -> temporal.api.common.v1.RetryPolicy + 93, // 42: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 97, // 43: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 105, // 44: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 109, // 45: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 110, // 46: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective + 111, // 47: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.forward_info:type_name -> temporal.server.api.taskqueue.v1.TaskForwardInfo + 107, // 48: temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest.priority:type_name -> temporal.api.common.v1.Priority + 93, // 49: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 97, // 50: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 105, // 51: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 109, // 52: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 110, // 53: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective + 111, // 54: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.forward_info:type_name -> 
temporal.server.api.taskqueue.v1.TaskForwardInfo + 107, // 55: temporal.server.api.matchingservice.v1.AddActivityTaskRequest.priority:type_name -> temporal.api.common.v1.Priority + 97, // 56: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 112, // 57: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.query_request:type_name -> temporal.api.workflowservice.v1.QueryWorkflowRequest + 110, // 58: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective + 111, // 59: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.forward_info:type_name -> temporal.server.api.taskqueue.v1.TaskForwardInfo + 107, // 60: temporal.server.api.matchingservice.v1.QueryWorkflowRequest.priority:type_name -> temporal.api.common.v1.Priority + 104, // 61: temporal.server.api.matchingservice.v1.QueryWorkflowResponse.query_result:type_name -> temporal.api.common.v1.Payloads + 113, // 62: temporal.server.api.matchingservice.v1.QueryWorkflowResponse.query_rejected:type_name -> temporal.api.query.v1.QueryRejected + 97, // 63: temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 114, // 64: temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedRequest.completed_request:type_name -> temporal.api.workflowservice.v1.RespondQueryTaskCompletedRequest + 115, // 65: temporal.server.api.matchingservice.v1.CancelOutstandingPollRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 97, // 66: temporal.server.api.matchingservice.v1.CancelOutstandingPollRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 97, // 67: temporal.server.api.matchingservice.v1.CancelOutstandingWorkerPollsRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 115, // 68: 
temporal.server.api.matchingservice.v1.CancelOutstandingWorkerPollsRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 116, // 69: temporal.server.api.matchingservice.v1.DescribeTaskQueueRequest.desc_request:type_name -> temporal.api.workflowservice.v1.DescribeTaskQueueRequest + 117, // 70: temporal.server.api.matchingservice.v1.DescribeTaskQueueRequest.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 118, // 71: temporal.server.api.matchingservice.v1.DescribeTaskQueueResponse.desc_response:type_name -> temporal.api.workflowservice.v1.DescribeTaskQueueResponse + 115, // 72: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 97, // 73: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 117, // 74: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 84, // 75: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.version_task_queues:type_name -> temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.VersionTaskQueue + 85, // 76: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.version_task_queues:type_name -> temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue + 119, // 77: temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionRequest.task_queue_partition:type_name -> temporal.server.api.taskqueue.v1.TaskQueuePartition + 120, // 78: temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionRequest.versions:type_name -> temporal.api.taskqueue.v1.TaskQueueVersionSelection + 87, // 79: temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionResponse.versions_info_internal:type_name -> 
temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntry + 97, // 80: temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 121, // 81: temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse.activity_task_queue_partitions:type_name -> temporal.api.taskqueue.v1.TaskQueuePartitionMetadata + 121, // 82: temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse.workflow_task_queue_partitions:type_name -> temporal.api.taskqueue.v1.TaskQueuePartitionMetadata + 88, // 83: temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.apply_public_request:type_name -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.ApplyPublicRequest + 89, // 84: temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.remove_build_ids:type_name -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.RemoveBuildIds + 122, // 85: temporal.server.api.matchingservice.v1.GetWorkerVersioningRulesRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkerVersioningRulesRequest + 123, // 86: temporal.server.api.matchingservice.v1.GetWorkerVersioningRulesResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkerVersioningRulesResponse + 124, // 87: temporal.server.api.matchingservice.v1.UpdateWorkerVersioningRulesRequest.request:type_name -> temporal.api.workflowservice.v1.UpdateWorkerVersioningRulesRequest + 125, // 88: temporal.server.api.matchingservice.v1.UpdateWorkerVersioningRulesResponse.response:type_name -> temporal.api.workflowservice.v1.UpdateWorkerVersioningRulesResponse + 126, // 89: temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityRequest.request:type_name -> temporal.api.workflowservice.v1.GetWorkerBuildIdCompatibilityRequest + 127, // 90: 
temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityResponse.response:type_name -> temporal.api.workflowservice.v1.GetWorkerBuildIdCompatibilityResponse + 115, // 91: temporal.server.api.matchingservice.v1.GetTaskQueueUserDataRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 128, // 92: temporal.server.api.matchingservice.v1.GetTaskQueueUserDataResponse.user_data:type_name -> temporal.server.api.persistence.v1.VersionedTaskQueueUserData + 129, // 93: temporal.server.api.matchingservice.v1.GetTaskQueueUserDataResponse.ephemeral_data:type_name -> temporal.server.api.taskqueue.v1.VersionedEphemeralData + 115, // 94: temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.task_queue_types:type_name -> temporal.api.enums.v1.TaskQueueType + 130, // 95: temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.update_version_data:type_name -> temporal.server.api.deployment.v1.DeploymentVersionData + 117, // 96: temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.forget_version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 131, // 97: temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.update_routing_config:type_name -> temporal.api.deployment.v1.RoutingConfig + 90, // 98: temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.upsert_versions_data:type_name -> temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.UpsertVersionsDataEntry + 132, // 99: temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventRequest.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData + 119, // 100: temporal.server.api.matchingservice.v1.ForceLoadTaskQueuePartitionRequest.task_queue_partition:type_name -> temporal.server.api.taskqueue.v1.TaskQueuePartition + 115, // 101: temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueRequest.task_queue_type:type_name -> 
temporal.api.enums.v1.TaskQueueType + 119, // 102: temporal.server.api.matchingservice.v1.ForceUnloadTaskQueuePartitionRequest.task_queue_partition:type_name -> temporal.server.api.taskqueue.v1.TaskQueuePartition + 128, // 103: temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataRequest.user_data:type_name -> temporal.server.api.persistence.v1.VersionedTaskQueueUserData + 132, // 104: temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataRequest.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData + 97, // 105: temporal.server.api.matchingservice.v1.DispatchNexusTaskRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 133, // 106: temporal.server.api.matchingservice.v1.DispatchNexusTaskRequest.request:type_name -> temporal.api.nexus.v1.Request + 111, // 107: temporal.server.api.matchingservice.v1.DispatchNexusTaskRequest.forward_info:type_name -> temporal.server.api.taskqueue.v1.TaskForwardInfo + 134, // 108: temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.handler_error:type_name -> temporal.api.nexus.v1.HandlerError + 135, // 109: temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.response:type_name -> temporal.api.nexus.v1.Response + 91, // 110: temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.request_timeout:type_name -> temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.Timeout + 136, // 111: temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse.failure:type_name -> temporal.api.failure.v1.Failure + 137, // 112: temporal.server.api.matchingservice.v1.PollNexusTaskQueueRequest.request:type_name -> temporal.api.workflowservice.v1.PollNexusTaskQueueRequest + 81, // 113: temporal.server.api.matchingservice.v1.PollNexusTaskQueueRequest.conditions:type_name -> temporal.server.api.matchingservice.v1.PollConditions + 138, // 114: temporal.server.api.matchingservice.v1.PollNexusTaskQueueResponse.response:type_name -> 
temporal.api.workflowservice.v1.PollNexusTaskQueueResponse + 97, // 115: temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 139, // 116: temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedRequest.request:type_name -> temporal.api.workflowservice.v1.RespondNexusTaskCompletedRequest + 97, // 117: temporal.server.api.matchingservice.v1.RespondNexusTaskFailedRequest.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 140, // 118: temporal.server.api.matchingservice.v1.RespondNexusTaskFailedRequest.request:type_name -> temporal.api.workflowservice.v1.RespondNexusTaskFailedRequest + 141, // 119: temporal.server.api.matchingservice.v1.CreateNexusEndpointRequest.spec:type_name -> temporal.server.api.persistence.v1.NexusEndpointSpec + 142, // 120: temporal.server.api.matchingservice.v1.CreateNexusEndpointResponse.entry:type_name -> temporal.server.api.persistence.v1.NexusEndpointEntry + 141, // 121: temporal.server.api.matchingservice.v1.UpdateNexusEndpointRequest.spec:type_name -> temporal.server.api.persistence.v1.NexusEndpointSpec + 142, // 122: temporal.server.api.matchingservice.v1.UpdateNexusEndpointResponse.entry:type_name -> temporal.server.api.persistence.v1.NexusEndpointEntry + 142, // 123: temporal.server.api.matchingservice.v1.ListNexusEndpointsResponse.entries:type_name -> temporal.server.api.persistence.v1.NexusEndpointEntry + 143, // 124: temporal.server.api.matchingservice.v1.RecordWorkerHeartbeatRequest.heartbeart_request:type_name -> temporal.api.workflowservice.v1.RecordWorkerHeartbeatRequest + 144, // 125: temporal.server.api.matchingservice.v1.ListWorkersRequest.list_request:type_name -> temporal.api.workflowservice.v1.ListWorkersRequest + 145, // 126: temporal.server.api.matchingservice.v1.ListWorkersResponse.workers_info:type_name -> temporal.api.worker.v1.WorkerInfo + 146, // 127: 
temporal.server.api.matchingservice.v1.ListWorkersResponse.workers:type_name -> temporal.api.worker.v1.WorkerListInfo + 147, // 128: temporal.server.api.matchingservice.v1.UpdateTaskQueueConfigRequest.update_taskqueue_config:type_name -> temporal.api.workflowservice.v1.UpdateTaskQueueConfigRequest + 148, // 129: temporal.server.api.matchingservice.v1.UpdateTaskQueueConfigResponse.updated_taskqueue_config:type_name -> temporal.api.taskqueue.v1.TaskQueueConfig + 149, // 130: temporal.server.api.matchingservice.v1.DescribeWorkerRequest.request:type_name -> temporal.api.workflowservice.v1.DescribeWorkerRequest + 145, // 131: temporal.server.api.matchingservice.v1.DescribeWorkerResponse.worker_info:type_name -> temporal.api.worker.v1.WorkerInfo + 115, // 132: temporal.server.api.matchingservice.v1.UpdateFairnessStateRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 150, // 133: temporal.server.api.matchingservice.v1.UpdateFairnessStateRequest.fairness_state:type_name -> temporal.server.api.enums.v1.FairnessState + 115, // 134: temporal.server.api.matchingservice.v1.CheckTaskQueueVersionMembershipRequest.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 117, // 135: temporal.server.api.matchingservice.v1.CheckTaskQueueVersionMembershipRequest.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 95, // 136: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse.QueriesEntry.value:type_name -> temporal.api.query.v1.WorkflowQuery + 95, // 137: temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponseWithRawHistory.QueriesEntry.value:type_name -> temporal.api.query.v1.WorkflowQuery + 115, // 138: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest.VersionTaskQueue.type:type_name -> temporal.api.enums.v1.TaskQueueType + 115, // 139: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue.type:type_name -> 
temporal.api.enums.v1.TaskQueueType + 151, // 140: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue.stats:type_name -> temporal.api.taskqueue.v1.TaskQueueStats + 86, // 141: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue.stats_by_priority_key:type_name -> temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue.StatsByPriorityKeyEntry + 151, // 142: temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse.VersionTaskQueue.StatsByPriorityKeyEntry.value:type_name -> temporal.api.taskqueue.v1.TaskQueueStats + 152, // 143: temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionResponse.VersionsInfoInternalEntry.value:type_name -> temporal.server.api.taskqueue.v1.TaskQueueVersionInfoInternal + 153, // 144: temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest.ApplyPublicRequest.request:type_name -> temporal.api.workflowservice.v1.UpdateWorkerBuildIdCompatibilityRequest + 154, // 145: temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest.UpsertVersionsDataEntry.value:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersionData + 146, // [146:146] is the sub-list for method output_type + 146, // [146:146] is the sub-list for method input_type + 146, // [146:146] is the sub-list for extension type_name + 146, // [146:146] is the sub-list for extension extendee + 0, // [0:146] is the sub-list for field type_name } func init() { file_temporal_server_api_matchingservice_v1_request_response_proto_init() } @@ -4407,648 +6527,34 @@ func file_temporal_server_api_matchingservice_v1_request_response_proto_init() { if File_temporal_server_api_matchingservice_v1_request_response_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*PollWorkflowTaskQueueRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollWorkflowTaskQueueResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollActivityTaskQueueRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollActivityTaskQueueResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddWorkflowTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddWorkflowTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddActivityTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddActivityTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWorkflowResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondQueryTaskCompletedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondQueryTaskCompletedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelOutstandingPollRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelOutstandingPollResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeTaskQueueRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeTaskQueueResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTaskQueuePartitionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTaskQueuePartitionsResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateWorkerBuildIdCompatibilityRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[19].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*UpdateWorkerBuildIdCompatibilityResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkerBuildIdCompatibilityRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetWorkerBuildIdCompatibilityResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTaskQueueUserDataRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTaskQueueUserDataResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyTaskQueueUserDataReplicationEventRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*ApplyTaskQueueUserDataReplicationEventResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBuildIdTaskQueueMappingRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBuildIdTaskQueueMappingResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ForceUnloadTaskQueueRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ForceUnloadTaskQueueResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateTaskQueueUserDataRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateTaskQueueUserDataResponse); i { - case 0: - return &v.state - case 1: 
- return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicateTaskQueueUserDataRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicateTaskQueueUserDataResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DispatchNexusTaskRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DispatchNexusTaskResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollNexusTaskQueueRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PollNexusTaskQueueResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondNexusTaskCompletedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondNexusTaskCompletedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondNexusTaskFailedRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RespondNexusTaskFailedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNexusIncomingServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateNexusIncomingServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateNexusIncomingServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateNexusIncomingServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNexusIncomingServiceRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteNexusIncomingServiceResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNexusIncomingServicesRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListNexusIncomingServicesResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[18].OneofWrappers = []interface{}{ + file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[25].OneofWrappers = []any{ (*UpdateWorkerBuildIdCompatibilityRequest_ApplyPublicRequest_)(nil), (*UpdateWorkerBuildIdCompatibilityRequest_RemoveBuildIds_)(nil), (*UpdateWorkerBuildIdCompatibilityRequest_PersistUnknownBuildId)(nil), } - file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35].OneofWrappers = []interface{}{ + file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[27].OneofWrappers = []any{ + (*GetWorkerVersioningRulesRequest_Request)(nil), + } + file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[29].OneofWrappers = []any{ + (*UpdateWorkerVersioningRulesRequest_Request)(nil), + } + file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[35].OneofWrappers = []any{ + (*SyncDeploymentUserDataRequest_UpdateVersionData)(nil), + (*SyncDeploymentUserDataRequest_ForgetVersion)(nil), + } + file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes[54].OneofWrappers = []any{ (*DispatchNexusTaskResponse_HandlerError)(nil), (*DispatchNexusTaskResponse_Response)(nil), + 
(*DispatchNexusTaskResponse_RequestTimeout)(nil), + (*DispatchNexusTaskResponse_Failure)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc)), NumEnums: 0, - NumMessages: 53, + NumMessages: 92, NumExtensions: 0, NumServices: 0, }, @@ -5057,7 +6563,6 @@ func file_temporal_server_api_matchingservice_v1_request_response_proto_init() { MessageInfos: file_temporal_server_api_matchingservice_v1_request_response_proto_msgTypes, }.Build() File_temporal_server_api_matchingservice_v1_request_response_proto = out.File - file_temporal_server_api_matchingservice_v1_request_response_proto_rawDesc = nil file_temporal_server_api_matchingservice_v1_request_response_proto_goTypes = nil file_temporal_server_api_matchingservice_v1_request_response_proto_depIdxs = nil } diff --git a/api/matchingservice/v1/service.pb.go b/api/matchingservice/v1/service.pb.go index 0376200423b..10479d1ca5c 100644 --- a/api/matchingservice/v1/service.pb.go +++ b/api/matchingservice/v1/service.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -30,7 +8,9 @@ package matchingservice import ( reflect "reflect" + unsafe "unsafe" + _ "go.temporal.io/server/api/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -44,299 +24,52 @@ const ( var File_temporal_server_api_matchingservice_v1_service_proto protoreflect.FileDescriptor -var file_temporal_server_api_matchingservice_v1_service_proto_rawDesc = []byte{ - 0x0a, 0x34, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x3d, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x99, 0x22, - 0x0a, 0x0f, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x12, 0xa6, 0x01, 0x0a, 0x15, 0x50, 0x6f, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x44, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 
0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa6, 0x01, 0x0a, 0x15, 0x50, - 0x6f, 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x12, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, - 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, 0x0f, 0x41, 0x64, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 
0x76, 0x31, - 0x2e, 0x41, 0x64, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x41, 0x64, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x94, 0x01, 0x0a, 0x0f, 0x41, - 0x64, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x3e, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3f, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x8e, 0x01, 0x0a, 0x0d, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x12, 0x3c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, - 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, - 0x72, 0x79, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x3d, 0x2e, 
0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0xb2, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x12, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x49, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x51, 0x75, 0x65, 0x72, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9a, 0x01, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x70, - 0x61, 0x74, 0x63, 0x68, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x40, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x4e, - 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, - 0x68, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0x9d, 0x01, 0x0a, 0x12, 0x50, 0x6f, 0x6c, 0x6c, 0x4e, 0x65, 0x78, - 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x41, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x6c, 0x4e, 0x65, 0x78, 0x75, - 0x73, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xb2, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, - 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x12, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, - 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, - 
0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x49, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, - 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa9, 0x01, 0x0a, 0x16, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x12, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, - 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x46, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x4e, 0x65, 0x78, 0x75, - 0x73, 0x54, 0x61, 0x73, 0x6b, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa6, 0x01, 0x0a, 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x6c, 0x12, - 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 
0x76, 0x31, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, - 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x6c, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, - 0x50, 0x6f, 0x6c, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9a, - 0x01, 0x0a, 0x11, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x12, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xac, 0x01, 0x0a, 0x17, - 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, - 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xc7, 0x01, 0x0a, 0x20, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, - 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, - 0x4f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, - 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x50, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, - 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xbe, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 
0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa3, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x43, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, - 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 
0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, - 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xd9, 0x01, 0x0a, 0x26, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x55, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x41, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x56, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x54, 0x61, 0x73, 0x6b, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb5, 0x01, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4d, - 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x47, 0x65, 0x74, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x54, 0x61, 0x73, 0x6b, 
0x51, 0x75, - 0x65, 0x75, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x49, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4d, 0x61, - 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, - 0xa3, 0x01, 0x0a, 0x14, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x55, 0x6e, 0x6c, 0x6f, - 0x61, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xac, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x46, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, - 0x73, 0x65, 0x72, 0x76, 
0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xb5, 0x01, 0x0a, 0x1a, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x49, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, - 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4a, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb5, 0x01, 0x0a, - 0x1a, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, - 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x12, 0x49, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, - 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, - 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xb5, 0x01, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, - 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x49, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, - 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4a, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x65, - 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, - 
0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xb5, 0x01, 0x0a, - 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, - 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, - 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, - 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x00, 0x12, 0xb2, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x78, - 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x12, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, - 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x49, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x6d, 0x61, 0x74, 
0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, - 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x6f, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x69, - 0x6e, 0x67, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} +const file_temporal_server_api_matchingservice_v1_service_proto_rawDesc = "" + + "\n" + + "4temporal/server/api/matchingservice/v1/service.proto\x12&temporal.server.api.matchingservice.v1\x1a0temporal/server/api/common/v1/api_category.proto\x1a=temporal/server/api/matchingservice/v1/request_response.proto2\xa38\n" + + "\x0fMatchingService\x12\xac\x01\n" + + "\x15PollWorkflowTaskQueue\x12D.temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueRequest\x1aE.temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xac\x01\n" + + "\x15PollActivityTaskQueue\x12D.temporal.server.api.matchingservice.v1.PollActivityTaskQueueRequest\x1aE.temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\x9a\x01\n" + + "\x0fAddWorkflowTask\x12>.temporal.server.api.matchingservice.v1.AddWorkflowTaskRequest\x1a?.temporal.server.api.matchingservice.v1.AddWorkflowTaskResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x9a\x01\n" + + "\x0fAddActivityTask\x12>.temporal.server.api.matchingservice.v1.AddActivityTaskRequest\x1a?.temporal.server.api.matchingservice.v1.AddActivityTaskResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x94\x01\n" + + 
"\rQueryWorkflow\x12<.temporal.server.api.matchingservice.v1.QueryWorkflowRequest\x1a=.temporal.server.api.matchingservice.v1.QueryWorkflowResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xb8\x01\n" + + "\x19RespondQueryTaskCompleted\x12H.temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedRequest\x1aI.temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa0\x01\n" + + "\x11DispatchNexusTask\x12@.temporal.server.api.matchingservice.v1.DispatchNexusTaskRequest\x1aA.temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa3\x01\n" + + "\x12PollNexusTaskQueue\x12A.temporal.server.api.matchingservice.v1.PollNexusTaskQueueRequest\x1aB.temporal.server.api.matchingservice.v1.PollNexusTaskQueueResponse\"\x06\x8a\xb5\x18\x02\b\x02\x12\xb8\x01\n" + + "\x19RespondNexusTaskCompleted\x12H.temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedRequest\x1aI.temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xaf\x01\n" + + "\x16RespondNexusTaskFailed\x12E.temporal.server.api.matchingservice.v1.RespondNexusTaskFailedRequest\x1aF.temporal.server.api.matchingservice.v1.RespondNexusTaskFailedResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xac\x01\n" + + "\x15CancelOutstandingPoll\x12D.temporal.server.api.matchingservice.v1.CancelOutstandingPollRequest\x1aE.temporal.server.api.matchingservice.v1.CancelOutstandingPollResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc1\x01\n" + + "\x1cCancelOutstandingWorkerPolls\x12K.temporal.server.api.matchingservice.v1.CancelOutstandingWorkerPollsRequest\x1aL.temporal.server.api.matchingservice.v1.CancelOutstandingWorkerPollsResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa0\x01\n" + + "\x11DescribeTaskQueue\x12@.temporal.server.api.matchingservice.v1.DescribeTaskQueueRequest\x1aA.temporal.server.api.matchingservice.v1.DescribeTaskQueueResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbb\x01\n" + + 
"\x1aDescribeTaskQueuePartition\x12I.temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionRequest\x1aJ.temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbe\x01\n" + + "\x1bDescribeVersionedTaskQueues\x12J.temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest\x1aK.temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb2\x01\n" + + "\x17ListTaskQueuePartitions\x12F.temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsRequest\x1aG.temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xcd\x01\n" + + " UpdateWorkerBuildIdCompatibility\x12O.temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest\x1aP.temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc4\x01\n" + + "\x1dGetWorkerBuildIdCompatibility\x12L.temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityRequest\x1aM.temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa9\x01\n" + + "\x14GetTaskQueueUserData\x12C.temporal.server.api.matchingservice.v1.GetTaskQueueUserDataRequest\x1aD.temporal.server.api.matchingservice.v1.GetTaskQueueUserDataResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbe\x01\n" + + "\x1bUpdateWorkerVersioningRules\x12J.temporal.server.api.matchingservice.v1.UpdateWorkerVersioningRulesRequest\x1aK.temporal.server.api.matchingservice.v1.UpdateWorkerVersioningRulesResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb5\x01\n" + + "\x18GetWorkerVersioningRules\x12G.temporal.server.api.matchingservice.v1.GetWorkerVersioningRulesRequest\x1aH.temporal.server.api.matchingservice.v1.GetWorkerVersioningRulesResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xaf\x01\n" + + 
"\x16SyncDeploymentUserData\x12E.temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest\x1aF.temporal.server.api.matchingservice.v1.SyncDeploymentUserDataResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xdf\x01\n" + + "&ApplyTaskQueueUserDataReplicationEvent\x12U.temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventRequest\x1aV.temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbb\x01\n" + + "\x1aGetBuildIdTaskQueueMapping\x12I.temporal.server.api.matchingservice.v1.GetBuildIdTaskQueueMappingRequest\x1aJ.temporal.server.api.matchingservice.v1.GetBuildIdTaskQueueMappingResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbe\x01\n" + + "\x1bForceLoadTaskQueuePartition\x12J.temporal.server.api.matchingservice.v1.ForceLoadTaskQueuePartitionRequest\x1aK.temporal.server.api.matchingservice.v1.ForceLoadTaskQueuePartitionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa9\x01\n" + + "\x14ForceUnloadTaskQueue\x12C.temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueRequest\x1aD.temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xc4\x01\n" + + "\x1dForceUnloadTaskQueuePartition\x12L.temporal.server.api.matchingservice.v1.ForceUnloadTaskQueuePartitionRequest\x1aM.temporal.server.api.matchingservice.v1.ForceUnloadTaskQueuePartitionResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xb2\x01\n" + + "\x17UpdateTaskQueueUserData\x12F.temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataRequest\x1aG.temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xbb\x01\n" + + "\x1aReplicateTaskQueueUserData\x12I.temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataRequest\x1aJ.temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xd0\x01\n" + + 
"!CheckTaskQueueUserDataPropagation\x12P.temporal.server.api.matchingservice.v1.CheckTaskQueueUserDataPropagationRequest\x1aQ.temporal.server.api.matchingservice.v1.CheckTaskQueueUserDataPropagationResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa6\x01\n" + + "\x13CreateNexusEndpoint\x12B.temporal.server.api.matchingservice.v1.CreateNexusEndpointRequest\x1aC.temporal.server.api.matchingservice.v1.CreateNexusEndpointResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa6\x01\n" + + "\x13UpdateNexusEndpoint\x12B.temporal.server.api.matchingservice.v1.UpdateNexusEndpointRequest\x1aC.temporal.server.api.matchingservice.v1.UpdateNexusEndpointResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa6\x01\n" + + "\x13DeleteNexusEndpoint\x12B.temporal.server.api.matchingservice.v1.DeleteNexusEndpointRequest\x1aC.temporal.server.api.matchingservice.v1.DeleteNexusEndpointResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa3\x01\n" + + "\x12ListNexusEndpoints\x12A.temporal.server.api.matchingservice.v1.ListNexusEndpointsRequest\x1aB.temporal.server.api.matchingservice.v1.ListNexusEndpointsResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xac\x01\n" + + "\x15RecordWorkerHeartbeat\x12D.temporal.server.api.matchingservice.v1.RecordWorkerHeartbeatRequest\x1aE.temporal.server.api.matchingservice.v1.RecordWorkerHeartbeatResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x8e\x01\n" + + "\vListWorkers\x12:.temporal.server.api.matchingservice.v1.ListWorkersRequest\x1a;.temporal.server.api.matchingservice.v1.ListWorkersResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xac\x01\n" + + "\x15UpdateTaskQueueConfig\x12D.temporal.server.api.matchingservice.v1.UpdateTaskQueueConfigRequest\x1aE.temporal.server.api.matchingservice.v1.UpdateTaskQueueConfigResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\x97\x01\n" + + "\x0eDescribeWorker\x12=.temporal.server.api.matchingservice.v1.DescribeWorkerRequest\x1a>.temporal.server.api.matchingservice.v1.DescribeWorkerResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xa6\x01\n" + + 
"\x13UpdateFairnessState\x12B.temporal.server.api.matchingservice.v1.UpdateFairnessStateRequest\x1aC.temporal.server.api.matchingservice.v1.UpdateFairnessStateResponse\"\x06\x8a\xb5\x18\x02\b\x01\x12\xca\x01\n" + + "\x1fCheckTaskQueueVersionMembership\x12N.temporal.server.api.matchingservice.v1.CheckTaskQueueVersionMembershipRequest\x1aO.temporal.server.api.matchingservice.v1.CheckTaskQueueVersionMembershipResponse\"\x06\x8a\xb5\x18\x02\b\x01B>Z temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueRequest @@ -400,47 +163,77 @@ var file_temporal_server_api_matchingservice_v1_service_proto_depIdxs = []int32{ 8, // 8: temporal.server.api.matchingservice.v1.MatchingService.RespondNexusTaskCompleted:input_type -> temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedRequest 9, // 9: temporal.server.api.matchingservice.v1.MatchingService.RespondNexusTaskFailed:input_type -> temporal.server.api.matchingservice.v1.RespondNexusTaskFailedRequest 10, // 10: temporal.server.api.matchingservice.v1.MatchingService.CancelOutstandingPoll:input_type -> temporal.server.api.matchingservice.v1.CancelOutstandingPollRequest - 11, // 11: temporal.server.api.matchingservice.v1.MatchingService.DescribeTaskQueue:input_type -> temporal.server.api.matchingservice.v1.DescribeTaskQueueRequest - 12, // 12: temporal.server.api.matchingservice.v1.MatchingService.ListTaskQueuePartitions:input_type -> temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsRequest - 13, // 13: temporal.server.api.matchingservice.v1.MatchingService.UpdateWorkerBuildIdCompatibility:input_type -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest - 14, // 14: temporal.server.api.matchingservice.v1.MatchingService.GetWorkerBuildIdCompatibility:input_type -> temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityRequest - 15, // 15: temporal.server.api.matchingservice.v1.MatchingService.GetTaskQueueUserData:input_type -> 
temporal.server.api.matchingservice.v1.GetTaskQueueUserDataRequest - 16, // 16: temporal.server.api.matchingservice.v1.MatchingService.ApplyTaskQueueUserDataReplicationEvent:input_type -> temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventRequest - 17, // 17: temporal.server.api.matchingservice.v1.MatchingService.GetBuildIdTaskQueueMapping:input_type -> temporal.server.api.matchingservice.v1.GetBuildIdTaskQueueMappingRequest - 18, // 18: temporal.server.api.matchingservice.v1.MatchingService.ForceUnloadTaskQueue:input_type -> temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueRequest - 19, // 19: temporal.server.api.matchingservice.v1.MatchingService.UpdateTaskQueueUserData:input_type -> temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataRequest - 20, // 20: temporal.server.api.matchingservice.v1.MatchingService.ReplicateTaskQueueUserData:input_type -> temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataRequest - 21, // 21: temporal.server.api.matchingservice.v1.MatchingService.CreateNexusIncomingService:input_type -> temporal.server.api.matchingservice.v1.CreateNexusIncomingServiceRequest - 22, // 22: temporal.server.api.matchingservice.v1.MatchingService.UpdateNexusIncomingService:input_type -> temporal.server.api.matchingservice.v1.UpdateNexusIncomingServiceRequest - 23, // 23: temporal.server.api.matchingservice.v1.MatchingService.DeleteNexusIncomingService:input_type -> temporal.server.api.matchingservice.v1.DeleteNexusIncomingServiceRequest - 24, // 24: temporal.server.api.matchingservice.v1.MatchingService.ListNexusIncomingServices:input_type -> temporal.server.api.matchingservice.v1.ListNexusIncomingServicesRequest - 25, // 25: temporal.server.api.matchingservice.v1.MatchingService.PollWorkflowTaskQueue:output_type -> temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse - 26, // 26: temporal.server.api.matchingservice.v1.MatchingService.PollActivityTaskQueue:output_type -> 
temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse - 27, // 27: temporal.server.api.matchingservice.v1.MatchingService.AddWorkflowTask:output_type -> temporal.server.api.matchingservice.v1.AddWorkflowTaskResponse - 28, // 28: temporal.server.api.matchingservice.v1.MatchingService.AddActivityTask:output_type -> temporal.server.api.matchingservice.v1.AddActivityTaskResponse - 29, // 29: temporal.server.api.matchingservice.v1.MatchingService.QueryWorkflow:output_type -> temporal.server.api.matchingservice.v1.QueryWorkflowResponse - 30, // 30: temporal.server.api.matchingservice.v1.MatchingService.RespondQueryTaskCompleted:output_type -> temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedResponse - 31, // 31: temporal.server.api.matchingservice.v1.MatchingService.DispatchNexusTask:output_type -> temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse - 32, // 32: temporal.server.api.matchingservice.v1.MatchingService.PollNexusTaskQueue:output_type -> temporal.server.api.matchingservice.v1.PollNexusTaskQueueResponse - 33, // 33: temporal.server.api.matchingservice.v1.MatchingService.RespondNexusTaskCompleted:output_type -> temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedResponse - 34, // 34: temporal.server.api.matchingservice.v1.MatchingService.RespondNexusTaskFailed:output_type -> temporal.server.api.matchingservice.v1.RespondNexusTaskFailedResponse - 35, // 35: temporal.server.api.matchingservice.v1.MatchingService.CancelOutstandingPoll:output_type -> temporal.server.api.matchingservice.v1.CancelOutstandingPollResponse - 36, // 36: temporal.server.api.matchingservice.v1.MatchingService.DescribeTaskQueue:output_type -> temporal.server.api.matchingservice.v1.DescribeTaskQueueResponse - 37, // 37: temporal.server.api.matchingservice.v1.MatchingService.ListTaskQueuePartitions:output_type -> temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse - 38, // 38: 
temporal.server.api.matchingservice.v1.MatchingService.UpdateWorkerBuildIdCompatibility:output_type -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityResponse - 39, // 39: temporal.server.api.matchingservice.v1.MatchingService.GetWorkerBuildIdCompatibility:output_type -> temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityResponse - 40, // 40: temporal.server.api.matchingservice.v1.MatchingService.GetTaskQueueUserData:output_type -> temporal.server.api.matchingservice.v1.GetTaskQueueUserDataResponse - 41, // 41: temporal.server.api.matchingservice.v1.MatchingService.ApplyTaskQueueUserDataReplicationEvent:output_type -> temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventResponse - 42, // 42: temporal.server.api.matchingservice.v1.MatchingService.GetBuildIdTaskQueueMapping:output_type -> temporal.server.api.matchingservice.v1.GetBuildIdTaskQueueMappingResponse - 43, // 43: temporal.server.api.matchingservice.v1.MatchingService.ForceUnloadTaskQueue:output_type -> temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueResponse - 44, // 44: temporal.server.api.matchingservice.v1.MatchingService.UpdateTaskQueueUserData:output_type -> temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataResponse - 45, // 45: temporal.server.api.matchingservice.v1.MatchingService.ReplicateTaskQueueUserData:output_type -> temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataResponse - 46, // 46: temporal.server.api.matchingservice.v1.MatchingService.CreateNexusIncomingService:output_type -> temporal.server.api.matchingservice.v1.CreateNexusIncomingServiceResponse - 47, // 47: temporal.server.api.matchingservice.v1.MatchingService.UpdateNexusIncomingService:output_type -> temporal.server.api.matchingservice.v1.UpdateNexusIncomingServiceResponse - 48, // 48: temporal.server.api.matchingservice.v1.MatchingService.DeleteNexusIncomingService:output_type -> 
temporal.server.api.matchingservice.v1.DeleteNexusIncomingServiceResponse - 49, // 49: temporal.server.api.matchingservice.v1.MatchingService.ListNexusIncomingServices:output_type -> temporal.server.api.matchingservice.v1.ListNexusIncomingServicesResponse - 25, // [25:50] is the sub-list for method output_type - 0, // [0:25] is the sub-list for method input_type + 11, // 11: temporal.server.api.matchingservice.v1.MatchingService.CancelOutstandingWorkerPolls:input_type -> temporal.server.api.matchingservice.v1.CancelOutstandingWorkerPollsRequest + 12, // 12: temporal.server.api.matchingservice.v1.MatchingService.DescribeTaskQueue:input_type -> temporal.server.api.matchingservice.v1.DescribeTaskQueueRequest + 13, // 13: temporal.server.api.matchingservice.v1.MatchingService.DescribeTaskQueuePartition:input_type -> temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionRequest + 14, // 14: temporal.server.api.matchingservice.v1.MatchingService.DescribeVersionedTaskQueues:input_type -> temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesRequest + 15, // 15: temporal.server.api.matchingservice.v1.MatchingService.ListTaskQueuePartitions:input_type -> temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsRequest + 16, // 16: temporal.server.api.matchingservice.v1.MatchingService.UpdateWorkerBuildIdCompatibility:input_type -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityRequest + 17, // 17: temporal.server.api.matchingservice.v1.MatchingService.GetWorkerBuildIdCompatibility:input_type -> temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityRequest + 18, // 18: temporal.server.api.matchingservice.v1.MatchingService.GetTaskQueueUserData:input_type -> temporal.server.api.matchingservice.v1.GetTaskQueueUserDataRequest + 19, // 19: temporal.server.api.matchingservice.v1.MatchingService.UpdateWorkerVersioningRules:input_type -> temporal.server.api.matchingservice.v1.UpdateWorkerVersioningRulesRequest + 20, 
// 20: temporal.server.api.matchingservice.v1.MatchingService.GetWorkerVersioningRules:input_type -> temporal.server.api.matchingservice.v1.GetWorkerVersioningRulesRequest + 21, // 21: temporal.server.api.matchingservice.v1.MatchingService.SyncDeploymentUserData:input_type -> temporal.server.api.matchingservice.v1.SyncDeploymentUserDataRequest + 22, // 22: temporal.server.api.matchingservice.v1.MatchingService.ApplyTaskQueueUserDataReplicationEvent:input_type -> temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventRequest + 23, // 23: temporal.server.api.matchingservice.v1.MatchingService.GetBuildIdTaskQueueMapping:input_type -> temporal.server.api.matchingservice.v1.GetBuildIdTaskQueueMappingRequest + 24, // 24: temporal.server.api.matchingservice.v1.MatchingService.ForceLoadTaskQueuePartition:input_type -> temporal.server.api.matchingservice.v1.ForceLoadTaskQueuePartitionRequest + 25, // 25: temporal.server.api.matchingservice.v1.MatchingService.ForceUnloadTaskQueue:input_type -> temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueRequest + 26, // 26: temporal.server.api.matchingservice.v1.MatchingService.ForceUnloadTaskQueuePartition:input_type -> temporal.server.api.matchingservice.v1.ForceUnloadTaskQueuePartitionRequest + 27, // 27: temporal.server.api.matchingservice.v1.MatchingService.UpdateTaskQueueUserData:input_type -> temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataRequest + 28, // 28: temporal.server.api.matchingservice.v1.MatchingService.ReplicateTaskQueueUserData:input_type -> temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataRequest + 29, // 29: temporal.server.api.matchingservice.v1.MatchingService.CheckTaskQueueUserDataPropagation:input_type -> temporal.server.api.matchingservice.v1.CheckTaskQueueUserDataPropagationRequest + 30, // 30: temporal.server.api.matchingservice.v1.MatchingService.CreateNexusEndpoint:input_type -> temporal.server.api.matchingservice.v1.CreateNexusEndpointRequest + 
31, // 31: temporal.server.api.matchingservice.v1.MatchingService.UpdateNexusEndpoint:input_type -> temporal.server.api.matchingservice.v1.UpdateNexusEndpointRequest + 32, // 32: temporal.server.api.matchingservice.v1.MatchingService.DeleteNexusEndpoint:input_type -> temporal.server.api.matchingservice.v1.DeleteNexusEndpointRequest + 33, // 33: temporal.server.api.matchingservice.v1.MatchingService.ListNexusEndpoints:input_type -> temporal.server.api.matchingservice.v1.ListNexusEndpointsRequest + 34, // 34: temporal.server.api.matchingservice.v1.MatchingService.RecordWorkerHeartbeat:input_type -> temporal.server.api.matchingservice.v1.RecordWorkerHeartbeatRequest + 35, // 35: temporal.server.api.matchingservice.v1.MatchingService.ListWorkers:input_type -> temporal.server.api.matchingservice.v1.ListWorkersRequest + 36, // 36: temporal.server.api.matchingservice.v1.MatchingService.UpdateTaskQueueConfig:input_type -> temporal.server.api.matchingservice.v1.UpdateTaskQueueConfigRequest + 37, // 37: temporal.server.api.matchingservice.v1.MatchingService.DescribeWorker:input_type -> temporal.server.api.matchingservice.v1.DescribeWorkerRequest + 38, // 38: temporal.server.api.matchingservice.v1.MatchingService.UpdateFairnessState:input_type -> temporal.server.api.matchingservice.v1.UpdateFairnessStateRequest + 39, // 39: temporal.server.api.matchingservice.v1.MatchingService.CheckTaskQueueVersionMembership:input_type -> temporal.server.api.matchingservice.v1.CheckTaskQueueVersionMembershipRequest + 40, // 40: temporal.server.api.matchingservice.v1.MatchingService.PollWorkflowTaskQueue:output_type -> temporal.server.api.matchingservice.v1.PollWorkflowTaskQueueResponse + 41, // 41: temporal.server.api.matchingservice.v1.MatchingService.PollActivityTaskQueue:output_type -> temporal.server.api.matchingservice.v1.PollActivityTaskQueueResponse + 42, // 42: temporal.server.api.matchingservice.v1.MatchingService.AddWorkflowTask:output_type -> 
temporal.server.api.matchingservice.v1.AddWorkflowTaskResponse + 43, // 43: temporal.server.api.matchingservice.v1.MatchingService.AddActivityTask:output_type -> temporal.server.api.matchingservice.v1.AddActivityTaskResponse + 44, // 44: temporal.server.api.matchingservice.v1.MatchingService.QueryWorkflow:output_type -> temporal.server.api.matchingservice.v1.QueryWorkflowResponse + 45, // 45: temporal.server.api.matchingservice.v1.MatchingService.RespondQueryTaskCompleted:output_type -> temporal.server.api.matchingservice.v1.RespondQueryTaskCompletedResponse + 46, // 46: temporal.server.api.matchingservice.v1.MatchingService.DispatchNexusTask:output_type -> temporal.server.api.matchingservice.v1.DispatchNexusTaskResponse + 47, // 47: temporal.server.api.matchingservice.v1.MatchingService.PollNexusTaskQueue:output_type -> temporal.server.api.matchingservice.v1.PollNexusTaskQueueResponse + 48, // 48: temporal.server.api.matchingservice.v1.MatchingService.RespondNexusTaskCompleted:output_type -> temporal.server.api.matchingservice.v1.RespondNexusTaskCompletedResponse + 49, // 49: temporal.server.api.matchingservice.v1.MatchingService.RespondNexusTaskFailed:output_type -> temporal.server.api.matchingservice.v1.RespondNexusTaskFailedResponse + 50, // 50: temporal.server.api.matchingservice.v1.MatchingService.CancelOutstandingPoll:output_type -> temporal.server.api.matchingservice.v1.CancelOutstandingPollResponse + 51, // 51: temporal.server.api.matchingservice.v1.MatchingService.CancelOutstandingWorkerPolls:output_type -> temporal.server.api.matchingservice.v1.CancelOutstandingWorkerPollsResponse + 52, // 52: temporal.server.api.matchingservice.v1.MatchingService.DescribeTaskQueue:output_type -> temporal.server.api.matchingservice.v1.DescribeTaskQueueResponse + 53, // 53: temporal.server.api.matchingservice.v1.MatchingService.DescribeTaskQueuePartition:output_type -> temporal.server.api.matchingservice.v1.DescribeTaskQueuePartitionResponse + 54, // 54: 
temporal.server.api.matchingservice.v1.MatchingService.DescribeVersionedTaskQueues:output_type -> temporal.server.api.matchingservice.v1.DescribeVersionedTaskQueuesResponse + 55, // 55: temporal.server.api.matchingservice.v1.MatchingService.ListTaskQueuePartitions:output_type -> temporal.server.api.matchingservice.v1.ListTaskQueuePartitionsResponse + 56, // 56: temporal.server.api.matchingservice.v1.MatchingService.UpdateWorkerBuildIdCompatibility:output_type -> temporal.server.api.matchingservice.v1.UpdateWorkerBuildIdCompatibilityResponse + 57, // 57: temporal.server.api.matchingservice.v1.MatchingService.GetWorkerBuildIdCompatibility:output_type -> temporal.server.api.matchingservice.v1.GetWorkerBuildIdCompatibilityResponse + 58, // 58: temporal.server.api.matchingservice.v1.MatchingService.GetTaskQueueUserData:output_type -> temporal.server.api.matchingservice.v1.GetTaskQueueUserDataResponse + 59, // 59: temporal.server.api.matchingservice.v1.MatchingService.UpdateWorkerVersioningRules:output_type -> temporal.server.api.matchingservice.v1.UpdateWorkerVersioningRulesResponse + 60, // 60: temporal.server.api.matchingservice.v1.MatchingService.GetWorkerVersioningRules:output_type -> temporal.server.api.matchingservice.v1.GetWorkerVersioningRulesResponse + 61, // 61: temporal.server.api.matchingservice.v1.MatchingService.SyncDeploymentUserData:output_type -> temporal.server.api.matchingservice.v1.SyncDeploymentUserDataResponse + 62, // 62: temporal.server.api.matchingservice.v1.MatchingService.ApplyTaskQueueUserDataReplicationEvent:output_type -> temporal.server.api.matchingservice.v1.ApplyTaskQueueUserDataReplicationEventResponse + 63, // 63: temporal.server.api.matchingservice.v1.MatchingService.GetBuildIdTaskQueueMapping:output_type -> temporal.server.api.matchingservice.v1.GetBuildIdTaskQueueMappingResponse + 64, // 64: temporal.server.api.matchingservice.v1.MatchingService.ForceLoadTaskQueuePartition:output_type -> 
temporal.server.api.matchingservice.v1.ForceLoadTaskQueuePartitionResponse + 65, // 65: temporal.server.api.matchingservice.v1.MatchingService.ForceUnloadTaskQueue:output_type -> temporal.server.api.matchingservice.v1.ForceUnloadTaskQueueResponse + 66, // 66: temporal.server.api.matchingservice.v1.MatchingService.ForceUnloadTaskQueuePartition:output_type -> temporal.server.api.matchingservice.v1.ForceUnloadTaskQueuePartitionResponse + 67, // 67: temporal.server.api.matchingservice.v1.MatchingService.UpdateTaskQueueUserData:output_type -> temporal.server.api.matchingservice.v1.UpdateTaskQueueUserDataResponse + 68, // 68: temporal.server.api.matchingservice.v1.MatchingService.ReplicateTaskQueueUserData:output_type -> temporal.server.api.matchingservice.v1.ReplicateTaskQueueUserDataResponse + 69, // 69: temporal.server.api.matchingservice.v1.MatchingService.CheckTaskQueueUserDataPropagation:output_type -> temporal.server.api.matchingservice.v1.CheckTaskQueueUserDataPropagationResponse + 70, // 70: temporal.server.api.matchingservice.v1.MatchingService.CreateNexusEndpoint:output_type -> temporal.server.api.matchingservice.v1.CreateNexusEndpointResponse + 71, // 71: temporal.server.api.matchingservice.v1.MatchingService.UpdateNexusEndpoint:output_type -> temporal.server.api.matchingservice.v1.UpdateNexusEndpointResponse + 72, // 72: temporal.server.api.matchingservice.v1.MatchingService.DeleteNexusEndpoint:output_type -> temporal.server.api.matchingservice.v1.DeleteNexusEndpointResponse + 73, // 73: temporal.server.api.matchingservice.v1.MatchingService.ListNexusEndpoints:output_type -> temporal.server.api.matchingservice.v1.ListNexusEndpointsResponse + 74, // 74: temporal.server.api.matchingservice.v1.MatchingService.RecordWorkerHeartbeat:output_type -> temporal.server.api.matchingservice.v1.RecordWorkerHeartbeatResponse + 75, // 75: temporal.server.api.matchingservice.v1.MatchingService.ListWorkers:output_type -> 
temporal.server.api.matchingservice.v1.ListWorkersResponse + 76, // 76: temporal.server.api.matchingservice.v1.MatchingService.UpdateTaskQueueConfig:output_type -> temporal.server.api.matchingservice.v1.UpdateTaskQueueConfigResponse + 77, // 77: temporal.server.api.matchingservice.v1.MatchingService.DescribeWorker:output_type -> temporal.server.api.matchingservice.v1.DescribeWorkerResponse + 78, // 78: temporal.server.api.matchingservice.v1.MatchingService.UpdateFairnessState:output_type -> temporal.server.api.matchingservice.v1.UpdateFairnessStateResponse + 79, // 79: temporal.server.api.matchingservice.v1.MatchingService.CheckTaskQueueVersionMembership:output_type -> temporal.server.api.matchingservice.v1.CheckTaskQueueVersionMembershipResponse + 40, // [40:80] is the sub-list for method output_type + 0, // [0:40] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -456,7 +249,7 @@ func file_temporal_server_api_matchingservice_v1_service_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_matchingservice_v1_service_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_matchingservice_v1_service_proto_rawDesc), len(file_temporal_server_api_matchingservice_v1_service_proto_rawDesc)), NumEnums: 0, NumMessages: 0, NumExtensions: 0, @@ -466,7 +259,6 @@ func file_temporal_server_api_matchingservice_v1_service_proto_init() { DependencyIndexes: file_temporal_server_api_matchingservice_v1_service_proto_depIdxs, }.Build() File_temporal_server_api_matchingservice_v1_service_proto = out.File - file_temporal_server_api_matchingservice_v1_service_proto_rawDesc = nil file_temporal_server_api_matchingservice_v1_service_proto_goTypes = nil 
file_temporal_server_api_matchingservice_v1_service_proto_depIdxs = nil } diff --git a/api/matchingservice/v1/service_grpc.pb.go b/api/matchingservice/v1/service_grpc.pb.go index b476b7e48a3..a04f9dc4e47 100644 --- a/api/matchingservice/v1/service_grpc.pb.go +++ b/api/matchingservice/v1/service_grpc.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// plugins: // - protoc-gen-go-grpc @@ -53,20 +31,35 @@ const ( MatchingService_RespondNexusTaskCompleted_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/RespondNexusTaskCompleted" MatchingService_RespondNexusTaskFailed_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/RespondNexusTaskFailed" MatchingService_CancelOutstandingPoll_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/CancelOutstandingPoll" + MatchingService_CancelOutstandingWorkerPolls_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/CancelOutstandingWorkerPolls" MatchingService_DescribeTaskQueue_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/DescribeTaskQueue" + MatchingService_DescribeTaskQueuePartition_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/DescribeTaskQueuePartition" + MatchingService_DescribeVersionedTaskQueues_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/DescribeVersionedTaskQueues" MatchingService_ListTaskQueuePartitions_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ListTaskQueuePartitions" MatchingService_UpdateWorkerBuildIdCompatibility_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateWorkerBuildIdCompatibility" MatchingService_GetWorkerBuildIdCompatibility_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/GetWorkerBuildIdCompatibility" MatchingService_GetTaskQueueUserData_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/GetTaskQueueUserData" + MatchingService_UpdateWorkerVersioningRules_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateWorkerVersioningRules" + MatchingService_GetWorkerVersioningRules_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/GetWorkerVersioningRules" + MatchingService_SyncDeploymentUserData_FullMethodName = 
"/temporal.server.api.matchingservice.v1.MatchingService/SyncDeploymentUserData" MatchingService_ApplyTaskQueueUserDataReplicationEvent_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ApplyTaskQueueUserDataReplicationEvent" MatchingService_GetBuildIdTaskQueueMapping_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/GetBuildIdTaskQueueMapping" + MatchingService_ForceLoadTaskQueuePartition_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ForceLoadTaskQueuePartition" MatchingService_ForceUnloadTaskQueue_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ForceUnloadTaskQueue" + MatchingService_ForceUnloadTaskQueuePartition_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ForceUnloadTaskQueuePartition" MatchingService_UpdateTaskQueueUserData_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateTaskQueueUserData" MatchingService_ReplicateTaskQueueUserData_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ReplicateTaskQueueUserData" - MatchingService_CreateNexusIncomingService_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/CreateNexusIncomingService" - MatchingService_UpdateNexusIncomingService_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateNexusIncomingService" - MatchingService_DeleteNexusIncomingService_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/DeleteNexusIncomingService" - MatchingService_ListNexusIncomingServices_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ListNexusIncomingServices" + MatchingService_CheckTaskQueueUserDataPropagation_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/CheckTaskQueueUserDataPropagation" + MatchingService_CreateNexusEndpoint_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/CreateNexusEndpoint" + 
MatchingService_UpdateNexusEndpoint_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateNexusEndpoint" + MatchingService_DeleteNexusEndpoint_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/DeleteNexusEndpoint" + MatchingService_ListNexusEndpoints_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ListNexusEndpoints" + MatchingService_RecordWorkerHeartbeat_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/RecordWorkerHeartbeat" + MatchingService_ListWorkers_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/ListWorkers" + MatchingService_UpdateTaskQueueConfig_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateTaskQueueConfig" + MatchingService_DescribeWorker_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/DescribeWorker" + MatchingService_UpdateFairnessState_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/UpdateFairnessState" + MatchingService_CheckTaskQueueVersionMembership_FullMethodName = "/temporal.server.api.matchingservice.v1.MatchingService/CheckTaskQueueVersionMembership" ) // MatchingServiceClient is the client API for MatchingService service. @@ -105,9 +98,20 @@ type MatchingServiceClient interface { // api call to matching it passes in a pollerId and then calls this API when it detects client connection is closed // to unblock long polls for this poller and prevent tasks being sent to these zombie pollers. CancelOutstandingPoll(ctx context.Context, in *CancelOutstandingPollRequest, opts ...grpc.CallOption) (*CancelOutstandingPollResponse, error) + // CancelOutstandingWorkerPolls cancels any outstanding polls for a given worker instance key. + // These polls could be waiting on different partitions of the task queue. + // This is called during worker shutdown to eagerly cancel polls and avoid giving out tasks to workers that are shutting down. 
+ // Note: This only cancels polls that are currently outstanding. The caller must ensure no new polls + // are issued after calling this RPC, otherwise those polls will not be cancelled. + CancelOutstandingWorkerPolls(ctx context.Context, in *CancelOutstandingWorkerPollsRequest, opts ...grpc.CallOption) (*CancelOutstandingWorkerPollsResponse, error) // DescribeTaskQueue returns information about the target task queue, right now this API returns the // pollers which polled this task queue in last few minutes. DescribeTaskQueue(ctx context.Context, in *DescribeTaskQueueRequest, opts ...grpc.CallOption) (*DescribeTaskQueueResponse, error) + // DescribeTaskQueuePartition returns information about the target task queue partition. + DescribeTaskQueuePartition(ctx context.Context, in *DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption) (*DescribeTaskQueuePartitionResponse, error) + // DescribeVersionedTaskQueues returns details about the requested versioned task queues. + // It is an internal API; there is no direct user-facing equivalent. + DescribeVersionedTaskQueues(ctx context.Context, in *DescribeVersionedTaskQueuesRequest, opts ...grpc.CallOption) (*DescribeVersionedTaskQueuesResponse, error) // ListTaskQueuePartitions returns a map of partitionKey and hostAddress for a task queue. ListTaskQueuePartitions(ctx context.Context, in *ListTaskQueuePartitionsRequest, opts ...grpc.CallOption) (*ListTaskQueuePartitionsResponse, error) // (-- api-linter: core::0134::response-message-name=disabled @@ -121,12 +125,40 @@ type MatchingServiceClient interface { GetWorkerBuildIdCompatibility(ctx context.Context, in *GetWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*GetWorkerBuildIdCompatibilityResponse, error) // Fetch user data for a task queue, this request should always be routed to the node holding the root partition of the workflow task queue. 
GetTaskQueueUserData(ctx context.Context, in *GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*GetTaskQueueUserDataResponse, error) + // Allows updating the Build ID assignment and redirect rules for a given Task Queue. + // (-- api-linter: core::0134::method-signature=disabled + // + // aip.dev/not-precedent: UpdateWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::response-message-name=disabled + // + // aip.dev/not-precedent: UpdateWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + UpdateWorkerVersioningRules(ctx context.Context, in *UpdateWorkerVersioningRulesRequest, opts ...grpc.CallOption) (*UpdateWorkerVersioningRulesResponse, error) + // Fetches the Build ID assignment and redirect rules for a Task Queue + // (-- api-linter: core::0127::resource-name-extraction=disabled + // + // aip.dev/not-precedent: GetWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0131::http-uri-name=disabled + // + // aip.dev/not-precedent: GetWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + GetWorkerVersioningRules(ctx context.Context, in *GetWorkerVersioningRulesRequest, opts ...grpc.CallOption) (*GetWorkerVersioningRulesResponse, error) + // This request should always be routed to the node holding the root partition of the workflow task queue. + SyncDeploymentUserData(ctx context.Context, in *SyncDeploymentUserDataRequest, opts ...grpc.CallOption) (*SyncDeploymentUserDataResponse, error) // Apply a user data replication event. 
ApplyTaskQueueUserDataReplicationEvent(ctx context.Context, in *ApplyTaskQueueUserDataReplicationEventRequest, opts ...grpc.CallOption) (*ApplyTaskQueueUserDataReplicationEventResponse, error) // Gets all task queue names mapped to a given build ID GetBuildIdTaskQueueMapping(ctx context.Context, in *GetBuildIdTaskQueueMappingRequest, opts ...grpc.CallOption) (*GetBuildIdTaskQueueMappingResponse, error) - // Force unloading a task queue. Used for testing only. + // Force loading a task queue partition. Used by matching node owning root partition. + // When root partition is loaded this is called for all child partitions. + // This addresses the posibility of unloaded child partitions having backlog, + // but not being forwarded/synced to the root partition to find the polling + // worker which triggered the root partition being loaded in the first place. + ForceLoadTaskQueuePartition(ctx context.Context, in *ForceLoadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*ForceLoadTaskQueuePartitionResponse, error) + // TODO Shivam - remove this in 123. Present for backwards compatibility. ForceUnloadTaskQueue(ctx context.Context, in *ForceUnloadTaskQueueRequest, opts ...grpc.CallOption) (*ForceUnloadTaskQueueResponse, error) + // Force unloading a task queue partition. + ForceUnloadTaskQueuePartition(ctx context.Context, in *ForceUnloadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*ForceUnloadTaskQueuePartitionResponse, error) // Update task queue user data in owning node for all updates in namespace. // All user data updates must first go through the task queue owner using the `UpdateWorkerBuildIdCompatibility` // API. @@ -140,38 +172,80 @@ type MatchingServiceClient interface { UpdateTaskQueueUserData(ctx context.Context, in *UpdateTaskQueueUserDataRequest, opts ...grpc.CallOption) (*UpdateTaskQueueUserDataResponse, error) // Replicate task queue user data across clusters, must be done via the owning node for updates in namespace. 
ReplicateTaskQueueUserData(ctx context.Context, in *ReplicateTaskQueueUserDataRequest, opts ...grpc.CallOption) (*ReplicateTaskQueueUserDataResponse, error) - // Optimistically create or update a Nexus incoming service based on provided version. - // Set version to 0 to create a new service. - // If this request is accepted, the input is considered the "current" state of this service at the time it was - // persisted and the updated version is returned. + // Blocks on user data propagation to all loaded partitions. If successful, all loaded + // workflow + activity partitions have the requested version or higher. + // Routed to user data owner (root partition of workflow task queue). + CheckTaskQueueUserDataPropagation(ctx context.Context, in *CheckTaskQueueUserDataPropagationRequest, opts ...grpc.CallOption) (*CheckTaskQueueUserDataPropagationResponse, error) + // Create a Nexus endpoint. // (-- api-linter: core::0133::method-signature=disabled // - // aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0133::response-message-name=disabled // - // aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0133::http-uri-parent=disabled // - // aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) - CreateNexusIncomingService(ctx context.Context, in *CreateNexusIncomingServiceRequest, opts ...grpc.CallOption) (*CreateNexusIncomingServiceResponse, error) + // aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. 
--) + CreateNexusEndpoint(ctx context.Context, in *CreateNexusEndpointRequest, opts ...grpc.CallOption) (*CreateNexusEndpointResponse, error) + // Optimistically update a Nexus endpoint based on provided version. + // If this request is accepted, the input is considered the "current" state of this service at the time it was + // persisted and the updated version is returned. // (-- api-linter: core::0134::method-signature=disabled // - // aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0134::response-message-name=disabled // - // aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0134::request-resource-required=disabled // - // aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) - UpdateNexusIncomingService(ctx context.Context, in *UpdateNexusIncomingServiceRequest, opts ...grpc.CallOption) (*UpdateNexusIncomingServiceResponse, error) + // aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) + UpdateNexusEndpoint(ctx context.Context, in *UpdateNexusEndpointRequest, opts ...grpc.CallOption) (*UpdateNexusEndpointResponse, error) // Delete a service by its name. - DeleteNexusIncomingService(ctx context.Context, in *DeleteNexusIncomingServiceRequest, opts ...grpc.CallOption) (*DeleteNexusIncomingServiceResponse, error) + DeleteNexusEndpoint(ctx context.Context, in *DeleteNexusEndpointRequest, opts ...grpc.CallOption) (*DeleteNexusEndpointResponse, error) // List all registered services. 
- ListNexusIncomingServices(ctx context.Context, in *ListNexusIncomingServicesRequest, opts ...grpc.CallOption) (*ListNexusIncomingServicesResponse, error) + ListNexusEndpoints(ctx context.Context, in *ListNexusEndpointsRequest, opts ...grpc.CallOption) (*ListNexusEndpointsResponse, error) + // RecordWorkerHeartbeat receive heartbeat request from the worker. + RecordWorkerHeartbeat(ctx context.Context, in *RecordWorkerHeartbeatRequest, opts ...grpc.CallOption) (*RecordWorkerHeartbeatResponse, error) + // ListWorkers retrieves a list of workers in the specified namespace that match the provided filters. + // Supports pagination for large result sets. Returns an empty list if no workers match the criteria. + // Returns an error if the namespace doesn't exist. + ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) + // Set the persisted task queue configuration. + // (-- api-linter: core::0134::method-signature=disabled + // + // aip.dev/not-precedent: UpdateTaskQueueConfig RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::response-message-name=disabled + // + // aip.dev/not-precedent: UpdateTaskQueueConfig RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::request-resource-required=disabled + // + // aip.dev/not-precedent: UpdateTaskQueueConfig RPC doesn't follow Google API format. --) + UpdateTaskQueueConfig(ctx context.Context, in *UpdateTaskQueueConfigRequest, opts ...grpc.CallOption) (*UpdateTaskQueueConfigResponse, error) + // DescribeWorker retrieves a worker information in the specified namespace that match the provided instance key. + // Returns an error if the namespace or worker doesn't exist. + DescribeWorker(ctx context.Context, in *DescribeWorkerRequest, opts ...grpc.CallOption) (*DescribeWorkerResponse, error) + // UpdateFairnessState changes the fairness_state stored in UserData for automatically enabling + // priority and fairness. 
+ // (-- api-linter: core::0134::method-signature=disabled + // + // aip.dev/not-precedent: UpdateFairnessState RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::response-message-name=disabled + // + // aip.dev/not-precedent: UpdateFairnessState RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::request-resource-required=disabled + // + // aip.dev/not-precedent: UpdateFairnessState RPC doesn't follow Google API format. --) + UpdateFairnessState(ctx context.Context, in *UpdateFairnessStateRequest, opts ...grpc.CallOption) (*UpdateFairnessStateResponse, error) + // CheckTaskQueueVersionMembership checks if a task queue is part of a specific deployment version. + CheckTaskQueueVersionMembership(ctx context.Context, in *CheckTaskQueueVersionMembershipRequest, opts ...grpc.CallOption) (*CheckTaskQueueVersionMembershipResponse, error) } type matchingServiceClient struct { @@ -281,6 +355,15 @@ func (c *matchingServiceClient) CancelOutstandingPoll(ctx context.Context, in *C return out, nil } +func (c *matchingServiceClient) CancelOutstandingWorkerPolls(ctx context.Context, in *CancelOutstandingWorkerPollsRequest, opts ...grpc.CallOption) (*CancelOutstandingWorkerPollsResponse, error) { + out := new(CancelOutstandingWorkerPollsResponse) + err := c.cc.Invoke(ctx, MatchingService_CancelOutstandingWorkerPolls_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *matchingServiceClient) DescribeTaskQueue(ctx context.Context, in *DescribeTaskQueueRequest, opts ...grpc.CallOption) (*DescribeTaskQueueResponse, error) { out := new(DescribeTaskQueueResponse) err := c.cc.Invoke(ctx, MatchingService_DescribeTaskQueue_FullMethodName, in, out, opts...) 
@@ -290,6 +373,24 @@ func (c *matchingServiceClient) DescribeTaskQueue(ctx context.Context, in *Descr return out, nil } +func (c *matchingServiceClient) DescribeTaskQueuePartition(ctx context.Context, in *DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption) (*DescribeTaskQueuePartitionResponse, error) { + out := new(DescribeTaskQueuePartitionResponse) + err := c.cc.Invoke(ctx, MatchingService_DescribeTaskQueuePartition_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) DescribeVersionedTaskQueues(ctx context.Context, in *DescribeVersionedTaskQueuesRequest, opts ...grpc.CallOption) (*DescribeVersionedTaskQueuesResponse, error) { + out := new(DescribeVersionedTaskQueuesResponse) + err := c.cc.Invoke(ctx, MatchingService_DescribeVersionedTaskQueues_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *matchingServiceClient) ListTaskQueuePartitions(ctx context.Context, in *ListTaskQueuePartitionsRequest, opts ...grpc.CallOption) (*ListTaskQueuePartitionsResponse, error) { out := new(ListTaskQueuePartitionsResponse) err := c.cc.Invoke(ctx, MatchingService_ListTaskQueuePartitions_FullMethodName, in, out, opts...) @@ -326,6 +427,33 @@ func (c *matchingServiceClient) GetTaskQueueUserData(ctx context.Context, in *Ge return out, nil } +func (c *matchingServiceClient) UpdateWorkerVersioningRules(ctx context.Context, in *UpdateWorkerVersioningRulesRequest, opts ...grpc.CallOption) (*UpdateWorkerVersioningRulesResponse, error) { + out := new(UpdateWorkerVersioningRulesResponse) + err := c.cc.Invoke(ctx, MatchingService_UpdateWorkerVersioningRules_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) GetWorkerVersioningRules(ctx context.Context, in *GetWorkerVersioningRulesRequest, opts ...grpc.CallOption) (*GetWorkerVersioningRulesResponse, error) { + out := new(GetWorkerVersioningRulesResponse) + err := c.cc.Invoke(ctx, MatchingService_GetWorkerVersioningRules_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) SyncDeploymentUserData(ctx context.Context, in *SyncDeploymentUserDataRequest, opts ...grpc.CallOption) (*SyncDeploymentUserDataResponse, error) { + out := new(SyncDeploymentUserDataResponse) + err := c.cc.Invoke(ctx, MatchingService_SyncDeploymentUserData_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *matchingServiceClient) ApplyTaskQueueUserDataReplicationEvent(ctx context.Context, in *ApplyTaskQueueUserDataReplicationEventRequest, opts ...grpc.CallOption) (*ApplyTaskQueueUserDataReplicationEventResponse, error) { out := new(ApplyTaskQueueUserDataReplicationEventResponse) err := c.cc.Invoke(ctx, MatchingService_ApplyTaskQueueUserDataReplicationEvent_FullMethodName, in, out, opts...) @@ -344,6 +472,15 @@ func (c *matchingServiceClient) GetBuildIdTaskQueueMapping(ctx context.Context, return out, nil } +func (c *matchingServiceClient) ForceLoadTaskQueuePartition(ctx context.Context, in *ForceLoadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*ForceLoadTaskQueuePartitionResponse, error) { + out := new(ForceLoadTaskQueuePartitionResponse) + err := c.cc.Invoke(ctx, MatchingService_ForceLoadTaskQueuePartition_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *matchingServiceClient) ForceUnloadTaskQueue(ctx context.Context, in *ForceUnloadTaskQueueRequest, opts ...grpc.CallOption) (*ForceUnloadTaskQueueResponse, error) { out := new(ForceUnloadTaskQueueResponse) err := c.cc.Invoke(ctx, MatchingService_ForceUnloadTaskQueue_FullMethodName, in, out, opts...) @@ -353,6 +490,15 @@ func (c *matchingServiceClient) ForceUnloadTaskQueue(ctx context.Context, in *Fo return out, nil } +func (c *matchingServiceClient) ForceUnloadTaskQueuePartition(ctx context.Context, in *ForceUnloadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*ForceUnloadTaskQueuePartitionResponse, error) { + out := new(ForceUnloadTaskQueuePartitionResponse) + err := c.cc.Invoke(ctx, MatchingService_ForceUnloadTaskQueuePartition_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *matchingServiceClient) UpdateTaskQueueUserData(ctx context.Context, in *UpdateTaskQueueUserDataRequest, opts ...grpc.CallOption) (*UpdateTaskQueueUserDataResponse, error) { out := new(UpdateTaskQueueUserDataResponse) err := c.cc.Invoke(ctx, MatchingService_UpdateTaskQueueUserData_FullMethodName, in, out, opts...) @@ -371,36 +517,99 @@ func (c *matchingServiceClient) ReplicateTaskQueueUserData(ctx context.Context, return out, nil } -func (c *matchingServiceClient) CreateNexusIncomingService(ctx context.Context, in *CreateNexusIncomingServiceRequest, opts ...grpc.CallOption) (*CreateNexusIncomingServiceResponse, error) { - out := new(CreateNexusIncomingServiceResponse) - err := c.cc.Invoke(ctx, MatchingService_CreateNexusIncomingService_FullMethodName, in, out, opts...) 
+func (c *matchingServiceClient) CheckTaskQueueUserDataPropagation(ctx context.Context, in *CheckTaskQueueUserDataPropagationRequest, opts ...grpc.CallOption) (*CheckTaskQueueUserDataPropagationResponse, error) { + out := new(CheckTaskQueueUserDataPropagationResponse) + err := c.cc.Invoke(ctx, MatchingService_CheckTaskQueueUserDataPropagation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) CreateNexusEndpoint(ctx context.Context, in *CreateNexusEndpointRequest, opts ...grpc.CallOption) (*CreateNexusEndpointResponse, error) { + out := new(CreateNexusEndpointResponse) + err := c.cc.Invoke(ctx, MatchingService_CreateNexusEndpoint_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) UpdateNexusEndpoint(ctx context.Context, in *UpdateNexusEndpointRequest, opts ...grpc.CallOption) (*UpdateNexusEndpointResponse, error) { + out := new(UpdateNexusEndpointResponse) + err := c.cc.Invoke(ctx, MatchingService_UpdateNexusEndpoint_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) DeleteNexusEndpoint(ctx context.Context, in *DeleteNexusEndpointRequest, opts ...grpc.CallOption) (*DeleteNexusEndpointResponse, error) { + out := new(DeleteNexusEndpointResponse) + err := c.cc.Invoke(ctx, MatchingService_DeleteNexusEndpoint_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) ListNexusEndpoints(ctx context.Context, in *ListNexusEndpointsRequest, opts ...grpc.CallOption) (*ListNexusEndpointsResponse, error) { + out := new(ListNexusEndpointsResponse) + err := c.cc.Invoke(ctx, MatchingService_ListNexusEndpoints_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) RecordWorkerHeartbeat(ctx context.Context, in *RecordWorkerHeartbeatRequest, opts ...grpc.CallOption) (*RecordWorkerHeartbeatResponse, error) { + out := new(RecordWorkerHeartbeatResponse) + err := c.cc.Invoke(ctx, MatchingService_RecordWorkerHeartbeat_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { + out := new(ListWorkersResponse) + err := c.cc.Invoke(ctx, MatchingService_ListWorkers_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *matchingServiceClient) UpdateTaskQueueConfig(ctx context.Context, in *UpdateTaskQueueConfigRequest, opts ...grpc.CallOption) (*UpdateTaskQueueConfigResponse, error) { + out := new(UpdateTaskQueueConfigResponse) + err := c.cc.Invoke(ctx, MatchingService_UpdateTaskQueueConfig_FullMethodName, in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *matchingServiceClient) UpdateNexusIncomingService(ctx context.Context, in *UpdateNexusIncomingServiceRequest, opts ...grpc.CallOption) (*UpdateNexusIncomingServiceResponse, error) { - out := new(UpdateNexusIncomingServiceResponse) - err := c.cc.Invoke(ctx, MatchingService_UpdateNexusIncomingService_FullMethodName, in, out, opts...) +func (c *matchingServiceClient) DescribeWorker(ctx context.Context, in *DescribeWorkerRequest, opts ...grpc.CallOption) (*DescribeWorkerResponse, error) { + out := new(DescribeWorkerResponse) + err := c.cc.Invoke(ctx, MatchingService_DescribeWorker_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } return out, nil } -func (c *matchingServiceClient) DeleteNexusIncomingService(ctx context.Context, in *DeleteNexusIncomingServiceRequest, opts ...grpc.CallOption) (*DeleteNexusIncomingServiceResponse, error) { - out := new(DeleteNexusIncomingServiceResponse) - err := c.cc.Invoke(ctx, MatchingService_DeleteNexusIncomingService_FullMethodName, in, out, opts...) +func (c *matchingServiceClient) UpdateFairnessState(ctx context.Context, in *UpdateFairnessStateRequest, opts ...grpc.CallOption) (*UpdateFairnessStateResponse, error) { + out := new(UpdateFairnessStateResponse) + err := c.cc.Invoke(ctx, MatchingService_UpdateFairnessState_FullMethodName, in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *matchingServiceClient) ListNexusIncomingServices(ctx context.Context, in *ListNexusIncomingServicesRequest, opts ...grpc.CallOption) (*ListNexusIncomingServicesResponse, error) { - out := new(ListNexusIncomingServicesResponse) - err := c.cc.Invoke(ctx, MatchingService_ListNexusIncomingServices_FullMethodName, in, out, opts...) +func (c *matchingServiceClient) CheckTaskQueueVersionMembership(ctx context.Context, in *CheckTaskQueueVersionMembershipRequest, opts ...grpc.CallOption) (*CheckTaskQueueVersionMembershipResponse, error) { + out := new(CheckTaskQueueVersionMembershipResponse) + err := c.cc.Invoke(ctx, MatchingService_CheckTaskQueueVersionMembership_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -413,7 +622,7 @@ func (c *matchingServiceClient) ListNexusIncomingServices(ctx context.Context, i type MatchingServiceServer interface { // PollWorkflowTaskQueue is called by frontend to process WorkflowTask from a specific task queue. A // WorkflowTask is dispatched to callers for active workflow executions, with pending workflow tasks. 
- PollWorkflowTaskQueue(context.Context, *PollWorkflowTaskQueueRequest) (*PollWorkflowTaskQueueResponse, error) + PollWorkflowTaskQueue(context.Context, *PollWorkflowTaskQueueRequest) (*PollWorkflowTaskQueueResponseWithRawHistory, error) // PollActivityTaskQueue is called by frontend to process ActivityTask from a specific task queue. ActivityTask // is dispatched to callers whenever a ScheduleTask command is made for a workflow execution. PollActivityTaskQueue(context.Context, *PollActivityTaskQueueRequest) (*PollActivityTaskQueueResponse, error) @@ -443,9 +652,20 @@ type MatchingServiceServer interface { // api call to matching it passes in a pollerId and then calls this API when it detects client connection is closed // to unblock long polls for this poller and prevent tasks being sent to these zombie pollers. CancelOutstandingPoll(context.Context, *CancelOutstandingPollRequest) (*CancelOutstandingPollResponse, error) + // CancelOutstandingWorkerPolls cancels any outstanding polls for a given worker instance key. + // These polls could be waiting on different partitions of the task queue. + // This is called during worker shutdown to eagerly cancel polls and avoid giving out tasks to workers that are shutting down. + // Note: This only cancels polls that are currently outstanding. The caller must ensure no new polls + // are issued after calling this RPC, otherwise those polls will not be cancelled. + CancelOutstandingWorkerPolls(context.Context, *CancelOutstandingWorkerPollsRequest) (*CancelOutstandingWorkerPollsResponse, error) // DescribeTaskQueue returns information about the target task queue, right now this API returns the // pollers which polled this task queue in last few minutes. DescribeTaskQueue(context.Context, *DescribeTaskQueueRequest) (*DescribeTaskQueueResponse, error) + // DescribeTaskQueuePartition returns information about the target task queue partition. 
+ DescribeTaskQueuePartition(context.Context, *DescribeTaskQueuePartitionRequest) (*DescribeTaskQueuePartitionResponse, error) + // DescribeVersionedTaskQueues returns details about the requested versioned task queues. + // It is an internal API; there is no direct user-facing equivalent. + DescribeVersionedTaskQueues(context.Context, *DescribeVersionedTaskQueuesRequest) (*DescribeVersionedTaskQueuesResponse, error) // ListTaskQueuePartitions returns a map of partitionKey and hostAddress for a task queue. ListTaskQueuePartitions(context.Context, *ListTaskQueuePartitionsRequest) (*ListTaskQueuePartitionsResponse, error) // (-- api-linter: core::0134::response-message-name=disabled @@ -459,12 +679,40 @@ type MatchingServiceServer interface { GetWorkerBuildIdCompatibility(context.Context, *GetWorkerBuildIdCompatibilityRequest) (*GetWorkerBuildIdCompatibilityResponse, error) // Fetch user data for a task queue, this request should always be routed to the node holding the root partition of the workflow task queue. GetTaskQueueUserData(context.Context, *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) + // Allows updating the Build ID assignment and redirect rules for a given Task Queue. + // (-- api-linter: core::0134::method-signature=disabled + // + // aip.dev/not-precedent: UpdateWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::response-message-name=disabled + // + // aip.dev/not-precedent: UpdateWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + UpdateWorkerVersioningRules(context.Context, *UpdateWorkerVersioningRulesRequest) (*UpdateWorkerVersioningRulesResponse, error) + // Fetches the Build ID assignment and redirect rules for a Task Queue + // (-- api-linter: core::0127::resource-name-extraction=disabled + // + // aip.dev/not-precedent: GetWorkerVersioningRulesRequest RPC doesn't follow Google API format. 
--) + // + // (-- api-linter: core::0131::http-uri-name=disabled + // + // aip.dev/not-precedent: GetWorkerVersioningRulesRequest RPC doesn't follow Google API format. --) + GetWorkerVersioningRules(context.Context, *GetWorkerVersioningRulesRequest) (*GetWorkerVersioningRulesResponse, error) + // This request should always be routed to the node holding the root partition of the workflow task queue. + SyncDeploymentUserData(context.Context, *SyncDeploymentUserDataRequest) (*SyncDeploymentUserDataResponse, error) // Apply a user data replication event. ApplyTaskQueueUserDataReplicationEvent(context.Context, *ApplyTaskQueueUserDataReplicationEventRequest) (*ApplyTaskQueueUserDataReplicationEventResponse, error) // Gets all task queue names mapped to a given build ID GetBuildIdTaskQueueMapping(context.Context, *GetBuildIdTaskQueueMappingRequest) (*GetBuildIdTaskQueueMappingResponse, error) - // Force unloading a task queue. Used for testing only. + // Force loading a task queue partition. Used by matching node owning root partition. + // When root partition is loaded this is called for all child partitions. + // This addresses the posibility of unloaded child partitions having backlog, + // but not being forwarded/synced to the root partition to find the polling + // worker which triggered the root partition being loaded in the first place. + ForceLoadTaskQueuePartition(context.Context, *ForceLoadTaskQueuePartitionRequest) (*ForceLoadTaskQueuePartitionResponse, error) + // TODO Shivam - remove this in 123. Present for backwards compatibility. ForceUnloadTaskQueue(context.Context, *ForceUnloadTaskQueueRequest) (*ForceUnloadTaskQueueResponse, error) + // Force unloading a task queue partition. + ForceUnloadTaskQueuePartition(context.Context, *ForceUnloadTaskQueuePartitionRequest) (*ForceUnloadTaskQueuePartitionResponse, error) // Update task queue user data in owning node for all updates in namespace. 
// All user data updates must first go through the task queue owner using the `UpdateWorkerBuildIdCompatibility` // API. @@ -478,38 +726,80 @@ type MatchingServiceServer interface { UpdateTaskQueueUserData(context.Context, *UpdateTaskQueueUserDataRequest) (*UpdateTaskQueueUserDataResponse, error) // Replicate task queue user data across clusters, must be done via the owning node for updates in namespace. ReplicateTaskQueueUserData(context.Context, *ReplicateTaskQueueUserDataRequest) (*ReplicateTaskQueueUserDataResponse, error) - // Optimistically create or update a Nexus incoming service based on provided version. - // Set version to 0 to create a new service. - // If this request is accepted, the input is considered the "current" state of this service at the time it was - // persisted and the updated version is returned. + // Blocks on user data propagation to all loaded partitions. If successful, all loaded + // workflow + activity partitions have the requested version or higher. + // Routed to user data owner (root partition of workflow task queue). + CheckTaskQueueUserDataPropagation(context.Context, *CheckTaskQueueUserDataPropagationRequest) (*CheckTaskQueueUserDataPropagationResponse, error) + // Create a Nexus endpoint. // (-- api-linter: core::0133::method-signature=disabled // - // aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0133::response-message-name=disabled // - // aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0133::http-uri-parent=disabled // - // aip.dev/not-precedent: CreateNexusIncomingService RPC doesn't follow Google API format. 
--) - CreateNexusIncomingService(context.Context, *CreateNexusIncomingServiceRequest) (*CreateNexusIncomingServiceResponse, error) + // aip.dev/not-precedent: CreateNexusEndpoint RPC doesn't follow Google API format. --) + CreateNexusEndpoint(context.Context, *CreateNexusEndpointRequest) (*CreateNexusEndpointResponse, error) + // Optimistically update a Nexus endpoint based on provided version. + // If this request is accepted, the input is considered the "current" state of this service at the time it was + // persisted and the updated version is returned. // (-- api-linter: core::0134::method-signature=disabled // - // aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0134::response-message-name=disabled // - // aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) + // aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) // // (-- api-linter: core::0134::request-resource-required=disabled // - // aip.dev/not-precedent: UpdateNexusIncomingService RPC doesn't follow Google API format. --) - UpdateNexusIncomingService(context.Context, *UpdateNexusIncomingServiceRequest) (*UpdateNexusIncomingServiceResponse, error) + // aip.dev/not-precedent: UpdateNexusEndpoint RPC doesn't follow Google API format. --) + UpdateNexusEndpoint(context.Context, *UpdateNexusEndpointRequest) (*UpdateNexusEndpointResponse, error) // Delete a service by its name. - DeleteNexusIncomingService(context.Context, *DeleteNexusIncomingServiceRequest) (*DeleteNexusIncomingServiceResponse, error) + DeleteNexusEndpoint(context.Context, *DeleteNexusEndpointRequest) (*DeleteNexusEndpointResponse, error) // List all registered services. 
- ListNexusIncomingServices(context.Context, *ListNexusIncomingServicesRequest) (*ListNexusIncomingServicesResponse, error) + ListNexusEndpoints(context.Context, *ListNexusEndpointsRequest) (*ListNexusEndpointsResponse, error) + // RecordWorkerHeartbeat receive heartbeat request from the worker. + RecordWorkerHeartbeat(context.Context, *RecordWorkerHeartbeatRequest) (*RecordWorkerHeartbeatResponse, error) + // ListWorkers retrieves a list of workers in the specified namespace that match the provided filters. + // Supports pagination for large result sets. Returns an empty list if no workers match the criteria. + // Returns an error if the namespace doesn't exist. + ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) + // Set the persisted task queue configuration. + // (-- api-linter: core::0134::method-signature=disabled + // + // aip.dev/not-precedent: UpdateTaskQueueConfig RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::response-message-name=disabled + // + // aip.dev/not-precedent: UpdateTaskQueueConfig RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::request-resource-required=disabled + // + // aip.dev/not-precedent: UpdateTaskQueueConfig RPC doesn't follow Google API format. --) + UpdateTaskQueueConfig(context.Context, *UpdateTaskQueueConfigRequest) (*UpdateTaskQueueConfigResponse, error) + // DescribeWorker retrieves a worker information in the specified namespace that match the provided instance key. + // Returns an error if the namespace or worker doesn't exist. + DescribeWorker(context.Context, *DescribeWorkerRequest) (*DescribeWorkerResponse, error) + // UpdateFairnessState changes the fairness_state stored in UserData for automatically enabling + // priority and fairness. + // (-- api-linter: core::0134::method-signature=disabled + // + // aip.dev/not-precedent: UpdateFairnessState RPC doesn't follow Google API format. 
--) + // + // (-- api-linter: core::0134::response-message-name=disabled + // + // aip.dev/not-precedent: UpdateFairnessState RPC doesn't follow Google API format. --) + // + // (-- api-linter: core::0134::request-resource-required=disabled + // + // aip.dev/not-precedent: UpdateFairnessState RPC doesn't follow Google API format. --) + UpdateFairnessState(context.Context, *UpdateFairnessStateRequest) (*UpdateFairnessStateResponse, error) + // CheckTaskQueueVersionMembership checks if a task queue is part of a specific deployment version. + CheckTaskQueueVersionMembership(context.Context, *CheckTaskQueueVersionMembershipRequest) (*CheckTaskQueueVersionMembershipResponse, error) mustEmbedUnimplementedMatchingServiceServer() } @@ -517,7 +807,7 @@ type MatchingServiceServer interface { type UnimplementedMatchingServiceServer struct { } -func (UnimplementedMatchingServiceServer) PollWorkflowTaskQueue(context.Context, *PollWorkflowTaskQueueRequest) (*PollWorkflowTaskQueueResponse, error) { +func (UnimplementedMatchingServiceServer) PollWorkflowTaskQueue(context.Context, *PollWorkflowTaskQueueRequest) (*PollWorkflowTaskQueueResponseWithRawHistory, error) { return nil, status.Errorf(codes.Unimplemented, "method PollWorkflowTaskQueue not implemented") } func (UnimplementedMatchingServiceServer) PollActivityTaskQueue(context.Context, *PollActivityTaskQueueRequest) (*PollActivityTaskQueueResponse, error) { @@ -550,9 +840,18 @@ func (UnimplementedMatchingServiceServer) RespondNexusTaskFailed(context.Context func (UnimplementedMatchingServiceServer) CancelOutstandingPoll(context.Context, *CancelOutstandingPollRequest) (*CancelOutstandingPollResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CancelOutstandingPoll not implemented") } +func (UnimplementedMatchingServiceServer) CancelOutstandingWorkerPolls(context.Context, *CancelOutstandingWorkerPollsRequest) (*CancelOutstandingWorkerPollsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, 
"method CancelOutstandingWorkerPolls not implemented") +} func (UnimplementedMatchingServiceServer) DescribeTaskQueue(context.Context, *DescribeTaskQueueRequest) (*DescribeTaskQueueResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DescribeTaskQueue not implemented") } +func (UnimplementedMatchingServiceServer) DescribeTaskQueuePartition(context.Context, *DescribeTaskQueuePartitionRequest) (*DescribeTaskQueuePartitionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeTaskQueuePartition not implemented") +} +func (UnimplementedMatchingServiceServer) DescribeVersionedTaskQueues(context.Context, *DescribeVersionedTaskQueuesRequest) (*DescribeVersionedTaskQueuesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeVersionedTaskQueues not implemented") +} func (UnimplementedMatchingServiceServer) ListTaskQueuePartitions(context.Context, *ListTaskQueuePartitionsRequest) (*ListTaskQueuePartitionsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListTaskQueuePartitions not implemented") } @@ -565,32 +864,68 @@ func (UnimplementedMatchingServiceServer) GetWorkerBuildIdCompatibility(context. 
func (UnimplementedMatchingServiceServer) GetTaskQueueUserData(context.Context, *GetTaskQueueUserDataRequest) (*GetTaskQueueUserDataResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetTaskQueueUserData not implemented") } +func (UnimplementedMatchingServiceServer) UpdateWorkerVersioningRules(context.Context, *UpdateWorkerVersioningRulesRequest) (*UpdateWorkerVersioningRulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateWorkerVersioningRules not implemented") +} +func (UnimplementedMatchingServiceServer) GetWorkerVersioningRules(context.Context, *GetWorkerVersioningRulesRequest) (*GetWorkerVersioningRulesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetWorkerVersioningRules not implemented") +} +func (UnimplementedMatchingServiceServer) SyncDeploymentUserData(context.Context, *SyncDeploymentUserDataRequest) (*SyncDeploymentUserDataResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SyncDeploymentUserData not implemented") +} func (UnimplementedMatchingServiceServer) ApplyTaskQueueUserDataReplicationEvent(context.Context, *ApplyTaskQueueUserDataReplicationEventRequest) (*ApplyTaskQueueUserDataReplicationEventResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ApplyTaskQueueUserDataReplicationEvent not implemented") } func (UnimplementedMatchingServiceServer) GetBuildIdTaskQueueMapping(context.Context, *GetBuildIdTaskQueueMappingRequest) (*GetBuildIdTaskQueueMappingResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetBuildIdTaskQueueMapping not implemented") } +func (UnimplementedMatchingServiceServer) ForceLoadTaskQueuePartition(context.Context, *ForceLoadTaskQueuePartitionRequest) (*ForceLoadTaskQueuePartitionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForceLoadTaskQueuePartition not implemented") +} func (UnimplementedMatchingServiceServer) 
ForceUnloadTaskQueue(context.Context, *ForceUnloadTaskQueueRequest) (*ForceUnloadTaskQueueResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ForceUnloadTaskQueue not implemented") } +func (UnimplementedMatchingServiceServer) ForceUnloadTaskQueuePartition(context.Context, *ForceUnloadTaskQueuePartitionRequest) (*ForceUnloadTaskQueuePartitionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ForceUnloadTaskQueuePartition not implemented") +} func (UnimplementedMatchingServiceServer) UpdateTaskQueueUserData(context.Context, *UpdateTaskQueueUserDataRequest) (*UpdateTaskQueueUserDataResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateTaskQueueUserData not implemented") } func (UnimplementedMatchingServiceServer) ReplicateTaskQueueUserData(context.Context, *ReplicateTaskQueueUserDataRequest) (*ReplicateTaskQueueUserDataResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ReplicateTaskQueueUserData not implemented") } -func (UnimplementedMatchingServiceServer) CreateNexusIncomingService(context.Context, *CreateNexusIncomingServiceRequest) (*CreateNexusIncomingServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateNexusIncomingService not implemented") +func (UnimplementedMatchingServiceServer) CheckTaskQueueUserDataPropagation(context.Context, *CheckTaskQueueUserDataPropagationRequest) (*CheckTaskQueueUserDataPropagationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTaskQueueUserDataPropagation not implemented") +} +func (UnimplementedMatchingServiceServer) CreateNexusEndpoint(context.Context, *CreateNexusEndpointRequest) (*CreateNexusEndpointResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateNexusEndpoint not implemented") } -func (UnimplementedMatchingServiceServer) UpdateNexusIncomingService(context.Context, *UpdateNexusIncomingServiceRequest) 
(*UpdateNexusIncomingServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateNexusIncomingService not implemented") +func (UnimplementedMatchingServiceServer) UpdateNexusEndpoint(context.Context, *UpdateNexusEndpointRequest) (*UpdateNexusEndpointResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateNexusEndpoint not implemented") } -func (UnimplementedMatchingServiceServer) DeleteNexusIncomingService(context.Context, *DeleteNexusIncomingServiceRequest) (*DeleteNexusIncomingServiceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteNexusIncomingService not implemented") +func (UnimplementedMatchingServiceServer) DeleteNexusEndpoint(context.Context, *DeleteNexusEndpointRequest) (*DeleteNexusEndpointResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNexusEndpoint not implemented") } -func (UnimplementedMatchingServiceServer) ListNexusIncomingServices(context.Context, *ListNexusIncomingServicesRequest) (*ListNexusIncomingServicesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListNexusIncomingServices not implemented") +func (UnimplementedMatchingServiceServer) ListNexusEndpoints(context.Context, *ListNexusEndpointsRequest) (*ListNexusEndpointsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListNexusEndpoints not implemented") +} +func (UnimplementedMatchingServiceServer) RecordWorkerHeartbeat(context.Context, *RecordWorkerHeartbeatRequest) (*RecordWorkerHeartbeatResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RecordWorkerHeartbeat not implemented") +} +func (UnimplementedMatchingServiceServer) ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented") +} +func (UnimplementedMatchingServiceServer) UpdateTaskQueueConfig(context.Context, 
*UpdateTaskQueueConfigRequest) (*UpdateTaskQueueConfigResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTaskQueueConfig not implemented") +} +func (UnimplementedMatchingServiceServer) DescribeWorker(context.Context, *DescribeWorkerRequest) (*DescribeWorkerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeWorker not implemented") +} +func (UnimplementedMatchingServiceServer) UpdateFairnessState(context.Context, *UpdateFairnessStateRequest) (*UpdateFairnessStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateFairnessState not implemented") +} +func (UnimplementedMatchingServiceServer) CheckTaskQueueVersionMembership(context.Context, *CheckTaskQueueVersionMembershipRequest) (*CheckTaskQueueVersionMembershipResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTaskQueueVersionMembership not implemented") } func (UnimplementedMatchingServiceServer) mustEmbedUnimplementedMatchingServiceServer() {} @@ -803,6 +1138,24 @@ func _MatchingService_CancelOutstandingPoll_Handler(srv interface{}, ctx context return interceptor(ctx, in, info, handler) } +func _MatchingService_CancelOutstandingWorkerPolls_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelOutstandingWorkerPollsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).CancelOutstandingWorkerPolls(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_CancelOutstandingWorkerPolls_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).CancelOutstandingWorkerPolls(ctx, req.(*CancelOutstandingWorkerPollsRequest)) + } + return interceptor(ctx, in, info, handler) +} + func 
_MatchingService_DescribeTaskQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DescribeTaskQueueRequest) if err := dec(in); err != nil { @@ -821,6 +1174,42 @@ func _MatchingService_DescribeTaskQueue_Handler(srv interface{}, ctx context.Con return interceptor(ctx, in, info, handler) } +func _MatchingService_DescribeTaskQueuePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeTaskQueuePartitionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).DescribeTaskQueuePartition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_DescribeTaskQueuePartition_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).DescribeTaskQueuePartition(ctx, req.(*DescribeTaskQueuePartitionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_DescribeVersionedTaskQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeVersionedTaskQueuesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).DescribeVersionedTaskQueues(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_DescribeVersionedTaskQueues_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).DescribeVersionedTaskQueues(ctx, req.(*DescribeVersionedTaskQueuesRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _MatchingService_ListTaskQueuePartitions_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListTaskQueuePartitionsRequest) if err := dec(in); err != nil { @@ -893,6 +1282,60 @@ func _MatchingService_GetTaskQueueUserData_Handler(srv interface{}, ctx context. return interceptor(ctx, in, info, handler) } +func _MatchingService_UpdateWorkerVersioningRules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateWorkerVersioningRulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).UpdateWorkerVersioningRules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_UpdateWorkerVersioningRules_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).UpdateWorkerVersioningRules(ctx, req.(*UpdateWorkerVersioningRulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_GetWorkerVersioningRules_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkerVersioningRulesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).GetWorkerVersioningRules(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_GetWorkerVersioningRules_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).GetWorkerVersioningRules(ctx, req.(*GetWorkerVersioningRulesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_SyncDeploymentUserData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SyncDeploymentUserDataRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).SyncDeploymentUserData(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_SyncDeploymentUserData_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).SyncDeploymentUserData(ctx, req.(*SyncDeploymentUserDataRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _MatchingService_ApplyTaskQueueUserDataReplicationEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ApplyTaskQueueUserDataReplicationEventRequest) if err := dec(in); err != nil { @@ -929,6 +1372,24 @@ func _MatchingService_GetBuildIdTaskQueueMapping_Handler(srv interface{}, ctx co return interceptor(ctx, in, info, handler) } +func _MatchingService_ForceLoadTaskQueuePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForceLoadTaskQueuePartitionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).ForceLoadTaskQueuePartition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_ForceLoadTaskQueuePartition_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).ForceLoadTaskQueuePartition(ctx, req.(*ForceLoadTaskQueuePartitionRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _MatchingService_ForceUnloadTaskQueue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in 
:= new(ForceUnloadTaskQueueRequest) if err := dec(in); err != nil { @@ -947,6 +1408,24 @@ func _MatchingService_ForceUnloadTaskQueue_Handler(srv interface{}, ctx context. return interceptor(ctx, in, info, handler) } +func _MatchingService_ForceUnloadTaskQueuePartition_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForceUnloadTaskQueuePartitionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).ForceUnloadTaskQueuePartition(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_ForceUnloadTaskQueuePartition_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).ForceUnloadTaskQueuePartition(ctx, req.(*ForceUnloadTaskQueuePartitionRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _MatchingService_UpdateTaskQueueUserData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateTaskQueueUserDataRequest) if err := dec(in); err != nil { @@ -983,74 +1462,200 @@ func _MatchingService_ReplicateTaskQueueUserData_Handler(srv interface{}, ctx co return interceptor(ctx, in, info, handler) } -func _MatchingService_CreateNexusIncomingService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNexusIncomingServiceRequest) +func _MatchingService_CheckTaskQueueUserDataPropagation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckTaskQueueUserDataPropagationRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return 
srv.(MatchingServiceServer).CreateNexusIncomingService(ctx, in) + return srv.(MatchingServiceServer).CheckTaskQueueUserDataPropagation(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: MatchingService_CreateNexusIncomingService_FullMethodName, + FullMethod: MatchingService_CheckTaskQueueUserDataPropagation_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MatchingServiceServer).CreateNexusIncomingService(ctx, req.(*CreateNexusIncomingServiceRequest)) + return srv.(MatchingServiceServer).CheckTaskQueueUserDataPropagation(ctx, req.(*CheckTaskQueueUserDataPropagationRequest)) } return interceptor(ctx, in, info, handler) } -func _MatchingService_UpdateNexusIncomingService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateNexusIncomingServiceRequest) +func _MatchingService_CreateNexusEndpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNexusEndpointRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(MatchingServiceServer).UpdateNexusIncomingService(ctx, in) + return srv.(MatchingServiceServer).CreateNexusEndpoint(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: MatchingService_UpdateNexusIncomingService_FullMethodName, + FullMethod: MatchingService_CreateNexusEndpoint_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MatchingServiceServer).UpdateNexusIncomingService(ctx, req.(*UpdateNexusIncomingServiceRequest)) + return srv.(MatchingServiceServer).CreateNexusEndpoint(ctx, req.(*CreateNexusEndpointRequest)) } return interceptor(ctx, in, info, handler) } -func _MatchingService_DeleteNexusIncomingService_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNexusIncomingServiceRequest) +func _MatchingService_UpdateNexusEndpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNexusEndpointRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(MatchingServiceServer).DeleteNexusIncomingService(ctx, in) + return srv.(MatchingServiceServer).UpdateNexusEndpoint(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: MatchingService_DeleteNexusIncomingService_FullMethodName, + FullMethod: MatchingService_UpdateNexusEndpoint_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MatchingServiceServer).DeleteNexusIncomingService(ctx, req.(*DeleteNexusIncomingServiceRequest)) + return srv.(MatchingServiceServer).UpdateNexusEndpoint(ctx, req.(*UpdateNexusEndpointRequest)) } return interceptor(ctx, in, info, handler) } -func _MatchingService_ListNexusIncomingServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNexusIncomingServicesRequest) +func _MatchingService_DeleteNexusEndpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNexusEndpointRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(MatchingServiceServer).ListNexusIncomingServices(ctx, in) + return srv.(MatchingServiceServer).DeleteNexusEndpoint(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: MatchingService_ListNexusIncomingServices_FullMethodName, + FullMethod: MatchingService_DeleteNexusEndpoint_FullMethodName, } handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { - return srv.(MatchingServiceServer).ListNexusIncomingServices(ctx, req.(*ListNexusIncomingServicesRequest)) + return srv.(MatchingServiceServer).DeleteNexusEndpoint(ctx, req.(*DeleteNexusEndpointRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_ListNexusEndpoints_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNexusEndpointsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).ListNexusEndpoints(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_ListNexusEndpoints_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).ListNexusEndpoints(ctx, req.(*ListNexusEndpointsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_RecordWorkerHeartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RecordWorkerHeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).RecordWorkerHeartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_RecordWorkerHeartbeat_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).RecordWorkerHeartbeat(ctx, req.(*RecordWorkerHeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkersRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(MatchingServiceServer).ListWorkers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_ListWorkers_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).ListWorkers(ctx, req.(*ListWorkersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_UpdateTaskQueueConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskQueueConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).UpdateTaskQueueConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_UpdateTaskQueueConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).UpdateTaskQueueConfig(ctx, req.(*UpdateTaskQueueConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_DescribeWorker_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeWorkerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).DescribeWorker(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_DescribeWorker_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).DescribeWorker(ctx, req.(*DescribeWorkerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_UpdateFairnessState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(UpdateFairnessStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).UpdateFairnessState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_UpdateFairnessState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).UpdateFairnessState(ctx, req.(*UpdateFairnessStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _MatchingService_CheckTaskQueueVersionMembership_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckTaskQueueVersionMembershipRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MatchingServiceServer).CheckTaskQueueVersionMembership(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: MatchingService_CheckTaskQueueVersionMembership_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MatchingServiceServer).CheckTaskQueueVersionMembership(ctx, req.(*CheckTaskQueueVersionMembershipRequest)) } return interceptor(ctx, in, info, handler) } @@ -1106,10 +1711,22 @@ var MatchingService_ServiceDesc = grpc.ServiceDesc{ MethodName: "CancelOutstandingPoll", Handler: _MatchingService_CancelOutstandingPoll_Handler, }, + { + MethodName: "CancelOutstandingWorkerPolls", + Handler: _MatchingService_CancelOutstandingWorkerPolls_Handler, + }, { MethodName: "DescribeTaskQueue", Handler: _MatchingService_DescribeTaskQueue_Handler, }, + { + MethodName: "DescribeTaskQueuePartition", + Handler: _MatchingService_DescribeTaskQueuePartition_Handler, + }, + { + MethodName: "DescribeVersionedTaskQueues", + Handler: _MatchingService_DescribeVersionedTaskQueues_Handler, + }, { MethodName: 
"ListTaskQueuePartitions", Handler: _MatchingService_ListTaskQueuePartitions_Handler, @@ -1126,6 +1743,18 @@ var MatchingService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetTaskQueueUserData", Handler: _MatchingService_GetTaskQueueUserData_Handler, }, + { + MethodName: "UpdateWorkerVersioningRules", + Handler: _MatchingService_UpdateWorkerVersioningRules_Handler, + }, + { + MethodName: "GetWorkerVersioningRules", + Handler: _MatchingService_GetWorkerVersioningRules_Handler, + }, + { + MethodName: "SyncDeploymentUserData", + Handler: _MatchingService_SyncDeploymentUserData_Handler, + }, { MethodName: "ApplyTaskQueueUserDataReplicationEvent", Handler: _MatchingService_ApplyTaskQueueUserDataReplicationEvent_Handler, @@ -1134,10 +1763,18 @@ var MatchingService_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetBuildIdTaskQueueMapping", Handler: _MatchingService_GetBuildIdTaskQueueMapping_Handler, }, + { + MethodName: "ForceLoadTaskQueuePartition", + Handler: _MatchingService_ForceLoadTaskQueuePartition_Handler, + }, { MethodName: "ForceUnloadTaskQueue", Handler: _MatchingService_ForceUnloadTaskQueue_Handler, }, + { + MethodName: "ForceUnloadTaskQueuePartition", + Handler: _MatchingService_ForceUnloadTaskQueuePartition_Handler, + }, { MethodName: "UpdateTaskQueueUserData", Handler: _MatchingService_UpdateTaskQueueUserData_Handler, @@ -1147,20 +1784,48 @@ var MatchingService_ServiceDesc = grpc.ServiceDesc{ Handler: _MatchingService_ReplicateTaskQueueUserData_Handler, }, { - MethodName: "CreateNexusIncomingService", - Handler: _MatchingService_CreateNexusIncomingService_Handler, + MethodName: "CheckTaskQueueUserDataPropagation", + Handler: _MatchingService_CheckTaskQueueUserDataPropagation_Handler, + }, + { + MethodName: "CreateNexusEndpoint", + Handler: _MatchingService_CreateNexusEndpoint_Handler, + }, + { + MethodName: "UpdateNexusEndpoint", + Handler: _MatchingService_UpdateNexusEndpoint_Handler, + }, + { + MethodName: "DeleteNexusEndpoint", + Handler: 
_MatchingService_DeleteNexusEndpoint_Handler, + }, + { + MethodName: "ListNexusEndpoints", + Handler: _MatchingService_ListNexusEndpoints_Handler, + }, + { + MethodName: "RecordWorkerHeartbeat", + Handler: _MatchingService_RecordWorkerHeartbeat_Handler, + }, + { + MethodName: "ListWorkers", + Handler: _MatchingService_ListWorkers_Handler, + }, + { + MethodName: "UpdateTaskQueueConfig", + Handler: _MatchingService_UpdateTaskQueueConfig_Handler, }, { - MethodName: "UpdateNexusIncomingService", - Handler: _MatchingService_UpdateNexusIncomingService_Handler, + MethodName: "DescribeWorker", + Handler: _MatchingService_DescribeWorker_Handler, }, { - MethodName: "DeleteNexusIncomingService", - Handler: _MatchingService_DeleteNexusIncomingService_Handler, + MethodName: "UpdateFairnessState", + Handler: _MatchingService_UpdateFairnessState_Handler, }, { - MethodName: "ListNexusIncomingServices", - Handler: _MatchingService_ListNexusIncomingServices_Handler, + MethodName: "CheckTaskQueueVersionMembership", + Handler: _MatchingService_CheckTaskQueueVersionMembership_Handler, }, }, Streams: []grpc.StreamDesc{}, diff --git a/api/matchingservicemock/v1/service.pb.mock.go b/api/matchingservicemock/v1/service.pb.mock.go index a04b89857fa..abc2cf41b89 100644 --- a/api/matchingservicemock/v1/service.pb.mock.go +++ b/api/matchingservicemock/v1/service.pb.mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: api/matchingservice/v1/service.pb.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package matchingservicemock -source api/matchingservice/v1/service.pb.go -destination api/matchingservicemock/v1/service.pb.mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: matchingservice/v1/service.pb.go // Package matchingservicemock is a generated GoMock package. package matchingservicemock diff --git a/api/matchingservicemock/v1/service_grpc.pb.mock.go b/api/matchingservicemock/v1/service_grpc.pb.mock.go index 5717790ee6d..6f053811b3d 100644 --- a/api/matchingservicemock/v1/service_grpc.pb.mock.go +++ b/api/matchingservicemock/v1/service_grpc.pb.mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: api/matchingservice/v1/service_grpc.pb.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package matchingservicemock -source api/matchingservice/v1/service_grpc.pb.go -destination api/matchingservicemock/v1/service_grpc.pb.mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: matchingservice/v1/service_grpc.pb.go // Package matchingservicemock is a generated GoMock package. 
package matchingservicemock @@ -32,8 +13,8 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" matchingservice "go.temporal.io/server/api/matchingservice/v1" + gomock "go.uber.org/mock/gomock" grpc "google.golang.org/grpc" ) @@ -41,6 +22,7 @@ import ( type MockMatchingServiceClient struct { ctrl *gomock.Controller recorder *MockMatchingServiceClientMockRecorder + isgomock struct{} } // MockMatchingServiceClientMockRecorder is the mock recorder for MockMatchingServiceClient. @@ -63,7 +45,7 @@ func (m *MockMatchingServiceClient) EXPECT() *MockMatchingServiceClientMockRecor // AddActivityTask mocks base method. func (m *MockMatchingServiceClient) AddActivityTask(ctx context.Context, in *matchingservice.AddActivityTaskRequest, opts ...grpc.CallOption) (*matchingservice.AddActivityTaskResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -74,16 +56,16 @@ func (m *MockMatchingServiceClient) AddActivityTask(ctx context.Context, in *mat } // AddActivityTask indicates an expected call of AddActivityTask. -func (mr *MockMatchingServiceClientMockRecorder) AddActivityTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) AddActivityTask(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddActivityTask", reflect.TypeOf((*MockMatchingServiceClient)(nil).AddActivityTask), varargs...) } // AddWorkflowTask mocks base method. 
func (m *MockMatchingServiceClient) AddWorkflowTask(ctx context.Context, in *matchingservice.AddWorkflowTaskRequest, opts ...grpc.CallOption) (*matchingservice.AddWorkflowTaskResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -94,16 +76,16 @@ func (m *MockMatchingServiceClient) AddWorkflowTask(ctx context.Context, in *mat } // AddWorkflowTask indicates an expected call of AddWorkflowTask. -func (mr *MockMatchingServiceClientMockRecorder) AddWorkflowTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) AddWorkflowTask(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWorkflowTask", reflect.TypeOf((*MockMatchingServiceClient)(nil).AddWorkflowTask), varargs...) } // ApplyTaskQueueUserDataReplicationEvent mocks base method. func (m *MockMatchingServiceClient) ApplyTaskQueueUserDataReplicationEvent(ctx context.Context, in *matchingservice.ApplyTaskQueueUserDataReplicationEventRequest, opts ...grpc.CallOption) (*matchingservice.ApplyTaskQueueUserDataReplicationEventResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -114,16 +96,16 @@ func (m *MockMatchingServiceClient) ApplyTaskQueueUserDataReplicationEvent(ctx c } // ApplyTaskQueueUserDataReplicationEvent indicates an expected call of ApplyTaskQueueUserDataReplicationEvent. 
-func (mr *MockMatchingServiceClientMockRecorder) ApplyTaskQueueUserDataReplicationEvent(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) ApplyTaskQueueUserDataReplicationEvent(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyTaskQueueUserDataReplicationEvent", reflect.TypeOf((*MockMatchingServiceClient)(nil).ApplyTaskQueueUserDataReplicationEvent), varargs...) } // CancelOutstandingPoll mocks base method. func (m *MockMatchingServiceClient) CancelOutstandingPoll(ctx context.Context, in *matchingservice.CancelOutstandingPollRequest, opts ...grpc.CallOption) (*matchingservice.CancelOutstandingPollResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -134,56 +116,116 @@ func (m *MockMatchingServiceClient) CancelOutstandingPoll(ctx context.Context, i } // CancelOutstandingPoll indicates an expected call of CancelOutstandingPoll. -func (mr *MockMatchingServiceClientMockRecorder) CancelOutstandingPoll(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) CancelOutstandingPoll(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelOutstandingPoll", reflect.TypeOf((*MockMatchingServiceClient)(nil).CancelOutstandingPoll), varargs...) } -// CreateNexusIncomingService mocks base method. 
-func (m *MockMatchingServiceClient) CreateNexusIncomingService(ctx context.Context, in *matchingservice.CreateNexusIncomingServiceRequest, opts ...grpc.CallOption) (*matchingservice.CreateNexusIncomingServiceResponse, error) { +// CancelOutstandingWorkerPolls mocks base method. +func (m *MockMatchingServiceClient) CancelOutstandingWorkerPolls(ctx context.Context, in *matchingservice.CancelOutstandingWorkerPollsRequest, opts ...grpc.CallOption) (*matchingservice.CancelOutstandingWorkerPollsResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "CreateNexusIncomingService", varargs...) - ret0, _ := ret[0].(*matchingservice.CreateNexusIncomingServiceResponse) + ret := m.ctrl.Call(m, "CancelOutstandingWorkerPolls", varargs...) + ret0, _ := ret[0].(*matchingservice.CancelOutstandingWorkerPollsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// CreateNexusIncomingService indicates an expected call of CreateNexusIncomingService. -func (mr *MockMatchingServiceClientMockRecorder) CreateNexusIncomingService(ctx, in interface{}, opts ...interface{}) *gomock.Call { +// CancelOutstandingWorkerPolls indicates an expected call of CancelOutstandingWorkerPolls. +func (mr *MockMatchingServiceClientMockRecorder) CancelOutstandingWorkerPolls(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNexusIncomingService", reflect.TypeOf((*MockMatchingServiceClient)(nil).CreateNexusIncomingService), varargs...) + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelOutstandingWorkerPolls", reflect.TypeOf((*MockMatchingServiceClient)(nil).CancelOutstandingWorkerPolls), varargs...) } -// DeleteNexusIncomingService mocks base method. 
-func (m *MockMatchingServiceClient) DeleteNexusIncomingService(ctx context.Context, in *matchingservice.DeleteNexusIncomingServiceRequest, opts ...grpc.CallOption) (*matchingservice.DeleteNexusIncomingServiceResponse, error) { +// CheckTaskQueueUserDataPropagation mocks base method. +func (m *MockMatchingServiceClient) CheckTaskQueueUserDataPropagation(ctx context.Context, in *matchingservice.CheckTaskQueueUserDataPropagationRequest, opts ...grpc.CallOption) (*matchingservice.CheckTaskQueueUserDataPropagationResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DeleteNexusIncomingService", varargs...) - ret0, _ := ret[0].(*matchingservice.DeleteNexusIncomingServiceResponse) + ret := m.ctrl.Call(m, "CheckTaskQueueUserDataPropagation", varargs...) + ret0, _ := ret[0].(*matchingservice.CheckTaskQueueUserDataPropagationResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteNexusIncomingService indicates an expected call of DeleteNexusIncomingService. -func (mr *MockMatchingServiceClientMockRecorder) DeleteNexusIncomingService(ctx, in interface{}, opts ...interface{}) *gomock.Call { +// CheckTaskQueueUserDataPropagation indicates an expected call of CheckTaskQueueUserDataPropagation. +func (mr *MockMatchingServiceClientMockRecorder) CheckTaskQueueUserDataPropagation(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNexusIncomingService", reflect.TypeOf((*MockMatchingServiceClient)(nil).DeleteNexusIncomingService), varargs...) + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTaskQueueUserDataPropagation", reflect.TypeOf((*MockMatchingServiceClient)(nil).CheckTaskQueueUserDataPropagation), varargs...) 
+} + +// CheckTaskQueueVersionMembership mocks base method. +func (m *MockMatchingServiceClient) CheckTaskQueueVersionMembership(ctx context.Context, in *matchingservice.CheckTaskQueueVersionMembershipRequest, opts ...grpc.CallOption) (*matchingservice.CheckTaskQueueVersionMembershipResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CheckTaskQueueVersionMembership", varargs...) + ret0, _ := ret[0].(*matchingservice.CheckTaskQueueVersionMembershipResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CheckTaskQueueVersionMembership indicates an expected call of CheckTaskQueueVersionMembership. +func (mr *MockMatchingServiceClientMockRecorder) CheckTaskQueueVersionMembership(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTaskQueueVersionMembership", reflect.TypeOf((*MockMatchingServiceClient)(nil).CheckTaskQueueVersionMembership), varargs...) +} + +// CreateNexusEndpoint mocks base method. +func (m *MockMatchingServiceClient) CreateNexusEndpoint(ctx context.Context, in *matchingservice.CreateNexusEndpointRequest, opts ...grpc.CallOption) (*matchingservice.CreateNexusEndpointResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateNexusEndpoint", varargs...) + ret0, _ := ret[0].(*matchingservice.CreateNexusEndpointResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNexusEndpoint indicates an expected call of CreateNexusEndpoint. +func (mr *MockMatchingServiceClientMockRecorder) CreateNexusEndpoint(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNexusEndpoint", reflect.TypeOf((*MockMatchingServiceClient)(nil).CreateNexusEndpoint), varargs...) +} + +// DeleteNexusEndpoint mocks base method. +func (m *MockMatchingServiceClient) DeleteNexusEndpoint(ctx context.Context, in *matchingservice.DeleteNexusEndpointRequest, opts ...grpc.CallOption) (*matchingservice.DeleteNexusEndpointResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteNexusEndpoint", varargs...) + ret0, _ := ret[0].(*matchingservice.DeleteNexusEndpointResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteNexusEndpoint indicates an expected call of DeleteNexusEndpoint. +func (mr *MockMatchingServiceClientMockRecorder) DeleteNexusEndpoint(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNexusEndpoint", reflect.TypeOf((*MockMatchingServiceClient)(nil).DeleteNexusEndpoint), varargs...) } // DescribeTaskQueue mocks base method. func (m *MockMatchingServiceClient) DescribeTaskQueue(ctx context.Context, in *matchingservice.DescribeTaskQueueRequest, opts ...grpc.CallOption) (*matchingservice.DescribeTaskQueueResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -194,16 +236,76 @@ func (m *MockMatchingServiceClient) DescribeTaskQueue(ctx context.Context, in *m } // DescribeTaskQueue indicates an expected call of DescribeTaskQueue. -func (mr *MockMatchingServiceClientMockRecorder) DescribeTaskQueue(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) DescribeTaskQueue(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) 
+ varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTaskQueue", reflect.TypeOf((*MockMatchingServiceClient)(nil).DescribeTaskQueue), varargs...) } +// DescribeTaskQueuePartition mocks base method. +func (m *MockMatchingServiceClient) DescribeTaskQueuePartition(ctx context.Context, in *matchingservice.DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption) (*matchingservice.DescribeTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeTaskQueuePartition", varargs...) + ret0, _ := ret[0].(*matchingservice.DescribeTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTaskQueuePartition indicates an expected call of DescribeTaskQueuePartition. +func (mr *MockMatchingServiceClientMockRecorder) DescribeTaskQueuePartition(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTaskQueuePartition", reflect.TypeOf((*MockMatchingServiceClient)(nil).DescribeTaskQueuePartition), varargs...) +} + +// DescribeVersionedTaskQueues mocks base method. +func (m *MockMatchingServiceClient) DescribeVersionedTaskQueues(ctx context.Context, in *matchingservice.DescribeVersionedTaskQueuesRequest, opts ...grpc.CallOption) (*matchingservice.DescribeVersionedTaskQueuesResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeVersionedTaskQueues", varargs...) + ret0, _ := ret[0].(*matchingservice.DescribeVersionedTaskQueuesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeVersionedTaskQueues indicates an expected call of DescribeVersionedTaskQueues. 
+func (mr *MockMatchingServiceClientMockRecorder) DescribeVersionedTaskQueues(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeVersionedTaskQueues", reflect.TypeOf((*MockMatchingServiceClient)(nil).DescribeVersionedTaskQueues), varargs...) +} + +// DescribeWorker mocks base method. +func (m *MockMatchingServiceClient) DescribeWorker(ctx context.Context, in *matchingservice.DescribeWorkerRequest, opts ...grpc.CallOption) (*matchingservice.DescribeWorkerResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeWorker", varargs...) + ret0, _ := ret[0].(*matchingservice.DescribeWorkerResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeWorker indicates an expected call of DescribeWorker. +func (mr *MockMatchingServiceClientMockRecorder) DescribeWorker(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWorker", reflect.TypeOf((*MockMatchingServiceClient)(nil).DescribeWorker), varargs...) +} + // DispatchNexusTask mocks base method. func (m *MockMatchingServiceClient) DispatchNexusTask(ctx context.Context, in *matchingservice.DispatchNexusTaskRequest, opts ...grpc.CallOption) (*matchingservice.DispatchNexusTaskResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -214,16 +316,36 @@ func (m *MockMatchingServiceClient) DispatchNexusTask(ctx context.Context, in *m } // DispatchNexusTask indicates an expected call of DispatchNexusTask. 
-func (mr *MockMatchingServiceClientMockRecorder) DispatchNexusTask(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) DispatchNexusTask(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DispatchNexusTask", reflect.TypeOf((*MockMatchingServiceClient)(nil).DispatchNexusTask), varargs...) } +// ForceLoadTaskQueuePartition mocks base method. +func (m *MockMatchingServiceClient) ForceLoadTaskQueuePartition(ctx context.Context, in *matchingservice.ForceLoadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*matchingservice.ForceLoadTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ForceLoadTaskQueuePartition", varargs...) + ret0, _ := ret[0].(*matchingservice.ForceLoadTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForceLoadTaskQueuePartition indicates an expected call of ForceLoadTaskQueuePartition. +func (mr *MockMatchingServiceClientMockRecorder) ForceLoadTaskQueuePartition(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceLoadTaskQueuePartition", reflect.TypeOf((*MockMatchingServiceClient)(nil).ForceLoadTaskQueuePartition), varargs...) +} + // ForceUnloadTaskQueue mocks base method. 
func (m *MockMatchingServiceClient) ForceUnloadTaskQueue(ctx context.Context, in *matchingservice.ForceUnloadTaskQueueRequest, opts ...grpc.CallOption) (*matchingservice.ForceUnloadTaskQueueResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -234,16 +356,36 @@ func (m *MockMatchingServiceClient) ForceUnloadTaskQueue(ctx context.Context, in } // ForceUnloadTaskQueue indicates an expected call of ForceUnloadTaskQueue. -func (mr *MockMatchingServiceClientMockRecorder) ForceUnloadTaskQueue(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) ForceUnloadTaskQueue(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUnloadTaskQueue", reflect.TypeOf((*MockMatchingServiceClient)(nil).ForceUnloadTaskQueue), varargs...) } +// ForceUnloadTaskQueuePartition mocks base method. +func (m *MockMatchingServiceClient) ForceUnloadTaskQueuePartition(ctx context.Context, in *matchingservice.ForceUnloadTaskQueuePartitionRequest, opts ...grpc.CallOption) (*matchingservice.ForceUnloadTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ForceUnloadTaskQueuePartition", varargs...) + ret0, _ := ret[0].(*matchingservice.ForceUnloadTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForceUnloadTaskQueuePartition indicates an expected call of ForceUnloadTaskQueuePartition. +func (mr *MockMatchingServiceClientMockRecorder) ForceUnloadTaskQueuePartition(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUnloadTaskQueuePartition", reflect.TypeOf((*MockMatchingServiceClient)(nil).ForceUnloadTaskQueuePartition), varargs...) +} + // GetBuildIdTaskQueueMapping mocks base method. func (m *MockMatchingServiceClient) GetBuildIdTaskQueueMapping(ctx context.Context, in *matchingservice.GetBuildIdTaskQueueMappingRequest, opts ...grpc.CallOption) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -254,16 +396,16 @@ func (m *MockMatchingServiceClient) GetBuildIdTaskQueueMapping(ctx context.Conte } // GetBuildIdTaskQueueMapping indicates an expected call of GetBuildIdTaskQueueMapping. -func (mr *MockMatchingServiceClientMockRecorder) GetBuildIdTaskQueueMapping(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) GetBuildIdTaskQueueMapping(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBuildIdTaskQueueMapping", reflect.TypeOf((*MockMatchingServiceClient)(nil).GetBuildIdTaskQueueMapping), varargs...) } // GetTaskQueueUserData mocks base method. func (m *MockMatchingServiceClient) GetTaskQueueUserData(ctx context.Context, in *matchingservice.GetTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.GetTaskQueueUserDataResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -274,16 +416,16 @@ func (m *MockMatchingServiceClient) GetTaskQueueUserData(ctx context.Context, in } // GetTaskQueueUserData indicates an expected call of GetTaskQueueUserData. 
-func (mr *MockMatchingServiceClientMockRecorder) GetTaskQueueUserData(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) GetTaskQueueUserData(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueUserData", reflect.TypeOf((*MockMatchingServiceClient)(nil).GetTaskQueueUserData), varargs...) } // GetWorkerBuildIdCompatibility mocks base method. func (m *MockMatchingServiceClient) GetWorkerBuildIdCompatibility(ctx context.Context, in *matchingservice.GetWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*matchingservice.GetWorkerBuildIdCompatibilityResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -294,36 +436,56 @@ func (m *MockMatchingServiceClient) GetWorkerBuildIdCompatibility(ctx context.Co } // GetWorkerBuildIdCompatibility indicates an expected call of GetWorkerBuildIdCompatibility. -func (mr *MockMatchingServiceClientMockRecorder) GetWorkerBuildIdCompatibility(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) GetWorkerBuildIdCompatibility(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerBuildIdCompatibility", reflect.TypeOf((*MockMatchingServiceClient)(nil).GetWorkerBuildIdCompatibility), varargs...) } -// ListNexusIncomingServices mocks base method. 
-func (m *MockMatchingServiceClient) ListNexusIncomingServices(ctx context.Context, in *matchingservice.ListNexusIncomingServicesRequest, opts ...grpc.CallOption) (*matchingservice.ListNexusIncomingServicesResponse, error) { +// GetWorkerVersioningRules mocks base method. +func (m *MockMatchingServiceClient) GetWorkerVersioningRules(ctx context.Context, in *matchingservice.GetWorkerVersioningRulesRequest, opts ...grpc.CallOption) (*matchingservice.GetWorkerVersioningRulesResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetWorkerVersioningRules", varargs...) + ret0, _ := ret[0].(*matchingservice.GetWorkerVersioningRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWorkerVersioningRules indicates an expected call of GetWorkerVersioningRules. +func (mr *MockMatchingServiceClientMockRecorder) GetWorkerVersioningRules(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerVersioningRules", reflect.TypeOf((*MockMatchingServiceClient)(nil).GetWorkerVersioningRules), varargs...) +} + +// ListNexusEndpoints mocks base method. +func (m *MockMatchingServiceClient) ListNexusEndpoints(ctx context.Context, in *matchingservice.ListNexusEndpointsRequest, opts ...grpc.CallOption) (*matchingservice.ListNexusEndpointsResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "ListNexusIncomingServices", varargs...) - ret0, _ := ret[0].(*matchingservice.ListNexusIncomingServicesResponse) + ret := m.ctrl.Call(m, "ListNexusEndpoints", varargs...) 
+ ret0, _ := ret[0].(*matchingservice.ListNexusEndpointsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListNexusIncomingServices indicates an expected call of ListNexusIncomingServices. -func (mr *MockMatchingServiceClientMockRecorder) ListNexusIncomingServices(ctx, in interface{}, opts ...interface{}) *gomock.Call { +// ListNexusEndpoints indicates an expected call of ListNexusEndpoints. +func (mr *MockMatchingServiceClientMockRecorder) ListNexusEndpoints(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNexusIncomingServices", reflect.TypeOf((*MockMatchingServiceClient)(nil).ListNexusIncomingServices), varargs...) + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNexusEndpoints", reflect.TypeOf((*MockMatchingServiceClient)(nil).ListNexusEndpoints), varargs...) } // ListTaskQueuePartitions mocks base method. func (m *MockMatchingServiceClient) ListTaskQueuePartitions(ctx context.Context, in *matchingservice.ListTaskQueuePartitionsRequest, opts ...grpc.CallOption) (*matchingservice.ListTaskQueuePartitionsResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -334,16 +496,36 @@ func (m *MockMatchingServiceClient) ListTaskQueuePartitions(ctx context.Context, } // ListTaskQueuePartitions indicates an expected call of ListTaskQueuePartitions. -func (mr *MockMatchingServiceClientMockRecorder) ListTaskQueuePartitions(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) ListTaskQueuePartitions(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTaskQueuePartitions", reflect.TypeOf((*MockMatchingServiceClient)(nil).ListTaskQueuePartitions), varargs...) } +// ListWorkers mocks base method. +func (m *MockMatchingServiceClient) ListWorkers(ctx context.Context, in *matchingservice.ListWorkersRequest, opts ...grpc.CallOption) (*matchingservice.ListWorkersResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListWorkers", varargs...) + ret0, _ := ret[0].(*matchingservice.ListWorkersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWorkers indicates an expected call of ListWorkers. +func (mr *MockMatchingServiceClientMockRecorder) ListWorkers(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWorkers", reflect.TypeOf((*MockMatchingServiceClient)(nil).ListWorkers), varargs...) +} + // PollActivityTaskQueue mocks base method. func (m *MockMatchingServiceClient) PollActivityTaskQueue(ctx context.Context, in *matchingservice.PollActivityTaskQueueRequest, opts ...grpc.CallOption) (*matchingservice.PollActivityTaskQueueResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -354,16 +536,16 @@ func (m *MockMatchingServiceClient) PollActivityTaskQueue(ctx context.Context, i } // PollActivityTaskQueue indicates an expected call of PollActivityTaskQueue. -func (mr *MockMatchingServiceClientMockRecorder) PollActivityTaskQueue(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) PollActivityTaskQueue(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollActivityTaskQueue", reflect.TypeOf((*MockMatchingServiceClient)(nil).PollActivityTaskQueue), varargs...) } // PollNexusTaskQueue mocks base method. func (m *MockMatchingServiceClient) PollNexusTaskQueue(ctx context.Context, in *matchingservice.PollNexusTaskQueueRequest, opts ...grpc.CallOption) (*matchingservice.PollNexusTaskQueueResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -374,16 +556,16 @@ func (m *MockMatchingServiceClient) PollNexusTaskQueue(ctx context.Context, in * } // PollNexusTaskQueue indicates an expected call of PollNexusTaskQueue. -func (mr *MockMatchingServiceClientMockRecorder) PollNexusTaskQueue(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) PollNexusTaskQueue(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollNexusTaskQueue", reflect.TypeOf((*MockMatchingServiceClient)(nil).PollNexusTaskQueue), varargs...) } // PollWorkflowTaskQueue mocks base method. func (m *MockMatchingServiceClient) PollWorkflowTaskQueue(ctx context.Context, in *matchingservice.PollWorkflowTaskQueueRequest, opts ...grpc.CallOption) (*matchingservice.PollWorkflowTaskQueueResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -394,16 +576,16 @@ func (m *MockMatchingServiceClient) PollWorkflowTaskQueue(ctx context.Context, i } // PollWorkflowTaskQueue indicates an expected call of PollWorkflowTaskQueue. 
-func (mr *MockMatchingServiceClientMockRecorder) PollWorkflowTaskQueue(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) PollWorkflowTaskQueue(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollWorkflowTaskQueue", reflect.TypeOf((*MockMatchingServiceClient)(nil).PollWorkflowTaskQueue), varargs...) } // QueryWorkflow mocks base method. func (m *MockMatchingServiceClient) QueryWorkflow(ctx context.Context, in *matchingservice.QueryWorkflowRequest, opts ...grpc.CallOption) (*matchingservice.QueryWorkflowResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -414,16 +596,36 @@ func (m *MockMatchingServiceClient) QueryWorkflow(ctx context.Context, in *match } // QueryWorkflow indicates an expected call of QueryWorkflow. -func (mr *MockMatchingServiceClientMockRecorder) QueryWorkflow(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) QueryWorkflow(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryWorkflow", reflect.TypeOf((*MockMatchingServiceClient)(nil).QueryWorkflow), varargs...) } +// RecordWorkerHeartbeat mocks base method. +func (m *MockMatchingServiceClient) RecordWorkerHeartbeat(ctx context.Context, in *matchingservice.RecordWorkerHeartbeatRequest, opts ...grpc.CallOption) (*matchingservice.RecordWorkerHeartbeatResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "RecordWorkerHeartbeat", varargs...) 
+ ret0, _ := ret[0].(*matchingservice.RecordWorkerHeartbeatResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RecordWorkerHeartbeat indicates an expected call of RecordWorkerHeartbeat. +func (mr *MockMatchingServiceClientMockRecorder) RecordWorkerHeartbeat(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordWorkerHeartbeat", reflect.TypeOf((*MockMatchingServiceClient)(nil).RecordWorkerHeartbeat), varargs...) +} + // ReplicateTaskQueueUserData mocks base method. func (m *MockMatchingServiceClient) ReplicateTaskQueueUserData(ctx context.Context, in *matchingservice.ReplicateTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -434,16 +636,16 @@ func (m *MockMatchingServiceClient) ReplicateTaskQueueUserData(ctx context.Conte } // ReplicateTaskQueueUserData indicates an expected call of ReplicateTaskQueueUserData. -func (mr *MockMatchingServiceClientMockRecorder) ReplicateTaskQueueUserData(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) ReplicateTaskQueueUserData(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateTaskQueueUserData", reflect.TypeOf((*MockMatchingServiceClient)(nil).ReplicateTaskQueueUserData), varargs...) } // RespondNexusTaskCompleted mocks base method. 
func (m *MockMatchingServiceClient) RespondNexusTaskCompleted(ctx context.Context, in *matchingservice.RespondNexusTaskCompletedRequest, opts ...grpc.CallOption) (*matchingservice.RespondNexusTaskCompletedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -454,16 +656,16 @@ func (m *MockMatchingServiceClient) RespondNexusTaskCompleted(ctx context.Contex } // RespondNexusTaskCompleted indicates an expected call of RespondNexusTaskCompleted. -func (mr *MockMatchingServiceClientMockRecorder) RespondNexusTaskCompleted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) RespondNexusTaskCompleted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondNexusTaskCompleted", reflect.TypeOf((*MockMatchingServiceClient)(nil).RespondNexusTaskCompleted), varargs...) } // RespondNexusTaskFailed mocks base method. func (m *MockMatchingServiceClient) RespondNexusTaskFailed(ctx context.Context, in *matchingservice.RespondNexusTaskFailedRequest, opts ...grpc.CallOption) (*matchingservice.RespondNexusTaskFailedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -474,16 +676,16 @@ func (m *MockMatchingServiceClient) RespondNexusTaskFailed(ctx context.Context, } // RespondNexusTaskFailed indicates an expected call of RespondNexusTaskFailed. 
-func (mr *MockMatchingServiceClientMockRecorder) RespondNexusTaskFailed(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) RespondNexusTaskFailed(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondNexusTaskFailed", reflect.TypeOf((*MockMatchingServiceClient)(nil).RespondNexusTaskFailed), varargs...) } // RespondQueryTaskCompleted mocks base method. func (m *MockMatchingServiceClient) RespondQueryTaskCompleted(ctx context.Context, in *matchingservice.RespondQueryTaskCompletedRequest, opts ...grpc.CallOption) (*matchingservice.RespondQueryTaskCompletedResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -494,36 +696,96 @@ func (m *MockMatchingServiceClient) RespondQueryTaskCompleted(ctx context.Contex } // RespondQueryTaskCompleted indicates an expected call of RespondQueryTaskCompleted. -func (mr *MockMatchingServiceClientMockRecorder) RespondQueryTaskCompleted(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) RespondQueryTaskCompleted(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondQueryTaskCompleted", reflect.TypeOf((*MockMatchingServiceClient)(nil).RespondQueryTaskCompleted), varargs...) } -// UpdateNexusIncomingService mocks base method. 
-func (m *MockMatchingServiceClient) UpdateNexusIncomingService(ctx context.Context, in *matchingservice.UpdateNexusIncomingServiceRequest, opts ...grpc.CallOption) (*matchingservice.UpdateNexusIncomingServiceResponse, error) { +// SyncDeploymentUserData mocks base method. +func (m *MockMatchingServiceClient) SyncDeploymentUserData(ctx context.Context, in *matchingservice.SyncDeploymentUserDataRequest, opts ...grpc.CallOption) (*matchingservice.SyncDeploymentUserDataResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SyncDeploymentUserData", varargs...) + ret0, _ := ret[0].(*matchingservice.SyncDeploymentUserDataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncDeploymentUserData indicates an expected call of SyncDeploymentUserData. +func (mr *MockMatchingServiceClientMockRecorder) SyncDeploymentUserData(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncDeploymentUserData", reflect.TypeOf((*MockMatchingServiceClient)(nil).SyncDeploymentUserData), varargs...) +} + +// UpdateFairnessState mocks base method. +func (m *MockMatchingServiceClient) UpdateFairnessState(ctx context.Context, in *matchingservice.UpdateFairnessStateRequest, opts ...grpc.CallOption) (*matchingservice.UpdateFairnessStateResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateFairnessState", varargs...) + ret0, _ := ret[0].(*matchingservice.UpdateFairnessStateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateFairnessState indicates an expected call of UpdateFairnessState. 
+func (mr *MockMatchingServiceClientMockRecorder) UpdateFairnessState(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFairnessState", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateFairnessState), varargs...) +} + +// UpdateNexusEndpoint mocks base method. +func (m *MockMatchingServiceClient) UpdateNexusEndpoint(ctx context.Context, in *matchingservice.UpdateNexusEndpointRequest, opts ...grpc.CallOption) (*matchingservice.UpdateNexusEndpointResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "UpdateNexusIncomingService", varargs...) - ret0, _ := ret[0].(*matchingservice.UpdateNexusIncomingServiceResponse) + ret := m.ctrl.Call(m, "UpdateNexusEndpoint", varargs...) + ret0, _ := ret[0].(*matchingservice.UpdateNexusEndpointResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// UpdateNexusIncomingService indicates an expected call of UpdateNexusIncomingService. -func (mr *MockMatchingServiceClientMockRecorder) UpdateNexusIncomingService(ctx, in interface{}, opts ...interface{}) *gomock.Call { +// UpdateNexusEndpoint indicates an expected call of UpdateNexusEndpoint. +func (mr *MockMatchingServiceClientMockRecorder) UpdateNexusEndpoint(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNexusIncomingService", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateNexusIncomingService), varargs...) + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNexusEndpoint", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateNexusEndpoint), varargs...) +} + +// UpdateTaskQueueConfig mocks base method. 
+func (m *MockMatchingServiceClient) UpdateTaskQueueConfig(ctx context.Context, in *matchingservice.UpdateTaskQueueConfigRequest, opts ...grpc.CallOption) (*matchingservice.UpdateTaskQueueConfigResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateTaskQueueConfig", varargs...) + ret0, _ := ret[0].(*matchingservice.UpdateTaskQueueConfigResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateTaskQueueConfig indicates an expected call of UpdateTaskQueueConfig. +func (mr *MockMatchingServiceClientMockRecorder) UpdateTaskQueueConfig(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueueConfig", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateTaskQueueConfig), varargs...) } // UpdateTaskQueueUserData mocks base method. func (m *MockMatchingServiceClient) UpdateTaskQueueUserData(ctx context.Context, in *matchingservice.UpdateTaskQueueUserDataRequest, opts ...grpc.CallOption) (*matchingservice.UpdateTaskQueueUserDataResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -534,16 +796,16 @@ func (m *MockMatchingServiceClient) UpdateTaskQueueUserData(ctx context.Context, } // UpdateTaskQueueUserData indicates an expected call of UpdateTaskQueueUserData. -func (mr *MockMatchingServiceClientMockRecorder) UpdateTaskQueueUserData(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) UpdateTaskQueueUserData(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueueUserData", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateTaskQueueUserData), varargs...) } // UpdateWorkerBuildIdCompatibility mocks base method. func (m *MockMatchingServiceClient) UpdateWorkerBuildIdCompatibility(ctx context.Context, in *matchingservice.UpdateWorkerBuildIdCompatibilityRequest, opts ...grpc.CallOption) (*matchingservice.UpdateWorkerBuildIdCompatibilityResponse, error) { m.ctrl.T.Helper() - varargs := []interface{}{ctx, in} + varargs := []any{ctx, in} for _, a := range opts { varargs = append(varargs, a) } @@ -554,16 +816,37 @@ func (m *MockMatchingServiceClient) UpdateWorkerBuildIdCompatibility(ctx context } // UpdateWorkerBuildIdCompatibility indicates an expected call of UpdateWorkerBuildIdCompatibility. -func (mr *MockMatchingServiceClientMockRecorder) UpdateWorkerBuildIdCompatibility(ctx, in interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockMatchingServiceClientMockRecorder) UpdateWorkerBuildIdCompatibility(ctx, in any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, in}, opts...) + varargs := append([]any{ctx, in}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkerBuildIdCompatibility", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateWorkerBuildIdCompatibility), varargs...) } +// UpdateWorkerVersioningRules mocks base method. +func (m *MockMatchingServiceClient) UpdateWorkerVersioningRules(ctx context.Context, in *matchingservice.UpdateWorkerVersioningRulesRequest, opts ...grpc.CallOption) (*matchingservice.UpdateWorkerVersioningRulesResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateWorkerVersioningRules", varargs...) 
+ ret0, _ := ret[0].(*matchingservice.UpdateWorkerVersioningRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWorkerVersioningRules indicates an expected call of UpdateWorkerVersioningRules. +func (mr *MockMatchingServiceClientMockRecorder) UpdateWorkerVersioningRules(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkerVersioningRules", reflect.TypeOf((*MockMatchingServiceClient)(nil).UpdateWorkerVersioningRules), varargs...) +} + // MockMatchingServiceServer is a mock of MatchingServiceServer interface. type MockMatchingServiceServer struct { ctrl *gomock.Controller recorder *MockMatchingServiceServerMockRecorder + isgomock struct{} } // MockMatchingServiceServerMockRecorder is the mock recorder for MockMatchingServiceServer. @@ -593,7 +876,7 @@ func (m *MockMatchingServiceServer) AddActivityTask(arg0 context.Context, arg1 * } // AddActivityTask indicates an expected call of AddActivityTask. -func (mr *MockMatchingServiceServerMockRecorder) AddActivityTask(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) AddActivityTask(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddActivityTask", reflect.TypeOf((*MockMatchingServiceServer)(nil).AddActivityTask), arg0, arg1) } @@ -608,7 +891,7 @@ func (m *MockMatchingServiceServer) AddWorkflowTask(arg0 context.Context, arg1 * } // AddWorkflowTask indicates an expected call of AddWorkflowTask. 
-func (mr *MockMatchingServiceServerMockRecorder) AddWorkflowTask(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) AddWorkflowTask(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWorkflowTask", reflect.TypeOf((*MockMatchingServiceServer)(nil).AddWorkflowTask), arg0, arg1) } @@ -623,7 +906,7 @@ func (m *MockMatchingServiceServer) ApplyTaskQueueUserDataReplicationEvent(arg0 } // ApplyTaskQueueUserDataReplicationEvent indicates an expected call of ApplyTaskQueueUserDataReplicationEvent. -func (mr *MockMatchingServiceServerMockRecorder) ApplyTaskQueueUserDataReplicationEvent(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) ApplyTaskQueueUserDataReplicationEvent(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplyTaskQueueUserDataReplicationEvent", reflect.TypeOf((*MockMatchingServiceServer)(nil).ApplyTaskQueueUserDataReplicationEvent), arg0, arg1) } @@ -638,39 +921,84 @@ func (m *MockMatchingServiceServer) CancelOutstandingPoll(arg0 context.Context, } // CancelOutstandingPoll indicates an expected call of CancelOutstandingPoll. -func (mr *MockMatchingServiceServerMockRecorder) CancelOutstandingPoll(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) CancelOutstandingPoll(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelOutstandingPoll", reflect.TypeOf((*MockMatchingServiceServer)(nil).CancelOutstandingPoll), arg0, arg1) } -// CreateNexusIncomingService mocks base method. -func (m *MockMatchingServiceServer) CreateNexusIncomingService(arg0 context.Context, arg1 *matchingservice.CreateNexusIncomingServiceRequest) (*matchingservice.CreateNexusIncomingServiceResponse, error) { +// CancelOutstandingWorkerPolls mocks base method. 
+func (m *MockMatchingServiceServer) CancelOutstandingWorkerPolls(arg0 context.Context, arg1 *matchingservice.CancelOutstandingWorkerPollsRequest) (*matchingservice.CancelOutstandingWorkerPollsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelOutstandingWorkerPolls", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.CancelOutstandingWorkerPollsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelOutstandingWorkerPolls indicates an expected call of CancelOutstandingWorkerPolls. +func (mr *MockMatchingServiceServerMockRecorder) CancelOutstandingWorkerPolls(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelOutstandingWorkerPolls", reflect.TypeOf((*MockMatchingServiceServer)(nil).CancelOutstandingWorkerPolls), arg0, arg1) +} + +// CheckTaskQueueUserDataPropagation mocks base method. +func (m *MockMatchingServiceServer) CheckTaskQueueUserDataPropagation(arg0 context.Context, arg1 *matchingservice.CheckTaskQueueUserDataPropagationRequest) (*matchingservice.CheckTaskQueueUserDataPropagationResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateNexusIncomingService", arg0, arg1) - ret0, _ := ret[0].(*matchingservice.CreateNexusIncomingServiceResponse) + ret := m.ctrl.Call(m, "CheckTaskQueueUserDataPropagation", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.CheckTaskQueueUserDataPropagationResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// CreateNexusIncomingService indicates an expected call of CreateNexusIncomingService. -func (mr *MockMatchingServiceServerMockRecorder) CreateNexusIncomingService(arg0, arg1 interface{}) *gomock.Call { +// CheckTaskQueueUserDataPropagation indicates an expected call of CheckTaskQueueUserDataPropagation. 
+func (mr *MockMatchingServiceServerMockRecorder) CheckTaskQueueUserDataPropagation(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNexusIncomingService", reflect.TypeOf((*MockMatchingServiceServer)(nil).CreateNexusIncomingService), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTaskQueueUserDataPropagation", reflect.TypeOf((*MockMatchingServiceServer)(nil).CheckTaskQueueUserDataPropagation), arg0, arg1) } -// DeleteNexusIncomingService mocks base method. -func (m *MockMatchingServiceServer) DeleteNexusIncomingService(arg0 context.Context, arg1 *matchingservice.DeleteNexusIncomingServiceRequest) (*matchingservice.DeleteNexusIncomingServiceResponse, error) { +// CheckTaskQueueVersionMembership mocks base method. +func (m *MockMatchingServiceServer) CheckTaskQueueVersionMembership(arg0 context.Context, arg1 *matchingservice.CheckTaskQueueVersionMembershipRequest) (*matchingservice.CheckTaskQueueVersionMembershipResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteNexusIncomingService", arg0, arg1) - ret0, _ := ret[0].(*matchingservice.DeleteNexusIncomingServiceResponse) + ret := m.ctrl.Call(m, "CheckTaskQueueVersionMembership", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.CheckTaskQueueVersionMembershipResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteNexusIncomingService indicates an expected call of DeleteNexusIncomingService. -func (mr *MockMatchingServiceServerMockRecorder) DeleteNexusIncomingService(arg0, arg1 interface{}) *gomock.Call { +// CheckTaskQueueVersionMembership indicates an expected call of CheckTaskQueueVersionMembership. 
+func (mr *MockMatchingServiceServerMockRecorder) CheckTaskQueueVersionMembership(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNexusIncomingService", reflect.TypeOf((*MockMatchingServiceServer)(nil).DeleteNexusIncomingService), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckTaskQueueVersionMembership", reflect.TypeOf((*MockMatchingServiceServer)(nil).CheckTaskQueueVersionMembership), arg0, arg1) +} + +// CreateNexusEndpoint mocks base method. +func (m *MockMatchingServiceServer) CreateNexusEndpoint(arg0 context.Context, arg1 *matchingservice.CreateNexusEndpointRequest) (*matchingservice.CreateNexusEndpointResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateNexusEndpoint", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.CreateNexusEndpointResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateNexusEndpoint indicates an expected call of CreateNexusEndpoint. +func (mr *MockMatchingServiceServerMockRecorder) CreateNexusEndpoint(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateNexusEndpoint", reflect.TypeOf((*MockMatchingServiceServer)(nil).CreateNexusEndpoint), arg0, arg1) +} + +// DeleteNexusEndpoint mocks base method. +func (m *MockMatchingServiceServer) DeleteNexusEndpoint(arg0 context.Context, arg1 *matchingservice.DeleteNexusEndpointRequest) (*matchingservice.DeleteNexusEndpointResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteNexusEndpoint", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.DeleteNexusEndpointResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteNexusEndpoint indicates an expected call of DeleteNexusEndpoint. 
+func (mr *MockMatchingServiceServerMockRecorder) DeleteNexusEndpoint(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteNexusEndpoint", reflect.TypeOf((*MockMatchingServiceServer)(nil).DeleteNexusEndpoint), arg0, arg1) } // DescribeTaskQueue mocks base method. @@ -683,11 +1011,56 @@ func (m *MockMatchingServiceServer) DescribeTaskQueue(arg0 context.Context, arg1 } // DescribeTaskQueue indicates an expected call of DescribeTaskQueue. -func (mr *MockMatchingServiceServerMockRecorder) DescribeTaskQueue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) DescribeTaskQueue(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTaskQueue", reflect.TypeOf((*MockMatchingServiceServer)(nil).DescribeTaskQueue), arg0, arg1) } +// DescribeTaskQueuePartition mocks base method. +func (m *MockMatchingServiceServer) DescribeTaskQueuePartition(arg0 context.Context, arg1 *matchingservice.DescribeTaskQueuePartitionRequest) (*matchingservice.DescribeTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeTaskQueuePartition", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.DescribeTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeTaskQueuePartition indicates an expected call of DescribeTaskQueuePartition. +func (mr *MockMatchingServiceServerMockRecorder) DescribeTaskQueuePartition(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeTaskQueuePartition", reflect.TypeOf((*MockMatchingServiceServer)(nil).DescribeTaskQueuePartition), arg0, arg1) +} + +// DescribeVersionedTaskQueues mocks base method. 
+func (m *MockMatchingServiceServer) DescribeVersionedTaskQueues(arg0 context.Context, arg1 *matchingservice.DescribeVersionedTaskQueuesRequest) (*matchingservice.DescribeVersionedTaskQueuesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeVersionedTaskQueues", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.DescribeVersionedTaskQueuesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeVersionedTaskQueues indicates an expected call of DescribeVersionedTaskQueues. +func (mr *MockMatchingServiceServerMockRecorder) DescribeVersionedTaskQueues(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeVersionedTaskQueues", reflect.TypeOf((*MockMatchingServiceServer)(nil).DescribeVersionedTaskQueues), arg0, arg1) +} + +// DescribeWorker mocks base method. +func (m *MockMatchingServiceServer) DescribeWorker(arg0 context.Context, arg1 *matchingservice.DescribeWorkerRequest) (*matchingservice.DescribeWorkerResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeWorker", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.DescribeWorkerResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeWorker indicates an expected call of DescribeWorker. +func (mr *MockMatchingServiceServerMockRecorder) DescribeWorker(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeWorker", reflect.TypeOf((*MockMatchingServiceServer)(nil).DescribeWorker), arg0, arg1) +} + // DispatchNexusTask mocks base method. func (m *MockMatchingServiceServer) DispatchNexusTask(arg0 context.Context, arg1 *matchingservice.DispatchNexusTaskRequest) (*matchingservice.DispatchNexusTaskResponse, error) { m.ctrl.T.Helper() @@ -698,11 +1071,26 @@ func (m *MockMatchingServiceServer) DispatchNexusTask(arg0 context.Context, arg1 } // DispatchNexusTask indicates an expected call of DispatchNexusTask. 
-func (mr *MockMatchingServiceServerMockRecorder) DispatchNexusTask(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) DispatchNexusTask(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DispatchNexusTask", reflect.TypeOf((*MockMatchingServiceServer)(nil).DispatchNexusTask), arg0, arg1) } +// ForceLoadTaskQueuePartition mocks base method. +func (m *MockMatchingServiceServer) ForceLoadTaskQueuePartition(arg0 context.Context, arg1 *matchingservice.ForceLoadTaskQueuePartitionRequest) (*matchingservice.ForceLoadTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForceLoadTaskQueuePartition", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.ForceLoadTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForceLoadTaskQueuePartition indicates an expected call of ForceLoadTaskQueuePartition. +func (mr *MockMatchingServiceServerMockRecorder) ForceLoadTaskQueuePartition(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceLoadTaskQueuePartition", reflect.TypeOf((*MockMatchingServiceServer)(nil).ForceLoadTaskQueuePartition), arg0, arg1) +} + // ForceUnloadTaskQueue mocks base method. func (m *MockMatchingServiceServer) ForceUnloadTaskQueue(arg0 context.Context, arg1 *matchingservice.ForceUnloadTaskQueueRequest) (*matchingservice.ForceUnloadTaskQueueResponse, error) { m.ctrl.T.Helper() @@ -713,11 +1101,26 @@ func (m *MockMatchingServiceServer) ForceUnloadTaskQueue(arg0 context.Context, a } // ForceUnloadTaskQueue indicates an expected call of ForceUnloadTaskQueue. 
-func (mr *MockMatchingServiceServerMockRecorder) ForceUnloadTaskQueue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) ForceUnloadTaskQueue(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUnloadTaskQueue", reflect.TypeOf((*MockMatchingServiceServer)(nil).ForceUnloadTaskQueue), arg0, arg1) } +// ForceUnloadTaskQueuePartition mocks base method. +func (m *MockMatchingServiceServer) ForceUnloadTaskQueuePartition(arg0 context.Context, arg1 *matchingservice.ForceUnloadTaskQueuePartitionRequest) (*matchingservice.ForceUnloadTaskQueuePartitionResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ForceUnloadTaskQueuePartition", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.ForceUnloadTaskQueuePartitionResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ForceUnloadTaskQueuePartition indicates an expected call of ForceUnloadTaskQueuePartition. +func (mr *MockMatchingServiceServerMockRecorder) ForceUnloadTaskQueuePartition(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUnloadTaskQueuePartition", reflect.TypeOf((*MockMatchingServiceServer)(nil).ForceUnloadTaskQueuePartition), arg0, arg1) +} + // GetBuildIdTaskQueueMapping mocks base method. func (m *MockMatchingServiceServer) GetBuildIdTaskQueueMapping(arg0 context.Context, arg1 *matchingservice.GetBuildIdTaskQueueMappingRequest) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) { m.ctrl.T.Helper() @@ -728,7 +1131,7 @@ func (m *MockMatchingServiceServer) GetBuildIdTaskQueueMapping(arg0 context.Cont } // GetBuildIdTaskQueueMapping indicates an expected call of GetBuildIdTaskQueueMapping. 
-func (mr *MockMatchingServiceServerMockRecorder) GetBuildIdTaskQueueMapping(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) GetBuildIdTaskQueueMapping(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBuildIdTaskQueueMapping", reflect.TypeOf((*MockMatchingServiceServer)(nil).GetBuildIdTaskQueueMapping), arg0, arg1) } @@ -743,7 +1146,7 @@ func (m *MockMatchingServiceServer) GetTaskQueueUserData(arg0 context.Context, a } // GetTaskQueueUserData indicates an expected call of GetTaskQueueUserData. -func (mr *MockMatchingServiceServerMockRecorder) GetTaskQueueUserData(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) GetTaskQueueUserData(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTaskQueueUserData", reflect.TypeOf((*MockMatchingServiceServer)(nil).GetTaskQueueUserData), arg0, arg1) } @@ -758,24 +1161,39 @@ func (m *MockMatchingServiceServer) GetWorkerBuildIdCompatibility(arg0 context.C } // GetWorkerBuildIdCompatibility indicates an expected call of GetWorkerBuildIdCompatibility. -func (mr *MockMatchingServiceServerMockRecorder) GetWorkerBuildIdCompatibility(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) GetWorkerBuildIdCompatibility(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerBuildIdCompatibility", reflect.TypeOf((*MockMatchingServiceServer)(nil).GetWorkerBuildIdCompatibility), arg0, arg1) } -// ListNexusIncomingServices mocks base method. -func (m *MockMatchingServiceServer) ListNexusIncomingServices(arg0 context.Context, arg1 *matchingservice.ListNexusIncomingServicesRequest) (*matchingservice.ListNexusIncomingServicesResponse, error) { +// GetWorkerVersioningRules mocks base method. 
+func (m *MockMatchingServiceServer) GetWorkerVersioningRules(arg0 context.Context, arg1 *matchingservice.GetWorkerVersioningRulesRequest) (*matchingservice.GetWorkerVersioningRulesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWorkerVersioningRules", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.GetWorkerVersioningRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetWorkerVersioningRules indicates an expected call of GetWorkerVersioningRules. +func (mr *MockMatchingServiceServerMockRecorder) GetWorkerVersioningRules(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWorkerVersioningRules", reflect.TypeOf((*MockMatchingServiceServer)(nil).GetWorkerVersioningRules), arg0, arg1) +} + +// ListNexusEndpoints mocks base method. +func (m *MockMatchingServiceServer) ListNexusEndpoints(arg0 context.Context, arg1 *matchingservice.ListNexusEndpointsRequest) (*matchingservice.ListNexusEndpointsResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListNexusIncomingServices", arg0, arg1) - ret0, _ := ret[0].(*matchingservice.ListNexusIncomingServicesResponse) + ret := m.ctrl.Call(m, "ListNexusEndpoints", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.ListNexusEndpointsResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// ListNexusIncomingServices indicates an expected call of ListNexusIncomingServices. -func (mr *MockMatchingServiceServerMockRecorder) ListNexusIncomingServices(arg0, arg1 interface{}) *gomock.Call { +// ListNexusEndpoints indicates an expected call of ListNexusEndpoints. 
+func (mr *MockMatchingServiceServerMockRecorder) ListNexusEndpoints(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNexusIncomingServices", reflect.TypeOf((*MockMatchingServiceServer)(nil).ListNexusIncomingServices), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNexusEndpoints", reflect.TypeOf((*MockMatchingServiceServer)(nil).ListNexusEndpoints), arg0, arg1) } // ListTaskQueuePartitions mocks base method. @@ -788,11 +1206,26 @@ func (m *MockMatchingServiceServer) ListTaskQueuePartitions(arg0 context.Context } // ListTaskQueuePartitions indicates an expected call of ListTaskQueuePartitions. -func (mr *MockMatchingServiceServerMockRecorder) ListTaskQueuePartitions(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) ListTaskQueuePartitions(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTaskQueuePartitions", reflect.TypeOf((*MockMatchingServiceServer)(nil).ListTaskQueuePartitions), arg0, arg1) } +// ListWorkers mocks base method. +func (m *MockMatchingServiceServer) ListWorkers(arg0 context.Context, arg1 *matchingservice.ListWorkersRequest) (*matchingservice.ListWorkersResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListWorkers", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.ListWorkersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListWorkers indicates an expected call of ListWorkers. +func (mr *MockMatchingServiceServerMockRecorder) ListWorkers(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListWorkers", reflect.TypeOf((*MockMatchingServiceServer)(nil).ListWorkers), arg0, arg1) +} + // PollActivityTaskQueue mocks base method. 
func (m *MockMatchingServiceServer) PollActivityTaskQueue(arg0 context.Context, arg1 *matchingservice.PollActivityTaskQueueRequest) (*matchingservice.PollActivityTaskQueueResponse, error) { m.ctrl.T.Helper() @@ -803,7 +1236,7 @@ func (m *MockMatchingServiceServer) PollActivityTaskQueue(arg0 context.Context, } // PollActivityTaskQueue indicates an expected call of PollActivityTaskQueue. -func (mr *MockMatchingServiceServerMockRecorder) PollActivityTaskQueue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) PollActivityTaskQueue(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollActivityTaskQueue", reflect.TypeOf((*MockMatchingServiceServer)(nil).PollActivityTaskQueue), arg0, arg1) } @@ -818,7 +1251,7 @@ func (m *MockMatchingServiceServer) PollNexusTaskQueue(arg0 context.Context, arg } // PollNexusTaskQueue indicates an expected call of PollNexusTaskQueue. -func (mr *MockMatchingServiceServerMockRecorder) PollNexusTaskQueue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) PollNexusTaskQueue(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollNexusTaskQueue", reflect.TypeOf((*MockMatchingServiceServer)(nil).PollNexusTaskQueue), arg0, arg1) } @@ -833,7 +1266,7 @@ func (m *MockMatchingServiceServer) PollWorkflowTaskQueue(arg0 context.Context, } // PollWorkflowTaskQueue indicates an expected call of PollWorkflowTaskQueue. 
-func (mr *MockMatchingServiceServerMockRecorder) PollWorkflowTaskQueue(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) PollWorkflowTaskQueue(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollWorkflowTaskQueue", reflect.TypeOf((*MockMatchingServiceServer)(nil).PollWorkflowTaskQueue), arg0, arg1) } @@ -848,11 +1281,26 @@ func (m *MockMatchingServiceServer) QueryWorkflow(arg0 context.Context, arg1 *ma } // QueryWorkflow indicates an expected call of QueryWorkflow. -func (mr *MockMatchingServiceServerMockRecorder) QueryWorkflow(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) QueryWorkflow(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryWorkflow", reflect.TypeOf((*MockMatchingServiceServer)(nil).QueryWorkflow), arg0, arg1) } +// RecordWorkerHeartbeat mocks base method. +func (m *MockMatchingServiceServer) RecordWorkerHeartbeat(arg0 context.Context, arg1 *matchingservice.RecordWorkerHeartbeatRequest) (*matchingservice.RecordWorkerHeartbeatResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RecordWorkerHeartbeat", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.RecordWorkerHeartbeatResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RecordWorkerHeartbeat indicates an expected call of RecordWorkerHeartbeat. +func (mr *MockMatchingServiceServerMockRecorder) RecordWorkerHeartbeat(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecordWorkerHeartbeat", reflect.TypeOf((*MockMatchingServiceServer)(nil).RecordWorkerHeartbeat), arg0, arg1) +} + // ReplicateTaskQueueUserData mocks base method. 
func (m *MockMatchingServiceServer) ReplicateTaskQueueUserData(arg0 context.Context, arg1 *matchingservice.ReplicateTaskQueueUserDataRequest) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) { m.ctrl.T.Helper() @@ -863,7 +1311,7 @@ func (m *MockMatchingServiceServer) ReplicateTaskQueueUserData(arg0 context.Cont } // ReplicateTaskQueueUserData indicates an expected call of ReplicateTaskQueueUserData. -func (mr *MockMatchingServiceServerMockRecorder) ReplicateTaskQueueUserData(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) ReplicateTaskQueueUserData(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReplicateTaskQueueUserData", reflect.TypeOf((*MockMatchingServiceServer)(nil).ReplicateTaskQueueUserData), arg0, arg1) } @@ -878,7 +1326,7 @@ func (m *MockMatchingServiceServer) RespondNexusTaskCompleted(arg0 context.Conte } // RespondNexusTaskCompleted indicates an expected call of RespondNexusTaskCompleted. -func (mr *MockMatchingServiceServerMockRecorder) RespondNexusTaskCompleted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) RespondNexusTaskCompleted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondNexusTaskCompleted", reflect.TypeOf((*MockMatchingServiceServer)(nil).RespondNexusTaskCompleted), arg0, arg1) } @@ -893,7 +1341,7 @@ func (m *MockMatchingServiceServer) RespondNexusTaskFailed(arg0 context.Context, } // RespondNexusTaskFailed indicates an expected call of RespondNexusTaskFailed. 
-func (mr *MockMatchingServiceServerMockRecorder) RespondNexusTaskFailed(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) RespondNexusTaskFailed(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondNexusTaskFailed", reflect.TypeOf((*MockMatchingServiceServer)(nil).RespondNexusTaskFailed), arg0, arg1) } @@ -908,24 +1356,69 @@ func (m *MockMatchingServiceServer) RespondQueryTaskCompleted(arg0 context.Conte } // RespondQueryTaskCompleted indicates an expected call of RespondQueryTaskCompleted. -func (mr *MockMatchingServiceServerMockRecorder) RespondQueryTaskCompleted(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) RespondQueryTaskCompleted(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RespondQueryTaskCompleted", reflect.TypeOf((*MockMatchingServiceServer)(nil).RespondQueryTaskCompleted), arg0, arg1) } -// UpdateNexusIncomingService mocks base method. -func (m *MockMatchingServiceServer) UpdateNexusIncomingService(arg0 context.Context, arg1 *matchingservice.UpdateNexusIncomingServiceRequest) (*matchingservice.UpdateNexusIncomingServiceResponse, error) { +// SyncDeploymentUserData mocks base method. +func (m *MockMatchingServiceServer) SyncDeploymentUserData(arg0 context.Context, arg1 *matchingservice.SyncDeploymentUserDataRequest) (*matchingservice.SyncDeploymentUserDataResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncDeploymentUserData", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.SyncDeploymentUserDataResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SyncDeploymentUserData indicates an expected call of SyncDeploymentUserData. 
+func (mr *MockMatchingServiceServerMockRecorder) SyncDeploymentUserData(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncDeploymentUserData", reflect.TypeOf((*MockMatchingServiceServer)(nil).SyncDeploymentUserData), arg0, arg1) +} + +// UpdateFairnessState mocks base method. +func (m *MockMatchingServiceServer) UpdateFairnessState(arg0 context.Context, arg1 *matchingservice.UpdateFairnessStateRequest) (*matchingservice.UpdateFairnessStateResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateFairnessState", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.UpdateFairnessStateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateFairnessState indicates an expected call of UpdateFairnessState. +func (mr *MockMatchingServiceServerMockRecorder) UpdateFairnessState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFairnessState", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateFairnessState), arg0, arg1) +} + +// UpdateNexusEndpoint mocks base method. +func (m *MockMatchingServiceServer) UpdateNexusEndpoint(arg0 context.Context, arg1 *matchingservice.UpdateNexusEndpointRequest) (*matchingservice.UpdateNexusEndpointResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateNexusEndpoint", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.UpdateNexusEndpointResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateNexusEndpoint indicates an expected call of UpdateNexusEndpoint. +func (mr *MockMatchingServiceServerMockRecorder) UpdateNexusEndpoint(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNexusEndpoint", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateNexusEndpoint), arg0, arg1) +} + +// UpdateTaskQueueConfig mocks base method. 
+func (m *MockMatchingServiceServer) UpdateTaskQueueConfig(arg0 context.Context, arg1 *matchingservice.UpdateTaskQueueConfigRequest) (*matchingservice.UpdateTaskQueueConfigResponse, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdateNexusIncomingService", arg0, arg1) - ret0, _ := ret[0].(*matchingservice.UpdateNexusIncomingServiceResponse) + ret := m.ctrl.Call(m, "UpdateTaskQueueConfig", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.UpdateTaskQueueConfigResponse) ret1, _ := ret[1].(error) return ret0, ret1 } -// UpdateNexusIncomingService indicates an expected call of UpdateNexusIncomingService. -func (mr *MockMatchingServiceServerMockRecorder) UpdateNexusIncomingService(arg0, arg1 interface{}) *gomock.Call { +// UpdateTaskQueueConfig indicates an expected call of UpdateTaskQueueConfig. +func (mr *MockMatchingServiceServerMockRecorder) UpdateTaskQueueConfig(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateNexusIncomingService", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateNexusIncomingService), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueueConfig", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateTaskQueueConfig), arg0, arg1) } // UpdateTaskQueueUserData mocks base method. @@ -938,7 +1431,7 @@ func (m *MockMatchingServiceServer) UpdateTaskQueueUserData(arg0 context.Context } // UpdateTaskQueueUserData indicates an expected call of UpdateTaskQueueUserData. 
-func (mr *MockMatchingServiceServerMockRecorder) UpdateTaskQueueUserData(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) UpdateTaskQueueUserData(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTaskQueueUserData", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateTaskQueueUserData), arg0, arg1) } @@ -953,11 +1446,26 @@ func (m *MockMatchingServiceServer) UpdateWorkerBuildIdCompatibility(arg0 contex } // UpdateWorkerBuildIdCompatibility indicates an expected call of UpdateWorkerBuildIdCompatibility. -func (mr *MockMatchingServiceServerMockRecorder) UpdateWorkerBuildIdCompatibility(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMatchingServiceServerMockRecorder) UpdateWorkerBuildIdCompatibility(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkerBuildIdCompatibility", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateWorkerBuildIdCompatibility), arg0, arg1) } +// UpdateWorkerVersioningRules mocks base method. +func (m *MockMatchingServiceServer) UpdateWorkerVersioningRules(arg0 context.Context, arg1 *matchingservice.UpdateWorkerVersioningRulesRequest) (*matchingservice.UpdateWorkerVersioningRulesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateWorkerVersioningRules", arg0, arg1) + ret0, _ := ret[0].(*matchingservice.UpdateWorkerVersioningRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWorkerVersioningRules indicates an expected call of UpdateWorkerVersioningRules. 
+func (mr *MockMatchingServiceServerMockRecorder) UpdateWorkerVersioningRules(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWorkerVersioningRules", reflect.TypeOf((*MockMatchingServiceServer)(nil).UpdateWorkerVersioningRules), arg0, arg1) +} + // mustEmbedUnimplementedMatchingServiceServer mocks base method. func (m *MockMatchingServiceServer) mustEmbedUnimplementedMatchingServiceServer() { m.ctrl.T.Helper() @@ -974,6 +1482,7 @@ func (mr *MockMatchingServiceServerMockRecorder) mustEmbedUnimplementedMatchingS type MockUnsafeMatchingServiceServer struct { ctrl *gomock.Controller recorder *MockUnsafeMatchingServiceServerMockRecorder + isgomock struct{} } // MockUnsafeMatchingServiceServerMockRecorder is the mock recorder for MockUnsafeMatchingServiceServer. diff --git a/api/metrics/v1/message.go-helpers.pb.go b/api/metrics/v1/message.go-helpers.pb.go index ac24ca1757a..f68a0912ef4 100644 --- a/api/metrics/v1/message.go-helpers.pb.go +++ b/api/metrics/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package metrics diff --git a/api/metrics/v1/message.pb.go b/api/metrics/v1/message.pb.go index 2a50768dd2e..984b72e8feb 100644 --- a/api/metrics/v1/message.pb.go +++ b/api/metrics/v1/message.pb.go @@ -1,47 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -53,6 +9,7 @@ package metrics import ( reflect "reflect" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -66,20 +23,17 @@ const ( ) type Baggage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + CountersInt map[string]int64 `protobuf:"bytes,1,rep,name=counters_int,json=countersInt,proto3" json:"counters_int,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - CountersInt map[string]int64 `protobuf:"bytes,1,rep,name=counters_int,json=countersInt,proto3" json:"counters_int,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *Baggage) Reset() { *x = Baggage{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_metrics_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_metrics_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Baggage) String() string { @@ -90,7 +44,7 @@ func (*Baggage) ProtoMessage() {} func (x *Baggage) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_metrics_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -114,42 +68,29 @@ func (x *Baggage) GetCountersInt() map[string]int64 { var File_temporal_server_api_metrics_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_metrics_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 
0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x22, 0xb2, 0x01, 0x0a, 0x07, 0x42, - 0x61, 0x67, 0x67, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x0c, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, - 0x72, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x67, 0x67, - 0x61, 0x67, 0x65, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x74, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x49, 0x6e, 0x74, - 0x42, 0x02, 0x68, 0x00, 0x1a, 0x46, 0x0a, 0x10, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x49, - 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x2e, 0x5a, 0x2c, 0x67, 0x6f, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x31, 0x3b, - 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_metrics_v1_message_proto_rawDesc = "" + + "\n" + + 
",temporal/server/api/metrics/v1/message.proto\x12\x1etemporal.server.api.metrics.v1\"\xa6\x01\n" + + "\aBaggage\x12[\n" + + "\fcounters_int\x18\x01 \x03(\v28.temporal.server.api.metrics.v1.Baggage.CountersIntEntryR\vcountersInt\x1a>\n" + + "\x10CountersIntEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01B.Z,go.temporal.io/server/api/metrics/v1;metricsb\x06proto3" var ( file_temporal_server_api_metrics_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_metrics_v1_message_proto_rawDescData = file_temporal_server_api_metrics_v1_message_proto_rawDesc + file_temporal_server_api_metrics_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_metrics_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_metrics_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_metrics_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_metrics_v1_message_proto_rawDescData) + file_temporal_server_api_metrics_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_metrics_v1_message_proto_rawDesc), len(file_temporal_server_api_metrics_v1_message_proto_rawDesc))) }) return file_temporal_server_api_metrics_v1_message_proto_rawDescData } var file_temporal_server_api_metrics_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_temporal_server_api_metrics_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_metrics_v1_message_proto_goTypes = []any{ (*Baggage)(nil), // 0: temporal.server.api.metrics.v1.Baggage nil, // 1: temporal.server.api.metrics.v1.Baggage.CountersIntEntry } @@ -167,25 +108,11 @@ func file_temporal_server_api_metrics_v1_message_proto_init() { if File_temporal_server_api_metrics_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_metrics_v1_message_proto_msgTypes[0].Exporter = func(v 
interface{}, i int) interface{} { - switch v := v.(*Baggage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_metrics_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_metrics_v1_message_proto_rawDesc), len(file_temporal_server_api_metrics_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -196,7 +123,6 @@ func file_temporal_server_api_metrics_v1_message_proto_init() { MessageInfos: file_temporal_server_api_metrics_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_metrics_v1_message_proto = out.File - file_temporal_server_api_metrics_v1_message_proto_rawDesc = nil file_temporal_server_api_metrics_v1_message_proto_goTypes = nil file_temporal_server_api_metrics_v1_message_proto_depIdxs = nil } diff --git a/api/namespace/v1/message.go-helpers.pb.go b/api/namespace/v1/message.go-helpers.pb.go index b1416849a43..74c78a6d7c8 100644 --- a/api/namespace/v1/message.go-helpers.pb.go +++ b/api/namespace/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package namespace diff --git a/api/namespace/v1/message.pb.go b/api/namespace/v1/message.pb.go index fd119372c81..b3162fabfb3 100644 --- a/api/namespace/v1/message.pb.go +++ b/api/namespace/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package namespace import ( reflect "reflect" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -44,10 +23,7 @@ const ( ) type NamespaceCacheInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "in" and "by" are needed here. --) @@ -56,15 +32,15 @@ type NamespaceCacheInfo struct { // // aip.dev/not-precedent: "in" and "by" are needed here. 
--) ItemsInCacheByNameCount int64 `protobuf:"varint,2,opt,name=items_in_cache_by_name_count,json=itemsInCacheByNameCount,proto3" json:"items_in_cache_by_name_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NamespaceCacheInfo) Reset() { *x = NamespaceCacheInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_namespace_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_namespace_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceCacheInfo) String() string { @@ -75,7 +51,7 @@ func (*NamespaceCacheInfo) ProtoMessage() {} func (x *NamespaceCacheInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_namespace_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -106,41 +82,27 @@ func (x *NamespaceCacheInfo) GetItemsInCacheByNameCount() int64 { var File_temporal_server_api_namespace_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_namespace_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, - 0x96, 0x01, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x61, 0x63, - 0x68, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x1a, 
0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x69, - 0x6e, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x64, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x49, - 0x6e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x42, 0x79, 0x49, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x1c, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x5f, 0x69, 0x6e, 0x5f, - 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x62, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x17, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x49, - 0x6e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x42, 0x02, 0x68, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_namespace_v1_message_proto_rawDesc = "" + + "\n" + + ".temporal/server/api/namespace/v1/message.proto\x12 temporal.server.api.namespace.v1\"\x8e\x01\n" + + "\x12NamespaceCacheInfo\x129\n" + + "\x1aitems_in_cache_by_id_count\x18\x01 \x01(\x03R\x15itemsInCacheByIdCount\x12=\n" + + "\x1citems_in_cache_by_name_count\x18\x02 \x01(\x03R\x17itemsInCacheByNameCountB2Z0go.temporal.io/server/api/namespace/v1;namespaceb\x06proto3" var ( file_temporal_server_api_namespace_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_namespace_v1_message_proto_rawDescData = file_temporal_server_api_namespace_v1_message_proto_rawDesc + file_temporal_server_api_namespace_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_namespace_v1_message_proto_rawDescGZIP() []byte { 
file_temporal_server_api_namespace_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_namespace_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_namespace_v1_message_proto_rawDescData) + file_temporal_server_api_namespace_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_namespace_v1_message_proto_rawDesc), len(file_temporal_server_api_namespace_v1_message_proto_rawDesc))) }) return file_temporal_server_api_namespace_v1_message_proto_rawDescData } var file_temporal_server_api_namespace_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_temporal_server_api_namespace_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_namespace_v1_message_proto_goTypes = []any{ (*NamespaceCacheInfo)(nil), // 0: temporal.server.api.namespace.v1.NamespaceCacheInfo } var file_temporal_server_api_namespace_v1_message_proto_depIdxs = []int32{ @@ -156,25 +118,11 @@ func file_temporal_server_api_namespace_v1_message_proto_init() { if File_temporal_server_api_namespace_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_namespace_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceCacheInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_namespace_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_namespace_v1_message_proto_rawDesc), len(file_temporal_server_api_namespace_v1_message_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -185,7 +133,6 @@ func file_temporal_server_api_namespace_v1_message_proto_init() { MessageInfos: 
file_temporal_server_api_namespace_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_namespace_v1_message_proto = out.File - file_temporal_server_api_namespace_v1_message_proto_rawDesc = nil file_temporal_server_api_namespace_v1_message_proto_goTypes = nil file_temporal_server_api_namespace_v1_message_proto_depIdxs = nil } diff --git a/api/persistence/v1/chasm.go-helpers.pb.go b/api/persistence/v1/chasm.go-helpers.pb.go new file mode 100644 index 00000000000..e746326b439 --- /dev/null +++ b/api/persistence/v1/chasm.go-helpers.pb.go @@ -0,0 +1,339 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package persistence + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ChasmNode to the protobuf v3 wire format +func (val *ChasmNode) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmNode from the protobuf v3 wire format +func (val *ChasmNode) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmNode) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmNode values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmNode) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmNode + switch t := that.(type) { + case *ChasmNode: + that1 = t + case ChasmNode: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmNodeMetadata to the protobuf v3 wire format +func (val *ChasmNodeMetadata) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmNodeMetadata from the protobuf v3 wire format +func (val *ChasmNodeMetadata) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmNodeMetadata) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmNodeMetadata values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmNodeMetadata) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmNodeMetadata + switch t := that.(type) { + case *ChasmNodeMetadata: + that1 = t + case ChasmNodeMetadata: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmComponentAttributes to the protobuf v3 wire format +func (val *ChasmComponentAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmComponentAttributes from the protobuf v3 wire format +func (val *ChasmComponentAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmComponentAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmComponentAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmComponentAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmComponentAttributes + switch t := that.(type) { + case *ChasmComponentAttributes: + that1 = t + case ChasmComponentAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmDataAttributes to the protobuf v3 wire format +func (val *ChasmDataAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmDataAttributes from the protobuf v3 wire format +func (val *ChasmDataAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmDataAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmDataAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmDataAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmDataAttributes + switch t := that.(type) { + case *ChasmDataAttributes: + that1 = t + case ChasmDataAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmCollectionAttributes to the protobuf v3 wire format +func (val *ChasmCollectionAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmCollectionAttributes from the protobuf v3 wire format +func (val *ChasmCollectionAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmCollectionAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmCollectionAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmCollectionAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmCollectionAttributes + switch t := that.(type) { + case *ChasmCollectionAttributes: + that1 = t + case ChasmCollectionAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmPointerAttributes to the protobuf v3 wire format +func (val *ChasmPointerAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmPointerAttributes from the protobuf v3 wire format +func (val *ChasmPointerAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmPointerAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmPointerAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmPointerAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmPointerAttributes + switch t := that.(type) { + case *ChasmPointerAttributes: + that1 = t + case ChasmPointerAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmTaskInfo to the protobuf v3 wire format +func (val *ChasmTaskInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmTaskInfo from the protobuf v3 wire format +func (val *ChasmTaskInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmTaskInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmTaskInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmTaskInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmTaskInfo + switch t := that.(type) { + case *ChasmTaskInfo: + that1 = t + case ChasmTaskInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmComponentRef to the protobuf v3 wire format +func (val *ChasmComponentRef) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmComponentRef from the protobuf v3 wire format +func (val *ChasmComponentRef) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmComponentRef) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmComponentRef values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmComponentRef) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmComponentRef + switch t := that.(type) { + case *ChasmComponentRef: + that1 = t + case ChasmComponentRef: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmNexusCompletion to the protobuf v3 wire format +func (val *ChasmNexusCompletion) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmNexusCompletion from the protobuf v3 wire format +func (val *ChasmNexusCompletion) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmNexusCompletion) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmNexusCompletion values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmNexusCompletion) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmNexusCompletion + switch t := that.(type) { + case *ChasmNexusCompletion: + that1 = t + case ChasmNexusCompletion: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/chasm.pb.go b/api/persistence/v1/chasm.pb.go new file mode 100644 index 00000000000..ee1a217adf0 --- /dev/null +++ b/api/persistence/v1/chasm.pb.go @@ -0,0 +1,1014 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/persistence/v1/chasm.proto + +package persistence + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/common/v1" + v11 "go.temporal.io/api/failure/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ChasmNode struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Metadata present for all nodes. + Metadata *ChasmNodeMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + // User data for any type of node that stores it. + Data *v1.DataBlob `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmNode) Reset() { + *x = ChasmNode{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmNode) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmNode) ProtoMessage() {} + +func (x *ChasmNode) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmNode.ProtoReflect.Descriptor instead. 
+func (*ChasmNode) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{0} +} + +func (x *ChasmNode) GetMetadata() *ChasmNodeMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *ChasmNode) GetData() *v1.DataBlob { + if x != nil { + return x.Data + } + return nil +} + +type ChasmNodeMetadata struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Versioned transition when the node was instantiated. + InitialVersionedTransition *VersionedTransition `protobuf:"bytes,1,opt,name=initial_versioned_transition,json=initialVersionedTransition,proto3" json:"initial_versioned_transition,omitempty"` + // Versioned transition when the node was last updated. + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,2,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + // Types that are valid to be assigned to Attributes: + // + // *ChasmNodeMetadata_ComponentAttributes + // *ChasmNodeMetadata_DataAttributes + // *ChasmNodeMetadata_CollectionAttributes + // *ChasmNodeMetadata_PointerAttributes + Attributes isChasmNodeMetadata_Attributes `protobuf_oneof:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmNodeMetadata) Reset() { + *x = ChasmNodeMetadata{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmNodeMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmNodeMetadata) ProtoMessage() {} + +func (x *ChasmNodeMetadata) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } 
+ return mi.MessageOf(x) +} + +// Deprecated: Use ChasmNodeMetadata.ProtoReflect.Descriptor instead. +func (*ChasmNodeMetadata) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{1} +} + +func (x *ChasmNodeMetadata) GetInitialVersionedTransition() *VersionedTransition { + if x != nil { + return x.InitialVersionedTransition + } + return nil +} + +func (x *ChasmNodeMetadata) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +func (x *ChasmNodeMetadata) GetAttributes() isChasmNodeMetadata_Attributes { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *ChasmNodeMetadata) GetComponentAttributes() *ChasmComponentAttributes { + if x != nil { + if x, ok := x.Attributes.(*ChasmNodeMetadata_ComponentAttributes); ok { + return x.ComponentAttributes + } + } + return nil +} + +func (x *ChasmNodeMetadata) GetDataAttributes() *ChasmDataAttributes { + if x != nil { + if x, ok := x.Attributes.(*ChasmNodeMetadata_DataAttributes); ok { + return x.DataAttributes + } + } + return nil +} + +func (x *ChasmNodeMetadata) GetCollectionAttributes() *ChasmCollectionAttributes { + if x != nil { + if x, ok := x.Attributes.(*ChasmNodeMetadata_CollectionAttributes); ok { + return x.CollectionAttributes + } + } + return nil +} + +func (x *ChasmNodeMetadata) GetPointerAttributes() *ChasmPointerAttributes { + if x != nil { + if x, ok := x.Attributes.(*ChasmNodeMetadata_PointerAttributes); ok { + return x.PointerAttributes + } + } + return nil +} + +type isChasmNodeMetadata_Attributes interface { + isChasmNodeMetadata_Attributes() +} + +type ChasmNodeMetadata_ComponentAttributes struct { + ComponentAttributes *ChasmComponentAttributes `protobuf:"bytes,11,opt,name=component_attributes,json=componentAttributes,proto3,oneof"` +} + +type ChasmNodeMetadata_DataAttributes struct { + DataAttributes *ChasmDataAttributes 
`protobuf:"bytes,12,opt,name=data_attributes,json=dataAttributes,proto3,oneof"` +} + +type ChasmNodeMetadata_CollectionAttributes struct { + CollectionAttributes *ChasmCollectionAttributes `protobuf:"bytes,13,opt,name=collection_attributes,json=collectionAttributes,proto3,oneof"` +} + +type ChasmNodeMetadata_PointerAttributes struct { + PointerAttributes *ChasmPointerAttributes `protobuf:"bytes,14,opt,name=pointer_attributes,json=pointerAttributes,proto3,oneof"` +} + +func (*ChasmNodeMetadata_ComponentAttributes) isChasmNodeMetadata_Attributes() {} + +func (*ChasmNodeMetadata_DataAttributes) isChasmNodeMetadata_Attributes() {} + +func (*ChasmNodeMetadata_CollectionAttributes) isChasmNodeMetadata_Attributes() {} + +func (*ChasmNodeMetadata_PointerAttributes) isChasmNodeMetadata_Attributes() {} + +type ChasmComponentAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Registered component's type ID. + // (-- api-linter: core::0141::forbidden-types=disabled --) + TypeId uint32 `protobuf:"varint,1,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + // Tasks are in their insertion order, + // i.e. by versioned transtion and versioned_transition_offset. + SideEffectTasks []*ChasmComponentAttributes_Task `protobuf:"bytes,2,rep,name=side_effect_tasks,json=sideEffectTasks,proto3" json:"side_effect_tasks,omitempty"` + // Tasks are ordered by their scheduled time, breaking ties by + // versioned transition and versioned_transition_offset. + PureTasks []*ChasmComponentAttributes_Task `protobuf:"bytes,3,rep,name=pure_tasks,json=pureTasks,proto3" json:"pure_tasks,omitempty"` + // When true, this component ignores parent lifecycle validation. + // Detached components can continue operating, accepting writes and executing + // tasks, even when their parent is closed/terminated. 
+ Detached bool `protobuf:"varint,4,opt,name=detached,proto3" json:"detached,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmComponentAttributes) Reset() { + *x = ChasmComponentAttributes{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmComponentAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmComponentAttributes) ProtoMessage() {} + +func (x *ChasmComponentAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmComponentAttributes.ProtoReflect.Descriptor instead. +func (*ChasmComponentAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{2} +} + +func (x *ChasmComponentAttributes) GetTypeId() uint32 { + if x != nil { + return x.TypeId + } + return 0 +} + +func (x *ChasmComponentAttributes) GetSideEffectTasks() []*ChasmComponentAttributes_Task { + if x != nil { + return x.SideEffectTasks + } + return nil +} + +func (x *ChasmComponentAttributes) GetPureTasks() []*ChasmComponentAttributes_Task { + if x != nil { + return x.PureTasks + } + return nil +} + +func (x *ChasmComponentAttributes) GetDetached() bool { + if x != nil { + return x.Detached + } + return false +} + +type ChasmDataAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmDataAttributes) Reset() { + *x = ChasmDataAttributes{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmDataAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmDataAttributes) ProtoMessage() {} + +func (x *ChasmDataAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmDataAttributes.ProtoReflect.Descriptor instead. +func (*ChasmDataAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{3} +} + +type ChasmCollectionAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmCollectionAttributes) Reset() { + *x = ChasmCollectionAttributes{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmCollectionAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmCollectionAttributes) ProtoMessage() {} + +func (x *ChasmCollectionAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmCollectionAttributes.ProtoReflect.Descriptor instead. 
+func (*ChasmCollectionAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{4} +} + +type ChasmPointerAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + NodePath []string `protobuf:"bytes,1,rep,name=node_path,json=nodePath,proto3" json:"node_path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmPointerAttributes) Reset() { + *x = ChasmPointerAttributes{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmPointerAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmPointerAttributes) ProtoMessage() {} + +func (x *ChasmPointerAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmPointerAttributes.ProtoReflect.Descriptor instead. +func (*ChasmPointerAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{5} +} + +func (x *ChasmPointerAttributes) GetNodePath() []string { + if x != nil { + return x.NodePath + } + return nil +} + +// ChasmTaskInfo includes component-facing task metadata +type ChasmTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Initial versioned transition of the component being referenced. 
+ ComponentInitialVersionedTransition *VersionedTransition `protobuf:"bytes,1,opt,name=component_initial_versioned_transition,json=componentInitialVersionedTransition,proto3" json:"component_initial_versioned_transition,omitempty"` + // Last updated transition of the component being referenced at the time the + // reference was created. Can be used to invalidate this reference. + ComponentLastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,2,opt,name=component_last_update_versioned_transition,json=componentLastUpdateVersionedTransition,proto3" json:"component_last_update_versioned_transition,omitempty"` + // Path to the component. + Path []string `protobuf:"bytes,3,rep,name=path,proto3" json:"path,omitempty"` + // Registered task's type ID. + // (-- api-linter: core::0141::forbidden-types=disabled --) + TypeId uint32 `protobuf:"varint,4,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + // Opaque attached task data. May be nil. Usable by components, not the CHASM + // framework itself. + Data *v1.DataBlob `protobuf:"bytes,5,opt,name=data,proto3" json:"data,omitempty"` + // ArchetypeID of the execution that generated this task. + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,6,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + // Versioned transition of the execution when the logical task was created. + // Together with task_versioned_transition_offset, uniquely identifies the + // corresponding logical task in ChasmComponentAttributes.side_effect_tasks. + // Used to verify the logical task still exists before executing the physical task. + TaskVersionedTransition *VersionedTransition `protobuf:"bytes,7,opt,name=task_versioned_transition,json=taskVersionedTransition,proto3" json:"task_versioned_transition,omitempty"` + // The xth task generated in the versioned transition identified by + // task_versioned_transition. 
Together with task_versioned_transition, + // forms a unique identifier for the logical task. + TaskVersionedTransitionOffset int64 `protobuf:"varint,8,opt,name=task_versioned_transition_offset,json=taskVersionedTransitionOffset,proto3" json:"task_versioned_transition_offset,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmTaskInfo) Reset() { + *x = ChasmTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmTaskInfo) ProtoMessage() {} + +func (x *ChasmTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmTaskInfo.ProtoReflect.Descriptor instead. 
+func (*ChasmTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{6} +} + +func (x *ChasmTaskInfo) GetComponentInitialVersionedTransition() *VersionedTransition { + if x != nil { + return x.ComponentInitialVersionedTransition + } + return nil +} + +func (x *ChasmTaskInfo) GetComponentLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.ComponentLastUpdateVersionedTransition + } + return nil +} + +func (x *ChasmTaskInfo) GetPath() []string { + if x != nil { + return x.Path + } + return nil +} + +func (x *ChasmTaskInfo) GetTypeId() uint32 { + if x != nil { + return x.TypeId + } + return 0 +} + +func (x *ChasmTaskInfo) GetData() *v1.DataBlob { + if x != nil { + return x.Data + } + return nil +} + +func (x *ChasmTaskInfo) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +func (x *ChasmTaskInfo) GetTaskVersionedTransition() *VersionedTransition { + if x != nil { + return x.TaskVersionedTransition + } + return nil +} + +func (x *ChasmTaskInfo) GetTaskVersionedTransitionOffset() int64 { + if x != nil { + return x.TaskVersionedTransitionOffset + } + return 0 +} + +// ChasmComponentRef references a specific chasm component. +type ChasmComponentRef struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + BusinessId string `protobuf:"bytes,2,opt,name=business_id,json=businessId,proto3" json:"business_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // Executions's root component's type ID. 
+ // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,4,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + ExecutionVersionedTransition *VersionedTransition `protobuf:"bytes,5,opt,name=execution_versioned_transition,json=executionVersionedTransition,proto3" json:"execution_versioned_transition,omitempty"` + ComponentPath []string `protobuf:"bytes,6,rep,name=component_path,json=componentPath,proto3" json:"component_path,omitempty"` + ComponentInitialVersionedTransition *VersionedTransition `protobuf:"bytes,7,opt,name=component_initial_versioned_transition,json=componentInitialVersionedTransition,proto3" json:"component_initial_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmComponentRef) Reset() { + *x = ChasmComponentRef{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmComponentRef) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmComponentRef) ProtoMessage() {} + +func (x *ChasmComponentRef) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmComponentRef.ProtoReflect.Descriptor instead. 
+func (*ChasmComponentRef) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{7} +} + +func (x *ChasmComponentRef) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ChasmComponentRef) GetBusinessId() string { + if x != nil { + return x.BusinessId + } + return "" +} + +func (x *ChasmComponentRef) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *ChasmComponentRef) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +func (x *ChasmComponentRef) GetExecutionVersionedTransition() *VersionedTransition { + if x != nil { + return x.ExecutionVersionedTransition + } + return nil +} + +func (x *ChasmComponentRef) GetComponentPath() []string { + if x != nil { + return x.ComponentPath + } + return nil +} + +func (x *ChasmComponentRef) GetComponentInitialVersionedTransition() *VersionedTransition { + if x != nil { + return x.ComponentInitialVersionedTransition + } + return nil +} + +// ChasmNexusCompletion includes details about a completed Nexus operation. +type ChasmNexusCompletion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Outcome: + // + // *ChasmNexusCompletion_Success + // *ChasmNexusCompletion_Failure + Outcome isChasmNexusCompletion_Outcome `protobuf_oneof:"outcome"` + // Time when the operation was closed. + CloseTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` + // Request ID embedded in the NexusOperationScheduledEvent. + // Allows completing a started operation after a workflow has been reset. + RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Links from the Nexus completion callback (e.g. references to the handler workflow). 
+ Links []*v1.Link `protobuf:"bytes,5,rep,name=links,proto3" json:"links,omitempty"` + // Async operation token from the callback request, used for completion-before-start. + OperationToken string `protobuf:"bytes,6,opt,name=operation_token,json=operationToken,proto3" json:"operation_token,omitempty"` + // Start time from the callback request, used for completion-before-start. + StartTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmNexusCompletion) Reset() { + *x = ChasmNexusCompletion{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmNexusCompletion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmNexusCompletion) ProtoMessage() {} + +func (x *ChasmNexusCompletion) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmNexusCompletion.ProtoReflect.Descriptor instead. 
+func (*ChasmNexusCompletion) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{8} +} + +func (x *ChasmNexusCompletion) GetOutcome() isChasmNexusCompletion_Outcome { + if x != nil { + return x.Outcome + } + return nil +} + +func (x *ChasmNexusCompletion) GetSuccess() *v1.Payload { + if x != nil { + if x, ok := x.Outcome.(*ChasmNexusCompletion_Success); ok { + return x.Success + } + } + return nil +} + +func (x *ChasmNexusCompletion) GetFailure() *v11.Failure { + if x != nil { + if x, ok := x.Outcome.(*ChasmNexusCompletion_Failure); ok { + return x.Failure + } + } + return nil +} + +func (x *ChasmNexusCompletion) GetCloseTime() *timestamppb.Timestamp { + if x != nil { + return x.CloseTime + } + return nil +} + +func (x *ChasmNexusCompletion) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *ChasmNexusCompletion) GetLinks() []*v1.Link { + if x != nil { + return x.Links + } + return nil +} + +func (x *ChasmNexusCompletion) GetOperationToken() string { + if x != nil { + return x.OperationToken + } + return "" +} + +func (x *ChasmNexusCompletion) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +type isChasmNexusCompletion_Outcome interface { + isChasmNexusCompletion_Outcome() +} + +type ChasmNexusCompletion_Success struct { + // Result of a successful operation, only set if state == successful. + Success *v1.Payload `protobuf:"bytes,1,opt,name=success,proto3,oneof"` +} + +type ChasmNexusCompletion_Failure struct { + // Operation failure, only set if state != successful. 
+ Failure *v11.Failure `protobuf:"bytes,2,opt,name=failure,proto3,oneof"` +} + +func (*ChasmNexusCompletion_Success) isChasmNexusCompletion_Outcome() {} + +func (*ChasmNexusCompletion_Failure) isChasmNexusCompletion_Outcome() {} + +type ChasmComponentAttributes_Task struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Registered task's type ID. + // (-- api-linter: core::0141::forbidden-types=disabled --) + TypeId uint32 `protobuf:"varint,1,opt,name=type_id,json=typeId,proto3" json:"type_id,omitempty"` + Destination string `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` + ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + Data *v1.DataBlob `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + // Versioned transition of the execution when the task was created. + VersionedTransition *VersionedTransition `protobuf:"bytes,5,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + // The xth task generated in this versioned transition. + // Together with the versioned transition, this is a unique identifier for + // this task. + VersionedTransitionOffset int64 `protobuf:"varint,6,opt,name=versioned_transition_offset,json=versionedTransitionOffset,proto3" json:"versioned_transition_offset,omitempty"` + // If a physical task is created for this task in this cluster. + // NOTE: this is a cluster-specific field and can not be replicated. + // Changes to this field also doesn't require an increase in versioned transition. 
+ PhysicalTaskStatus int32 `protobuf:"varint,7,opt,name=physical_task_status,json=physicalTaskStatus,proto3" json:"physical_task_status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmComponentAttributes_Task) Reset() { + *x = ChasmComponentAttributes_Task{} + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmComponentAttributes_Task) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmComponentAttributes_Task) ProtoMessage() {} + +func (x *ChasmComponentAttributes_Task) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmComponentAttributes_Task.ProtoReflect.Descriptor instead. 
+func (*ChasmComponentAttributes_Task) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ChasmComponentAttributes_Task) GetTypeId() uint32 { + if x != nil { + return x.TypeId + } + return 0 +} + +func (x *ChasmComponentAttributes_Task) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +func (x *ChasmComponentAttributes_Task) GetScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledTime + } + return nil +} + +func (x *ChasmComponentAttributes_Task) GetData() *v1.DataBlob { + if x != nil { + return x.Data + } + return nil +} + +func (x *ChasmComponentAttributes_Task) GetVersionedTransition() *VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} + +func (x *ChasmComponentAttributes_Task) GetVersionedTransitionOffset() int64 { + if x != nil { + return x.VersionedTransitionOffset + } + return 0 +} + +func (x *ChasmComponentAttributes_Task) GetPhysicalTaskStatus() int32 { + if x != nil { + return x.PhysicalTaskStatus + } + return 0 +} + +var File_temporal_server_api_persistence_v1_chasm_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_chasm_proto_rawDesc = "" + + "\n" + + ".temporal/server/api/persistence/v1/chasm.proto\x12\"temporal.server.api.persistence.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\"\x94\x01\n" + + "\tChasmNode\x12Q\n" + + "\bmetadata\x18\x01 \x01(\v25.temporal.server.api.persistence.v1.ChasmNodeMetadataR\bmetadata\x124\n" + + "\x04data\x18\x02 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04data\"\xd9\x05\n" + + "\x11ChasmNodeMetadata\x12y\n" + + "\x1cinitial_versioned_transition\x18\x01 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1ainitialVersionedTransition\x12\x80\x01\n" + + " 
last_update_versioned_transition\x18\x02 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\x12q\n" + + "\x14component_attributes\x18\v \x01(\v2<.temporal.server.api.persistence.v1.ChasmComponentAttributesH\x00R\x13componentAttributes\x12b\n" + + "\x0fdata_attributes\x18\f \x01(\v27.temporal.server.api.persistence.v1.ChasmDataAttributesH\x00R\x0edataAttributes\x12t\n" + + "\x15collection_attributes\x18\r \x01(\v2=.temporal.server.api.persistence.v1.ChasmCollectionAttributesH\x00R\x14collectionAttributes\x12k\n" + + "\x12pointer_attributes\x18\x0e \x01(\v2:.temporal.server.api.persistence.v1.ChasmPointerAttributesH\x00R\x11pointerAttributesB\f\n" + + "\n" + + "attributes\"\xbb\x05\n" + + "\x18ChasmComponentAttributes\x12\x17\n" + + "\atype_id\x18\x01 \x01(\rR\x06typeId\x12m\n" + + "\x11side_effect_tasks\x18\x02 \x03(\v2A.temporal.server.api.persistence.v1.ChasmComponentAttributes.TaskR\x0fsideEffectTasks\x12`\n" + + "\n" + + "pure_tasks\x18\x03 \x03(\v2A.temporal.server.api.persistence.v1.ChasmComponentAttributes.TaskR\tpureTasks\x12\x1a\n" + + "\bdetached\x18\x04 \x01(\bR\bdetached\x1a\x98\x03\n" + + "\x04Task\x12\x17\n" + + "\atype_id\x18\x01 \x01(\rR\x06typeId\x12 \n" + + "\vdestination\x18\x02 \x01(\tR\vdestination\x12A\n" + + "\x0escheduled_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x124\n" + + "\x04data\x18\x04 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04data\x12j\n" + + "\x14versioned_transition\x18\x05 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12>\n" + + "\x1bversioned_transition_offset\x18\x06 \x01(\x03R\x19versionedTransitionOffset\x120\n" + + "\x14physical_task_status\x18\a \x01(\x05R\x12physicalTaskStatus\"\x15\n" + + "\x13ChasmDataAttributes\"\x1b\n" + + "\x19ChasmCollectionAttributes\"5\n" + + "\x16ChasmPointerAttributes\x12\x1b\n" + + "\tnode_path\x18\x01 \x03(\tR\bnodePath\"\xf8\x04\n" + + "\rChasmTaskInfo\x12\x8c\x01\n" + + 
"&component_initial_versioned_transition\x18\x01 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR#componentInitialVersionedTransition\x12\x93\x01\n" + + "*component_last_update_versioned_transition\x18\x02 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR&componentLastUpdateVersionedTransition\x12\x12\n" + + "\x04path\x18\x03 \x03(\tR\x04path\x12\x17\n" + + "\atype_id\x18\x04 \x01(\rR\x06typeId\x124\n" + + "\x04data\x18\x05 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04data\x12!\n" + + "\farchetype_id\x18\x06 \x01(\rR\varchetypeId\x12s\n" + + "\x19task_versioned_transition\x18\a \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x17taskVersionedTransition\x12G\n" + + " task_versioned_transition_offset\x18\b \x01(\x03R\x1dtaskVersionedTransitionOffset\"\xc6\x03\n" + + "\x11ChasmComponentRef\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vbusiness_id\x18\x02 \x01(\tR\n" + + "businessId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12!\n" + + "\farchetype_id\x18\x04 \x01(\rR\varchetypeId\x12}\n" + + "\x1eexecution_versioned_transition\x18\x05 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1cexecutionVersionedTransition\x12%\n" + + "\x0ecomponent_path\x18\x06 \x03(\tR\rcomponentPath\x12\x8c\x01\n" + + "&component_initial_versioned_transition\x18\a \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR#componentInitialVersionedTransition\"\x8e\x03\n" + + "\x14ChasmNexusCompletion\x12;\n" + + "\asuccess\x18\x01 \x01(\v2\x1f.temporal.api.common.v1.PayloadH\x00R\asuccess\x12<\n" + + "\afailure\x18\x02 \x01(\v2 .temporal.api.failure.v1.FailureH\x00R\afailure\x129\n" + + "\n" + + "close_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x12\x1d\n" + + "\n" + + "request_id\x18\x04 \x01(\tR\trequestId\x122\n" + + "\x05links\x18\x05 \x03(\v2\x1c.temporal.api.common.v1.LinkR\x05links\x12'\n" + + "\x0foperation_token\x18\x06 
\x01(\tR\x0eoperationToken\x129\n" + + "\n" + + "start_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\tstartTimeB\t\n" + + "\aoutcomeB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + +var ( + file_temporal_server_api_persistence_v1_chasm_proto_rawDescOnce sync.Once + file_temporal_server_api_persistence_v1_chasm_proto_rawDescData []byte +) + +func file_temporal_server_api_persistence_v1_chasm_proto_rawDescGZIP() []byte { + file_temporal_server_api_persistence_v1_chasm_proto_rawDescOnce.Do(func() { + file_temporal_server_api_persistence_v1_chasm_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_chasm_proto_rawDesc), len(file_temporal_server_api_persistence_v1_chasm_proto_rawDesc))) + }) + return file_temporal_server_api_persistence_v1_chasm_proto_rawDescData +} + +var file_temporal_server_api_persistence_v1_chasm_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_temporal_server_api_persistence_v1_chasm_proto_goTypes = []any{ + (*ChasmNode)(nil), // 0: temporal.server.api.persistence.v1.ChasmNode + (*ChasmNodeMetadata)(nil), // 1: temporal.server.api.persistence.v1.ChasmNodeMetadata + (*ChasmComponentAttributes)(nil), // 2: temporal.server.api.persistence.v1.ChasmComponentAttributes + (*ChasmDataAttributes)(nil), // 3: temporal.server.api.persistence.v1.ChasmDataAttributes + (*ChasmCollectionAttributes)(nil), // 4: temporal.server.api.persistence.v1.ChasmCollectionAttributes + (*ChasmPointerAttributes)(nil), // 5: temporal.server.api.persistence.v1.ChasmPointerAttributes + (*ChasmTaskInfo)(nil), // 6: temporal.server.api.persistence.v1.ChasmTaskInfo + (*ChasmComponentRef)(nil), // 7: temporal.server.api.persistence.v1.ChasmComponentRef + (*ChasmNexusCompletion)(nil), // 8: temporal.server.api.persistence.v1.ChasmNexusCompletion + (*ChasmComponentAttributes_Task)(nil), // 9: temporal.server.api.persistence.v1.ChasmComponentAttributes.Task + (*v1.DataBlob)(nil), // 10: 
temporal.api.common.v1.DataBlob + (*VersionedTransition)(nil), // 11: temporal.server.api.persistence.v1.VersionedTransition + (*v1.Payload)(nil), // 12: temporal.api.common.v1.Payload + (*v11.Failure)(nil), // 13: temporal.api.failure.v1.Failure + (*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp + (*v1.Link)(nil), // 15: temporal.api.common.v1.Link +} +var file_temporal_server_api_persistence_v1_chasm_proto_depIdxs = []int32{ + 1, // 0: temporal.server.api.persistence.v1.ChasmNode.metadata:type_name -> temporal.server.api.persistence.v1.ChasmNodeMetadata + 10, // 1: temporal.server.api.persistence.v1.ChasmNode.data:type_name -> temporal.api.common.v1.DataBlob + 11, // 2: temporal.server.api.persistence.v1.ChasmNodeMetadata.initial_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 11, // 3: temporal.server.api.persistence.v1.ChasmNodeMetadata.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 2, // 4: temporal.server.api.persistence.v1.ChasmNodeMetadata.component_attributes:type_name -> temporal.server.api.persistence.v1.ChasmComponentAttributes + 3, // 5: temporal.server.api.persistence.v1.ChasmNodeMetadata.data_attributes:type_name -> temporal.server.api.persistence.v1.ChasmDataAttributes + 4, // 6: temporal.server.api.persistence.v1.ChasmNodeMetadata.collection_attributes:type_name -> temporal.server.api.persistence.v1.ChasmCollectionAttributes + 5, // 7: temporal.server.api.persistence.v1.ChasmNodeMetadata.pointer_attributes:type_name -> temporal.server.api.persistence.v1.ChasmPointerAttributes + 9, // 8: temporal.server.api.persistence.v1.ChasmComponentAttributes.side_effect_tasks:type_name -> temporal.server.api.persistence.v1.ChasmComponentAttributes.Task + 9, // 9: temporal.server.api.persistence.v1.ChasmComponentAttributes.pure_tasks:type_name -> temporal.server.api.persistence.v1.ChasmComponentAttributes.Task + 11, // 10: 
temporal.server.api.persistence.v1.ChasmTaskInfo.component_initial_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 11, // 11: temporal.server.api.persistence.v1.ChasmTaskInfo.component_last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 10, // 12: temporal.server.api.persistence.v1.ChasmTaskInfo.data:type_name -> temporal.api.common.v1.DataBlob + 11, // 13: temporal.server.api.persistence.v1.ChasmTaskInfo.task_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 11, // 14: temporal.server.api.persistence.v1.ChasmComponentRef.execution_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 11, // 15: temporal.server.api.persistence.v1.ChasmComponentRef.component_initial_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 12, // 16: temporal.server.api.persistence.v1.ChasmNexusCompletion.success:type_name -> temporal.api.common.v1.Payload + 13, // 17: temporal.server.api.persistence.v1.ChasmNexusCompletion.failure:type_name -> temporal.api.failure.v1.Failure + 14, // 18: temporal.server.api.persistence.v1.ChasmNexusCompletion.close_time:type_name -> google.protobuf.Timestamp + 15, // 19: temporal.server.api.persistence.v1.ChasmNexusCompletion.links:type_name -> temporal.api.common.v1.Link + 14, // 20: temporal.server.api.persistence.v1.ChasmNexusCompletion.start_time:type_name -> google.protobuf.Timestamp + 14, // 21: temporal.server.api.persistence.v1.ChasmComponentAttributes.Task.scheduled_time:type_name -> google.protobuf.Timestamp + 10, // 22: temporal.server.api.persistence.v1.ChasmComponentAttributes.Task.data:type_name -> temporal.api.common.v1.DataBlob + 11, // 23: temporal.server.api.persistence.v1.ChasmComponentAttributes.Task.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 24, // [24:24] is the sub-list for 
method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_persistence_v1_chasm_proto_init() } +func file_temporal_server_api_persistence_v1_chasm_proto_init() { + if File_temporal_server_api_persistence_v1_chasm_proto != nil { + return + } + file_temporal_server_api_persistence_v1_hsm_proto_init() + file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[1].OneofWrappers = []any{ + (*ChasmNodeMetadata_ComponentAttributes)(nil), + (*ChasmNodeMetadata_DataAttributes)(nil), + (*ChasmNodeMetadata_CollectionAttributes)(nil), + (*ChasmNodeMetadata_PointerAttributes)(nil), + } + file_temporal_server_api_persistence_v1_chasm_proto_msgTypes[8].OneofWrappers = []any{ + (*ChasmNexusCompletion_Success)(nil), + (*ChasmNexusCompletion_Failure)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_chasm_proto_rawDesc), len(file_temporal_server_api_persistence_v1_chasm_proto_rawDesc)), + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_persistence_v1_chasm_proto_goTypes, + DependencyIndexes: file_temporal_server_api_persistence_v1_chasm_proto_depIdxs, + MessageInfos: file_temporal_server_api_persistence_v1_chasm_proto_msgTypes, + }.Build() + File_temporal_server_api_persistence_v1_chasm_proto = out.File + file_temporal_server_api_persistence_v1_chasm_proto_goTypes = nil + file_temporal_server_api_persistence_v1_chasm_proto_depIdxs = nil +} diff --git a/api/persistence/v1/chasm_visibility.go-helpers.pb.go b/api/persistence/v1/chasm_visibility.go-helpers.pb.go new file mode 100644 index 00000000000..3da1e9cbfd0 --- /dev/null 
+++ b/api/persistence/v1/chasm_visibility.go-helpers.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package persistence + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ChasmVisibilityData to the protobuf v3 wire format +func (val *ChasmVisibilityData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmVisibilityData from the protobuf v3 wire format +func (val *ChasmVisibilityData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmVisibilityData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmVisibilityData values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmVisibilityData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmVisibilityData + switch t := that.(type) { + case *ChasmVisibilityData: + that1 = t + case ChasmVisibilityData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ChasmVisibilityTaskData to the protobuf v3 wire format +func (val *ChasmVisibilityTaskData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ChasmVisibilityTaskData from the protobuf v3 wire format +func (val *ChasmVisibilityTaskData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ChasmVisibilityTaskData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ChasmVisibilityTaskData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ChasmVisibilityTaskData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ChasmVisibilityTaskData + switch t := that.(type) { + case *ChasmVisibilityTaskData: + that1 = t + case ChasmVisibilityTaskData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/chasm_visibility.pb.go b/api/persistence/v1/chasm_visibility.pb.go new file mode 100644 index 00000000000..c89a3a0f61c --- /dev/null +++ b/api/persistence/v1/chasm_visibility.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/persistence/v1/chasm_visibility.proto + +package persistence + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ChasmVisibilityData struct { + state protoimpl.MessageState `protogen:"open.v1"` + TransitionCount int64 `protobuf:"varint,1,opt,name=transition_count,json=transitionCount,proto3" json:"transition_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmVisibilityData) Reset() { + *x = ChasmVisibilityData{} + mi := &file_temporal_server_api_persistence_v1_chasm_visibility_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmVisibilityData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmVisibilityData) ProtoMessage() {} + +func (x *ChasmVisibilityData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_visibility_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmVisibilityData.ProtoReflect.Descriptor instead. 
+func (*ChasmVisibilityData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescGZIP(), []int{0} +} + +func (x *ChasmVisibilityData) GetTransitionCount() int64 { + if x != nil { + return x.TransitionCount + } + return 0 +} + +type ChasmVisibilityTaskData struct { + state protoimpl.MessageState `protogen:"open.v1"` + TransitionCount int64 `protobuf:"varint,1,opt,name=transition_count,json=transitionCount,proto3" json:"transition_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChasmVisibilityTaskData) Reset() { + *x = ChasmVisibilityTaskData{} + mi := &file_temporal_server_api_persistence_v1_chasm_visibility_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChasmVisibilityTaskData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChasmVisibilityTaskData) ProtoMessage() {} + +func (x *ChasmVisibilityTaskData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_chasm_visibility_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChasmVisibilityTaskData.ProtoReflect.Descriptor instead. 
+func (*ChasmVisibilityTaskData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescGZIP(), []int{1} +} + +func (x *ChasmVisibilityTaskData) GetTransitionCount() int64 { + if x != nil { + return x.TransitionCount + } + return 0 +} + +var File_temporal_server_api_persistence_v1_chasm_visibility_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDesc = "" + + "\n" + + "9temporal/server/api/persistence/v1/chasm_visibility.proto\x12\"temporal.server.api.persistence.v1\"@\n" + + "\x13ChasmVisibilityData\x12)\n" + + "\x10transition_count\x18\x01 \x01(\x03R\x0ftransitionCount\"D\n" + + "\x17ChasmVisibilityTaskData\x12)\n" + + "\x10transition_count\x18\x01 \x01(\x03R\x0ftransitionCountB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + +var ( + file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescOnce sync.Once + file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescData []byte +) + +func file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescGZIP() []byte { + file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescOnce.Do(func() { + file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDesc), len(file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDesc))) + }) + return file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDescData +} + +var file_temporal_server_api_persistence_v1_chasm_visibility_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_temporal_server_api_persistence_v1_chasm_visibility_proto_goTypes = []any{ + (*ChasmVisibilityData)(nil), // 0: temporal.server.api.persistence.v1.ChasmVisibilityData + (*ChasmVisibilityTaskData)(nil), // 1: 
temporal.server.api.persistence.v1.ChasmVisibilityTaskData +} +var file_temporal_server_api_persistence_v1_chasm_visibility_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_persistence_v1_chasm_visibility_proto_init() } +func file_temporal_server_api_persistence_v1_chasm_visibility_proto_init() { + if File_temporal_server_api_persistence_v1_chasm_visibility_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDesc), len(file_temporal_server_api_persistence_v1_chasm_visibility_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_persistence_v1_chasm_visibility_proto_goTypes, + DependencyIndexes: file_temporal_server_api_persistence_v1_chasm_visibility_proto_depIdxs, + MessageInfos: file_temporal_server_api_persistence_v1_chasm_visibility_proto_msgTypes, + }.Build() + File_temporal_server_api_persistence_v1_chasm_visibility_proto = out.File + file_temporal_server_api_persistence_v1_chasm_visibility_proto_goTypes = nil + file_temporal_server_api_persistence_v1_chasm_visibility_proto_depIdxs = nil +} diff --git a/api/persistence/v1/cluster_metadata.go-helpers.pb.go b/api/persistence/v1/cluster_metadata.go-helpers.pb.go index d4228842a52..09aa53ab1c2 100644 --- a/api/persistence/v1/cluster_metadata.go-helpers.pb.go +++ b/api/persistence/v1/cluster_metadata.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence diff --git a/api/persistence/v1/cluster_metadata.pb.go b/api/persistence/v1/cluster_metadata.pb.go index ae1b9895b68..8e91fcbca6f 100644 --- a/api/persistence/v1/cluster_metadata.pb.go +++ b/api/persistence/v1/cluster_metadata.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" v11 "go.temporal.io/api/enums/v1" v1 "go.temporal.io/api/version/v1" @@ -47,31 +26,31 @@ const ( // data column type ClusterMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` HistoryShardCount int32 `protobuf:"varint,2,opt,name=history_shard_count,json=historyShardCount,proto3" json:"history_shard_count,omitempty"` ClusterId string `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` VersionInfo *v1.VersionInfo `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"` - IndexSearchAttributes map[string]*IndexSearchAttributes `protobuf:"bytes,5,rep,name=index_search_attributes,json=indexSearchAttributes,proto3" json:"index_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + IndexSearchAttributes map[string]*IndexSearchAttributes `protobuf:"bytes,5,rep,name=index_search_attributes,json=indexSearchAttributes,proto3" json:"index_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` ClusterAddress string `protobuf:"bytes,6,opt,name=cluster_address,json=clusterAddress,proto3" json:"cluster_address,omitempty"` + HttpAddress string `protobuf:"bytes,13,opt,name=http_address,json=httpAddress,proto3" json:"http_address,omitempty"` FailoverVersionIncrement int64 `protobuf:"varint,7,opt,name=failover_version_increment,json=failoverVersionIncrement,proto3" json:"failover_version_increment,omitempty"` InitialFailoverVersion int64 `protobuf:"varint,8,opt,name=initial_failover_version,json=initialFailoverVersion,proto3" 
json:"initial_failover_version,omitempty"` IsGlobalNamespaceEnabled bool `protobuf:"varint,9,opt,name=is_global_namespace_enabled,json=isGlobalNamespaceEnabled,proto3" json:"is_global_namespace_enabled,omitempty"` IsConnectionEnabled bool `protobuf:"varint,10,opt,name=is_connection_enabled,json=isConnectionEnabled,proto3" json:"is_connection_enabled,omitempty"` UseClusterIdMembership bool `protobuf:"varint,11,opt,name=use_cluster_id_membership,json=useClusterIdMembership,proto3" json:"use_cluster_id_membership,omitempty"` - Tags map[string]string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tags map[string]string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // is_replication_enabled controls whether replication streams are active. + IsReplicationEnabled bool `protobuf:"varint,14,opt,name=is_replication_enabled,json=isReplicationEnabled,proto3" json:"is_replication_enabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClusterMetadata) Reset() { *x = ClusterMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ClusterMetadata) String() string { @@ -82,7 +61,7 @@ func (*ClusterMetadata) ProtoMessage() {} func (x *ClusterMetadata) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == 
nil { ms.StoreMessageInfo(mi) @@ -139,6 +118,13 @@ func (x *ClusterMetadata) GetClusterAddress() string { return "" } +func (x *ClusterMetadata) GetHttpAddress() string { + if x != nil { + return x.HttpAddress + } + return "" +} + func (x *ClusterMetadata) GetFailoverVersionIncrement() int64 { if x != nil { return x.FailoverVersionIncrement @@ -181,21 +167,25 @@ func (x *ClusterMetadata) GetTags() map[string]string { return nil } -type IndexSearchAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ClusterMetadata) GetIsReplicationEnabled() bool { + if x != nil { + return x.IsReplicationEnabled + } + return false +} - CustomSearchAttributes map[string]v11.IndexedValueType `protobuf:"bytes,1,rep,name=custom_search_attributes,json=customSearchAttributes,proto3" json:"custom_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=temporal.api.enums.v1.IndexedValueType"` +type IndexSearchAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + CustomSearchAttributes map[string]v11.IndexedValueType `protobuf:"bytes,1,rep,name=custom_search_attributes,json=customSearchAttributes,proto3" json:"custom_search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=temporal.api.enums.v1.IndexedValueType"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *IndexSearchAttributes) Reset() { *x = IndexSearchAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *IndexSearchAttributes) String() string { @@ -206,7 
+196,7 @@ func (*IndexSearchAttributes) ProtoMessage() {} func (x *IndexSearchAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -230,116 +220,52 @@ func (x *IndexSearchAttributes) GetCustomSearchAttributes() map[string]v11.Index var File_temporal_server_api_persistence_v1_cluster_metadata_proto protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x22, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xf6, 0x07, 0x0a, 0x0f, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 
0x74, - 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x0c, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x8a, 0x01, 0x0a, 0x17, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x65, - 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x15, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2b, 0x0a, 0x0f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 
0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65, - 0x73, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x40, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, - 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x63, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, - 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x63, 0x72, 0x65, 0x6d, 0x65, 0x6e, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x18, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, - 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x46, 0x61, - 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x41, 0x0a, 0x1b, 0x69, 0x73, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x73, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x36, 0x0a, 0x15, 0x69, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, - 0x73, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x19, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x16, 0x75, 0x73, 0x65, 0x43, 
0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x49, 0x64, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x68, 0x69, 0x70, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x55, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x1a, 0x8b, 0x01, 0x0a, 0x1a, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x53, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x09, 0x54, 0x61, - 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa9, 0x02, 0x0a, 0x15, 0x49, 0x6e, 0x64, - 0x65, 0x78, 
0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x93, 0x01, 0x0a, 0x18, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, 0x65, 0x61, 0x72, - 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x55, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x53, 0x65, 0x61, 0x72, 0x63, - 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, - 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x1a, 0x7a, 0x0a, 0x1b, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 
0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc = "" + + "\n" + + "9temporal/server/api/persistence/v1/cluster_metadata.proto\x12\"temporal.server.api.persistence.v1\x1a\"temporal/api/enums/v1/common.proto\x1a%temporal/api/version/v1/message.proto\"\x8f\b\n" + + "\x0fClusterMetadata\x12!\n" + + "\fcluster_name\x18\x01 \x01(\tR\vclusterName\x12.\n" + + "\x13history_shard_count\x18\x02 \x01(\x05R\x11historyShardCount\x12\x1d\n" + + "\n" + + "cluster_id\x18\x03 \x01(\tR\tclusterId\x12G\n" + + "\fversion_info\x18\x04 \x01(\v2$.temporal.api.version.v1.VersionInfoR\vversionInfo\x12\x86\x01\n" + + "\x17index_search_attributes\x18\x05 \x03(\v2N.temporal.server.api.persistence.v1.ClusterMetadata.IndexSearchAttributesEntryR\x15indexSearchAttributes\x12'\n" + + "\x0fcluster_address\x18\x06 \x01(\tR\x0eclusterAddress\x12!\n" + + "\fhttp_address\x18\r \x01(\tR\vhttpAddress\x12<\n" + + "\x1afailover_version_increment\x18\a \x01(\x03R\x18failoverVersionIncrement\x128\n" + + "\x18initial_failover_version\x18\b \x01(\x03R\x16initialFailoverVersion\x12=\n" + + "\x1bis_global_namespace_enabled\x18\t \x01(\bR\x18isGlobalNamespaceEnabled\x122\n" + + "\x15is_connection_enabled\x18\n" + + " \x01(\bR\x13isConnectionEnabled\x129\n" + + "\x19use_cluster_id_membership\x18\v \x01(\bR\x16useClusterIdMembership\x12Q\n" + + "\x04tags\x18\f \x03(\v2=.temporal.server.api.persistence.v1.ClusterMetadata.TagsEntryR\x04tags\x124\n" + + "\x16is_replication_enabled\x18\x0e \x01(\bR\x14isReplicationEnabled\x1a\x83\x01\n" + + "\x1aIndexSearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12O\n" + + "\x05value\x18\x02 \x01(\v29.temporal.server.api.persistence.v1.IndexSearchAttributesR\x05value:\x028\x01\x1a7\n" + + "\tTagsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + 
"\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\x9d\x02\n" + + "\x15IndexSearchAttributes\x12\x8f\x01\n" + + "\x18custom_search_attributes\x18\x01 \x03(\v2U.temporal.server.api.persistence.v1.IndexSearchAttributes.CustomSearchAttributesEntryR\x16customSearchAttributes\x1ar\n" + + "\x1bCustomSearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12=\n" + + "\x05value\x18\x02 \x01(\x0e2'.temporal.api.enums.v1.IndexedValueTypeR\x05value:\x028\x01B6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescData = file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc + file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescData) + file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc), len(file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDescData } var file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_temporal_server_api_persistence_v1_cluster_metadata_proto_goTypes = []interface{}{ +var file_temporal_server_api_persistence_v1_cluster_metadata_proto_goTypes = []any{ (*ClusterMetadata)(nil), // 0: temporal.server.api.persistence.v1.ClusterMetadata (*IndexSearchAttributes)(nil), // 1: 
temporal.server.api.persistence.v1.IndexSearchAttributes nil, // 2: temporal.server.api.persistence.v1.ClusterMetadata.IndexSearchAttributesEntry @@ -367,37 +293,11 @@ func file_temporal_server_api_persistence_v1_cluster_metadata_proto_init() { if File_temporal_server_api_persistence_v1_cluster_metadata_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ClusterMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IndexSearchAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc), len(file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc)), NumEnums: 0, NumMessages: 5, NumExtensions: 0, @@ -408,7 +308,6 @@ func file_temporal_server_api_persistence_v1_cluster_metadata_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_cluster_metadata_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_cluster_metadata_proto = out.File - file_temporal_server_api_persistence_v1_cluster_metadata_proto_rawDesc = nil file_temporal_server_api_persistence_v1_cluster_metadata_proto_goTypes = nil file_temporal_server_api_persistence_v1_cluster_metadata_proto_depIdxs = nil } diff --git a/api/persistence/v1/executions.go-helpers.pb.go 
b/api/persistence/v1/executions.go-helpers.pb.go index 40abdce2e36..ccd9ed4898f 100644 --- a/api/persistence/v1/executions.go-helpers.pb.go +++ b/api/persistence/v1/executions.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package persistence @@ -103,6 +79,117 @@ func (this *WorkflowExecutionInfo) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type TimeSkippingInfo to the protobuf v3 wire format +func (val *TimeSkippingInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TimeSkippingInfo from the protobuf v3 wire format +func (val *TimeSkippingInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TimeSkippingInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TimeSkippingInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TimeSkippingInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TimeSkippingInfo + switch t := that.(type) { + case *TimeSkippingInfo: + that1 = t + case TimeSkippingInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TimeSkippingBoundInfo to the protobuf v3 wire format +func (val *TimeSkippingBoundInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TimeSkippingBoundInfo from the protobuf v3 wire format +func (val *TimeSkippingBoundInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TimeSkippingBoundInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TimeSkippingBoundInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TimeSkippingBoundInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TimeSkippingBoundInfo + switch t := that.(type) { + case *TimeSkippingBoundInfo: + that1 = t + case TimeSkippingBoundInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type LastNotifiedTargetVersion to the protobuf v3 wire format +func (val *LastNotifiedTargetVersion) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type LastNotifiedTargetVersion from the protobuf v3 wire format +func (val *LastNotifiedTargetVersion) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *LastNotifiedTargetVersion) Size() int { + return proto.Size(val) +} + +// Equal returns whether two LastNotifiedTargetVersion values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *LastNotifiedTargetVersion) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *LastNotifiedTargetVersion + switch t := that.(type) { + case *LastNotifiedTargetVersion: + that1 = t + case LastNotifiedTargetVersion: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type ExecutionStats to the protobuf v3 wire format func (val *ExecutionStats) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -177,6 +264,43 @@ func (this *WorkflowExecutionState) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type RequestIDInfo to the protobuf v3 wire format +func (val *RequestIDInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestIDInfo from the protobuf v3 wire format +func (val *RequestIDInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestIDInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestIDInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestIDInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestIDInfo + switch t := that.(type) { + case *RequestIDInfo: + that1 = t + case RequestIDInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type TransferTaskInfo to the protobuf v3 wire format func (val *TransferTaskInfo) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -399,6 +523,117 @@ func (this *OutboundTaskInfo) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type WorkerCommandsTask to the protobuf v3 wire format +func (val *WorkerCommandsTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerCommandsTask from the protobuf v3 wire format +func (val *WorkerCommandsTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerCommandsTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerCommandsTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerCommandsTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerCommandsTask + switch t := that.(type) { + case *WorkerCommandsTask: + that1 = t + case WorkerCommandsTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusInvocationTaskInfo to the protobuf v3 wire format +func (val *NexusInvocationTaskInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusInvocationTaskInfo from the protobuf v3 wire format +func (val *NexusInvocationTaskInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusInvocationTaskInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusInvocationTaskInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusInvocationTaskInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusInvocationTaskInfo + switch t := that.(type) { + case *NexusInvocationTaskInfo: + that1 = t + case NexusInvocationTaskInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusCancelationTaskInfo to the protobuf v3 wire format +func (val *NexusCancelationTaskInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusCancelationTaskInfo from the protobuf v3 wire format +func (val *NexusCancelationTaskInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusCancelationTaskInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusCancelationTaskInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusCancelationTaskInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusCancelationTaskInfo + switch t := that.(type) { + case *NexusCancelationTaskInfo: + that1 = t + case NexusCancelationTaskInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type ActivityInfo to the protobuf v3 wire format func (val *ActivityInfo) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -621,6 +856,80 @@ func (this *Checksum) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type Callback to the protobuf v3 wire format +func (val *Callback) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type Callback from the protobuf v3 wire format +func (val *Callback) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *Callback) Size() int { + return proto.Size(val) +} + +// Equal returns whether two Callback values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *Callback) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *Callback + switch t := that.(type) { + case *Callback: + that1 = t + case Callback: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type HSMCompletionCallbackArg to the protobuf v3 wire format +func (val *HSMCompletionCallbackArg) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type HSMCompletionCallbackArg from the protobuf v3 wire format +func (val *HSMCompletionCallbackArg) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *HSMCompletionCallbackArg) Size() int { + return proto.Size(val) +} + +// Equal returns whether two HSMCompletionCallbackArg values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *HSMCompletionCallbackArg) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *HSMCompletionCallbackArg + switch t := that.(type) { + case *HSMCompletionCallbackArg: + that1 = t + case HSMCompletionCallbackArg: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type CallbackInfo to the protobuf v3 wire format func (val *CallbackInfo) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -657,3 +966,151 @@ func (this *CallbackInfo) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type NexusOperationInfo to the protobuf v3 wire format +func (val *NexusOperationInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusOperationInfo from the protobuf v3 wire format +func (val *NexusOperationInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusOperationInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusOperationInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusOperationInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusOperationInfo + switch t := that.(type) { + case *NexusOperationInfo: + that1 = t + case NexusOperationInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusOperationCancellationInfo to the protobuf v3 wire format +func (val *NexusOperationCancellationInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusOperationCancellationInfo from the protobuf v3 wire format +func (val *NexusOperationCancellationInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusOperationCancellationInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusOperationCancellationInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusOperationCancellationInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusOperationCancellationInfo + switch t := that.(type) { + case *NexusOperationCancellationInfo: + that1 = t + case NexusOperationCancellationInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ResetChildInfo to the protobuf v3 wire format +func (val *ResetChildInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ResetChildInfo from the protobuf v3 wire format +func (val *ResetChildInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ResetChildInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ResetChildInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ResetChildInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ResetChildInfo + switch t := that.(type) { + case *ResetChildInfo: + that1 = t + case ResetChildInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkflowPauseInfo to the protobuf v3 wire format +func (val *WorkflowPauseInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkflowPauseInfo from the protobuf v3 wire format +func (val *WorkflowPauseInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkflowPauseInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkflowPauseInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkflowPauseInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkflowPauseInfo + switch t := that.(type) { + case *WorkflowPauseInfo: + that1 = t + case WorkflowPauseInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/executions.pb.go b/api/persistence/v1/executions.pb.go index a43dc6cf011..87acaa3432a 100644 --- a/api/persistence/v1/executions.pb.go +++ b/api/persistence/v1/executions.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,16 +9,19 @@ package persistence import ( reflect "reflect" sync "sync" - - v12 "go.temporal.io/api/common/v1" - v17 "go.temporal.io/api/enums/v1" - v18 "go.temporal.io/api/failure/v1" - v11 "go.temporal.io/api/workflow/v1" - v14 "go.temporal.io/server/api/clock/v1" + unsafe "unsafe" + + v13 "go.temporal.io/api/common/v1" + v18 "go.temporal.io/api/deployment/v1" + v11 "go.temporal.io/api/enums/v1" + v110 "go.temporal.io/api/failure/v1" + v17 "go.temporal.io/api/history/v1" + v19 "go.temporal.io/api/worker/v1" + v12 "go.temporal.io/api/workflow/v1" + v15 "go.temporal.io/server/api/clock/v1" v1 "go.temporal.io/server/api/enums/v1" - v13 "go.temporal.io/server/api/history/v1" - v16 "go.temporal.io/server/api/update/v1" - v15 "go.temporal.io/server/api/workflow/v1" + v14 "go.temporal.io/server/api/history/v1" + v16 "go.temporal.io/server/api/workflow/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -56,29 +37,26 @@ const ( // shard column type ShardInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` - RangeId int64 `protobuf:"varint,2,opt,name=range_id,json=rangeId,proto3" json:"range_id,omitempty"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` + RangeId int64 `protobuf:"varint,2,opt,name=range_id,json=rangeId,proto3" json:"range_id,omitempty"` + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "since" is needed here. 
--) StolenSinceRenew int32 `protobuf:"varint,6,opt,name=stolen_since_renew,json=stolenSinceRenew,proto3" json:"stolen_since_renew,omitempty"` UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` - ReplicationDlqAckLevel map[string]int64 `protobuf:"bytes,13,rep,name=replication_dlq_ack_level,json=replicationDlqAckLevel,proto3" json:"replication_dlq_ack_level,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` - QueueStates map[int32]*QueueState `protobuf:"bytes,17,rep,name=queue_states,json=queueStates,proto3" json:"queue_states,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ReplicationDlqAckLevel map[string]int64 `protobuf:"bytes,13,rep,name=replication_dlq_ack_level,json=replicationDlqAckLevel,proto3" json:"replication_dlq_ack_level,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + QueueStates map[int32]*QueueState `protobuf:"bytes,17,rep,name=queue_states,json=queueStates,proto3" json:"queue_states,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ShardInfo) Reset() { *x = ShardInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ShardInfo) String() string { @@ -89,7 +67,7 @@ func (*ShardInfo) ProtoMessage() {} func (x *ShardInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -155,43 +133,64 @@ func (x *ShardInfo) GetQueueStates() map[int32]*QueueState { // execution column type WorkflowExecutionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - ParentNamespaceId string `protobuf:"bytes,3,opt,name=parent_namespace_id,json=parentNamespaceId,proto3" json:"parent_namespace_id,omitempty"` - ParentWorkflowId string `protobuf:"bytes,4,opt,name=parent_workflow_id,json=parentWorkflowId,proto3" json:"parent_workflow_id,omitempty"` - ParentRunId string `protobuf:"bytes,5,opt,name=parent_run_id,json=parentRunId,proto3" json:"parent_run_id,omitempty"` - ParentInitiatedId int64 `protobuf:"varint,6,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` - CompletionEventBatchId int64 `protobuf:"varint,7,opt,name=completion_event_batch_id,json=completionEventBatchId,proto3" json:"completion_event_batch_id,omitempty"` - TaskQueue string `protobuf:"bytes,9,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - WorkflowTypeName string `protobuf:"bytes,10,opt,name=workflow_type_name,json=workflowTypeName,proto3" json:"workflow_type_name,omitempty"` - WorkflowExecutionTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=workflow_execution_timeout,json=workflowExecutionTimeout,proto3" json:"workflow_execution_timeout,omitempty"` - WorkflowRunTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=workflow_run_timeout,json=workflowRunTimeout,proto3" json:"workflow_run_timeout,omitempty"` - DefaultWorkflowTaskTimeout *durationpb.Duration 
`protobuf:"bytes,13,opt,name=default_workflow_task_timeout,json=defaultWorkflowTaskTimeout,proto3" json:"default_workflow_task_timeout,omitempty"` - LastEventTaskId int64 `protobuf:"varint,17,opt,name=last_event_task_id,json=lastEventTaskId,proto3" json:"last_event_task_id,omitempty"` - LastFirstEventId int64 `protobuf:"varint,18,opt,name=last_first_event_id,json=lastFirstEventId,proto3" json:"last_first_event_id,omitempty"` - LastWorkflowTaskStartedEventId int64 `protobuf:"varint,19,opt,name=last_workflow_task_started_event_id,json=lastWorkflowTaskStartedEventId,proto3" json:"last_workflow_task_started_event_id,omitempty"` - StartTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` - LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + ParentNamespaceId string `protobuf:"bytes,3,opt,name=parent_namespace_id,json=parentNamespaceId,proto3" json:"parent_namespace_id,omitempty"` + ParentWorkflowId string `protobuf:"bytes,4,opt,name=parent_workflow_id,json=parentWorkflowId,proto3" json:"parent_workflow_id,omitempty"` + ParentRunId string `protobuf:"bytes,5,opt,name=parent_run_id,json=parentRunId,proto3" json:"parent_run_id,omitempty"` + ParentInitiatedId int64 `protobuf:"varint,6,opt,name=parent_initiated_id,json=parentInitiatedId,proto3" json:"parent_initiated_id,omitempty"` + CompletionEventBatchId int64 `protobuf:"varint,7,opt,name=completion_event_batch_id,json=completionEventBatchId,proto3" json:"completion_event_batch_id,omitempty"` + TaskQueue string `protobuf:"bytes,9,opt,name=task_queue,json=taskQueue,proto3" 
json:"task_queue,omitempty"` + WorkflowTypeName string `protobuf:"bytes,10,opt,name=workflow_type_name,json=workflowTypeName,proto3" json:"workflow_type_name,omitempty"` + WorkflowExecutionTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=workflow_execution_timeout,json=workflowExecutionTimeout,proto3" json:"workflow_execution_timeout,omitempty"` + WorkflowRunTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=workflow_run_timeout,json=workflowRunTimeout,proto3" json:"workflow_run_timeout,omitempty"` + DefaultWorkflowTaskTimeout *durationpb.Duration `protobuf:"bytes,13,opt,name=default_workflow_task_timeout,json=defaultWorkflowTaskTimeout,proto3" json:"default_workflow_task_timeout,omitempty"` + LastRunningClock int64 `protobuf:"varint,17,opt,name=last_running_clock,json=lastRunningClock,proto3" json:"last_running_clock,omitempty"` + LastFirstEventId int64 `protobuf:"varint,18,opt,name=last_first_event_id,json=lastFirstEventId,proto3" json:"last_first_event_id,omitempty"` + LastCompletedWorkflowTaskStartedEventId int64 `protobuf:"varint,19,opt,name=last_completed_workflow_task_started_event_id,json=lastCompletedWorkflowTaskStartedEventId,proto3" json:"last_completed_workflow_task_started_event_id,omitempty"` + // Deprecated. use `WorkflowExecutionState.start_time` + StartTime *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` // Workflow task fields. 
- WorkflowTaskVersion int64 `protobuf:"varint,22,opt,name=workflow_task_version,json=workflowTaskVersion,proto3" json:"workflow_task_version,omitempty"` - WorkflowTaskScheduledEventId int64 `protobuf:"varint,23,opt,name=workflow_task_scheduled_event_id,json=workflowTaskScheduledEventId,proto3" json:"workflow_task_scheduled_event_id,omitempty"` - WorkflowTaskStartedEventId int64 `protobuf:"varint,24,opt,name=workflow_task_started_event_id,json=workflowTaskStartedEventId,proto3" json:"workflow_task_started_event_id,omitempty"` - WorkflowTaskTimeout *durationpb.Duration `protobuf:"bytes,25,opt,name=workflow_task_timeout,json=workflowTaskTimeout,proto3" json:"workflow_task_timeout,omitempty"` - WorkflowTaskAttempt int32 `protobuf:"varint,26,opt,name=workflow_task_attempt,json=workflowTaskAttempt,proto3" json:"workflow_task_attempt,omitempty"` - WorkflowTaskStartedTime *timestamppb.Timestamp `protobuf:"bytes,27,opt,name=workflow_task_started_time,json=workflowTaskStartedTime,proto3" json:"workflow_task_started_time,omitempty"` - WorkflowTaskScheduledTime *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=workflow_task_scheduled_time,json=workflowTaskScheduledTime,proto3" json:"workflow_task_scheduled_time,omitempty"` - WorkflowTaskOriginalScheduledTime *timestamppb.Timestamp `protobuf:"bytes,30,opt,name=workflow_task_original_scheduled_time,json=workflowTaskOriginalScheduledTime,proto3" json:"workflow_task_original_scheduled_time,omitempty"` - WorkflowTaskRequestId string `protobuf:"bytes,31,opt,name=workflow_task_request_id,json=workflowTaskRequestId,proto3" json:"workflow_task_request_id,omitempty"` - WorkflowTaskType v1.WorkflowTaskType `protobuf:"varint,68,opt,name=workflow_task_type,json=workflowTaskType,proto3,enum=temporal.server.api.enums.v1.WorkflowTaskType" json:"workflow_task_type,omitempty"` - WorkflowTaskSuggestContinueAsNew bool `protobuf:"varint,69,opt,name=workflow_task_suggest_continue_as_new,json=workflowTaskSuggestContinueAsNew,proto3" 
json:"workflow_task_suggest_continue_as_new,omitempty"` - WorkflowTaskHistorySizeBytes int64 `protobuf:"varint,70,opt,name=workflow_task_history_size_bytes,json=workflowTaskHistorySizeBytes,proto3" json:"workflow_task_history_size_bytes,omitempty"` - CancelRequested bool `protobuf:"varint,29,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` - CancelRequestId string `protobuf:"bytes,32,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` - StickyTaskQueue string `protobuf:"bytes,33,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` + WorkflowTaskVersion int64 `protobuf:"varint,22,opt,name=workflow_task_version,json=workflowTaskVersion,proto3" json:"workflow_task_version,omitempty"` + WorkflowTaskScheduledEventId int64 `protobuf:"varint,23,opt,name=workflow_task_scheduled_event_id,json=workflowTaskScheduledEventId,proto3" json:"workflow_task_scheduled_event_id,omitempty"` + WorkflowTaskStartedEventId int64 `protobuf:"varint,24,opt,name=workflow_task_started_event_id,json=workflowTaskStartedEventId,proto3" json:"workflow_task_started_event_id,omitempty"` + WorkflowTaskTimeout *durationpb.Duration `protobuf:"bytes,25,opt,name=workflow_task_timeout,json=workflowTaskTimeout,proto3" json:"workflow_task_timeout,omitempty"` + WorkflowTaskAttempt int32 `protobuf:"varint,26,opt,name=workflow_task_attempt,json=workflowTaskAttempt,proto3" json:"workflow_task_attempt,omitempty"` + WorkflowTaskStartedTime *timestamppb.Timestamp `protobuf:"bytes,27,opt,name=workflow_task_started_time,json=workflowTaskStartedTime,proto3" json:"workflow_task_started_time,omitempty"` + WorkflowTaskScheduledTime *timestamppb.Timestamp `protobuf:"bytes,28,opt,name=workflow_task_scheduled_time,json=workflowTaskScheduledTime,proto3" json:"workflow_task_scheduled_time,omitempty"` + WorkflowTaskOriginalScheduledTime *timestamppb.Timestamp 
`protobuf:"bytes,30,opt,name=workflow_task_original_scheduled_time,json=workflowTaskOriginalScheduledTime,proto3" json:"workflow_task_original_scheduled_time,omitempty"` + WorkflowTaskRequestId string `protobuf:"bytes,31,opt,name=workflow_task_request_id,json=workflowTaskRequestId,proto3" json:"workflow_task_request_id,omitempty"` + WorkflowTaskType v1.WorkflowTaskType `protobuf:"varint,68,opt,name=workflow_task_type,json=workflowTaskType,proto3,enum=temporal.server.api.enums.v1.WorkflowTaskType" json:"workflow_task_type,omitempty"` + WorkflowTaskSuggestContinueAsNew bool `protobuf:"varint,69,opt,name=workflow_task_suggest_continue_as_new,json=workflowTaskSuggestContinueAsNew,proto3" json:"workflow_task_suggest_continue_as_new,omitempty"` + WorkflowTaskSuggestContinueAsNewReasons []v11.SuggestContinueAsNewReason `protobuf:"varint,110,rep,packed,name=workflow_task_suggest_continue_as_new_reasons,json=workflowTaskSuggestContinueAsNewReasons,proto3,enum=temporal.api.enums.v1.SuggestContinueAsNewReason" json:"workflow_task_suggest_continue_as_new_reasons,omitempty"` + WorkflowTaskTargetWorkerDeploymentVersionChanged bool `protobuf:"varint,112,opt,name=workflow_task_target_worker_deployment_version_changed,json=workflowTaskTargetWorkerDeploymentVersionChanged,proto3" json:"workflow_task_target_worker_deployment_version_changed,omitempty"` + WorkflowTaskHistorySizeBytes int64 `protobuf:"varint,70,opt,name=workflow_task_history_size_bytes,json=workflowTaskHistorySizeBytes,proto3" json:"workflow_task_history_size_bytes,omitempty"` + // tracks the started build ID for transient/speculative WFT. This info is used for two purposes: + // - verify WFT completes by the same Build ID that started in the latest attempt + // - when persisting transient/speculative WFT, the right Build ID is used in the WFT started event + // Deprecated. Clean up with versioning-2. 
[cleanup-old-wv] + WorkflowTaskBuildId string `protobuf:"bytes,88,opt,name=workflow_task_build_id,json=workflowTaskBuildId,proto3" json:"workflow_task_build_id,omitempty"` + // tracks the started build ID redirect counter for transient/speculative WFT. This info is to + // ensure the right redirect counter is used in the WFT started event created later + // for a transient/speculative WFT. + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + WorkflowTaskBuildIdRedirectCounter int64 `protobuf:"varint,89,opt,name=workflow_task_build_id_redirect_counter,json=workflowTaskBuildIdRedirectCounter,proto3" json:"workflow_task_build_id_redirect_counter,omitempty"` + // Stamp represents the "version" of the workflow's internal state. + // It increases monotonically when the workflow's options are modified. + // It is used to check if a workflow task is still relevant to the corresponding workflow state machine. + WorkflowTaskStamp int32 `protobuf:"varint,109,opt,name=workflow_task_stamp,json=workflowTaskStamp,proto3" json:"workflow_task_stamp,omitempty"` + // AttemptsSinceLastSuccess tracks the number of workflow task attempts since the last successful workflow task. + // This is carried over when buffered events are applied after workflow task failures. + // Used by the TemporalReportedProblems search attribute to track continuous failure count. + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "since" is needed here. 
--) + WorkflowTaskAttemptsSinceLastSuccess int32 `protobuf:"varint,111,opt,name=workflow_task_attempts_since_last_success,json=workflowTaskAttemptsSinceLastSuccess,proto3" json:"workflow_task_attempts_since_last_success,omitempty"` + CancelRequested bool `protobuf:"varint,29,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` + CancelRequestId string `protobuf:"bytes,32,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` + StickyTaskQueue string `protobuf:"bytes,33,opt,name=sticky_task_queue,json=stickyTaskQueue,proto3" json:"sticky_task_queue,omitempty"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "to" is used to indicate interval. --) @@ -212,10 +211,10 @@ type WorkflowExecutionInfo struct { RequestCancelExternalCount int64 `protobuf:"varint,74,opt,name=request_cancel_external_count,json=requestCancelExternalCount,proto3" json:"request_cancel_external_count,omitempty"` SignalExternalCount int64 `protobuf:"varint,75,opt,name=signal_external_count,json=signalExternalCount,proto3" json:"signal_external_count,omitempty"` UpdateCount int64 `protobuf:"varint,77,opt,name=update_count,json=updateCount,proto3" json:"update_count,omitempty"` - AutoResetPoints *v11.ResetPoints `protobuf:"bytes,51,opt,name=auto_reset_points,json=autoResetPoints,proto3" json:"auto_reset_points,omitempty"` - SearchAttributes map[string]*v12.Payload `protobuf:"bytes,52,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Memo map[string]*v12.Payload `protobuf:"bytes,53,rep,name=memo,proto3" json:"memo,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - VersionHistories *v13.VersionHistories `protobuf:"bytes,54,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + 
AutoResetPoints *v12.ResetPoints `protobuf:"bytes,51,opt,name=auto_reset_points,json=autoResetPoints,proto3" json:"auto_reset_points,omitempty"` + SearchAttributes map[string]*v13.Payload `protobuf:"bytes,52,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Memo map[string]*v13.Payload `protobuf:"bytes,53,rep,name=memo,proto3" json:"memo,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + VersionHistories *v14.VersionHistories `protobuf:"bytes,54,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` FirstExecutionRunId string `protobuf:"bytes,55,opt,name=first_execution_run_id,json=firstExecutionRunId,proto3" json:"first_execution_run_id,omitempty"` ExecutionStats *ExecutionStats `protobuf:"bytes,56,opt,name=execution_stats,json=executionStats,proto3" json:"execution_stats,omitempty"` WorkflowRunExpirationTime *timestamppb.Timestamp `protobuf:"bytes,57,opt,name=workflow_run_expiration_time,json=workflowRunExpirationTime,proto3" json:"workflow_run_expiration_time,omitempty"` @@ -225,36 +224,166 @@ type WorkflowExecutionInfo struct { ExecutionTime *timestamppb.Timestamp `protobuf:"bytes,60,opt,name=execution_time,json=executionTime,proto3" json:"execution_time,omitempty"` // If continued-as-new, or retried, or cron, holds the new run id. 
NewExecutionRunId string `protobuf:"bytes,61,opt,name=new_execution_run_id,json=newExecutionRunId,proto3" json:"new_execution_run_id,omitempty"` - ParentClock *v14.VectorClock `protobuf:"bytes,62,opt,name=parent_clock,json=parentClock,proto3" json:"parent_clock,omitempty"` + ParentClock *v15.VectorClock `protobuf:"bytes,62,opt,name=parent_clock,json=parentClock,proto3" json:"parent_clock,omitempty"` // version of child execution initiated event in parent workflow ParentInitiatedVersion int64 `protobuf:"varint,63,opt,name=parent_initiated_version,json=parentInitiatedVersion,proto3" json:"parent_initiated_version,omitempty"` // Used to check if transfer close task is processed before deleting the workflow execution. CloseTransferTaskId int64 `protobuf:"varint,64,opt,name=close_transfer_task_id,json=closeTransferTaskId,proto3" json:"close_transfer_task_id,omitempty"` // Used to check if visibility close task is processed before deleting the workflow execution. - CloseVisibilityTaskId int64 `protobuf:"varint,65,opt,name=close_visibility_task_id,json=closeVisibilityTaskId,proto3" json:"close_visibility_task_id,omitempty"` - CloseTime *timestamppb.Timestamp `protobuf:"bytes,66,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` - CloseVisibilityTaskCompleted bool `protobuf:"varint,67,opt,name=close_visibility_task_completed,json=closeVisibilityTaskCompleted,proto3" json:"close_visibility_task_completed,omitempty"` - BaseExecutionInfo *v15.BaseExecutionInfo `protobuf:"bytes,76,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` - // If using build-id based versioning: version stamp of the last worker to process a - // workflow taks for this workflow. 
- WorkerVersionStamp *v12.WorkerVersionStamp `protobuf:"bytes,78,opt,name=worker_version_stamp,json=workerVersionStamp,proto3" json:"worker_version_stamp,omitempty"` + CloseVisibilityTaskId int64 `protobuf:"varint,65,opt,name=close_visibility_task_id,json=closeVisibilityTaskId,proto3" json:"close_visibility_task_id,omitempty"` + CloseTime *timestamppb.Timestamp `protobuf:"bytes,66,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` + // Relocatable attributes are memo and search attributes. If they were removed, then they are not + // present in the mutable state, and they should be in visibility store. + RelocatableAttributesRemoved bool `protobuf:"varint,67,opt,name=relocatable_attributes_removed,json=relocatableAttributesRemoved,proto3" json:"relocatable_attributes_removed,omitempty"` + BaseExecutionInfo *v16.BaseExecutionInfo `protobuf:"bytes,76,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` + // If using build-id based versioning: version stamp of the last worker to complete a + // workflow tasks for this workflow. + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + MostRecentWorkerVersionStamp *v13.WorkerVersionStamp `protobuf:"bytes,78,opt,name=most_recent_worker_version_stamp,json=mostRecentWorkerVersionStamp,proto3" json:"most_recent_worker_version_stamp,omitempty"` + // The currently assigned build ID for this execution. Presence of this value means worker versioning is used + // for this execution. Assigned build ID is selected by matching based on Worker Versioning Assignment Rules + // when the first workflow task of the execution is scheduled. If the first workflow task fails and is scheduled + // again, the assigned build ID may change according to the latest versioning rules. + // Assigned build ID can also change in the middle of a execution if Compatible Redirect Rules are applied to + // this execution. + // Deprecated. Clean up with versioning-2. 
[cleanup-old-wv] + AssignedBuildId string `protobuf:"bytes,85,opt,name=assigned_build_id,json=assignedBuildId,proto3" json:"assigned_build_id,omitempty"` + // Build ID inherited from a previous/parent execution. If present, assigned_build_id will be set to this, instead + // of using the assignment rules. + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + InheritedBuildId string `protobuf:"bytes,86,opt,name=inherited_build_id,json=inheritedBuildId,proto3" json:"inherited_build_id,omitempty"` + // Tracks the number of times a redirect rule is applied to this workflow. Used to apply redirects in the right + // order when mutable state is rebuilt from history events. + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + BuildIdRedirectCounter int64 `protobuf:"varint,87,opt,name=build_id_redirect_counter,json=buildIdRedirectCounter,proto3" json:"build_id_redirect_counter,omitempty"` // index of update IDs and pointers to associated history events. - UpdateInfos map[string]*v16.UpdateInfo `protobuf:"bytes,79,rep,name=update_infos,json=updateInfos,proto3" json:"update_infos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - TransitionHistory []*VersionedTransition `protobuf:"bytes,80,rep,name=transition_history,json=transitionHistory,proto3" json:"transition_history,omitempty"` + UpdateInfos map[string]*UpdateInfo `protobuf:"bytes,79,rep,name=update_infos,json=updateInfos,proto3" json:"update_infos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Transition history encodes all transitions a mutable state object has gone through in a compact way. + // Here the transition_count field of VersionedTransition represents the maximum transition count the mutable state object + // has gone through for the corresponding namespace failover version. 
+ // For example, if the transition history is `[{v: 1, t: 3}, {v: 2, t: 5}]`, it means transition 1-3 have failover version 1, + // and transition 4-5 have failover version 2. + // + // Each task generated by the HSM framework is imprinted with the current VersionedTransition at the end of the transaction. + // When a task is being processed, the transition history is compared with the imprinted task information to + // verify that a task is not referencing a stale state or that the task itself is not stale. + // For example, with the same transition history above, task A `{v: 2, t: 4}` **is not** + // referencing stale state because for version `2` transitions `4-5` are valid, while task B `{v: 2, t: 6}` **is** + // referencing stale state because the transition count is out of range for version `2`. + // Furthermore, task C `{v: 1, t: 4}` itself is stale because it is referencing an impossible state, likely due to post + // split-brain reconciliation. + TransitionHistory []*VersionedTransition `protobuf:"bytes,80,rep,name=transition_history,json=transitionHistory,proto3" json:"transition_history,omitempty"` // Map of state machine type to map of machine by ID. // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "by" is used to clarify the keys and values. --) - SubStateMachinesByType map[int32]*StateMachineMap `protobuf:"bytes,81,rep,name=sub_state_machines_by_type,json=subStateMachinesByType,proto3" json:"sub_state_machines_by_type,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SubStateMachinesByType map[string]*StateMachineMap `protobuf:"bytes,81,rep,name=sub_state_machines_by_type,json=subStateMachinesByType,proto3" json:"sub_state_machines_by_type,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // This field is for tracking if the workflow execution timer task is created or not. 
+ // We don't need this field if we always create the execution timer task when the first + // workflow in a workflow chain starts. However, this execution timer logic is later added. + // To maintain backward compatibility, we need to track if the execution timer task is created + // for a workflow chain since later workflows in the chain also need to create the execution + // timer task if it is not created yet. + // NOTE: Task status is clsuter specific information, so when replicating mutable state, this + // field need to be sanitized. + WorkflowExecutionTimerTaskStatus int32 `protobuf:"varint,82,opt,name=workflow_execution_timer_task_status,json=workflowExecutionTimerTaskStatus,proto3" json:"workflow_execution_timer_task_status,omitempty"` + // The root workflow execution is defined as follows: + // 1. A workflow without parent workflow is its own root workflow. + // 2. A workflow that has a parent workflow has the same root workflow as its parent workflow. + RootWorkflowId string `protobuf:"bytes,83,opt,name=root_workflow_id,json=rootWorkflowId,proto3" json:"root_workflow_id,omitempty"` + RootRunId string `protobuf:"bytes,84,opt,name=root_run_id,json=rootRunId,proto3" json:"root_run_id,omitempty"` + // Timer tasks emitted from state machines are stored in this array, grouped and sorted by their deadline. Only the + // next state machine timer task is generated at a time per mutable state. When that task is processed it iterates + // this array and triggers timers that are ready. + // NOTE: Task status is cluster specific information, so when replicating mutable state, this field needs to be + // sanitized. + StateMachineTimers []*StateMachineTimerGroup `protobuf:"bytes,90,rep,name=state_machine_timers,json=stateMachineTimers,proto3" json:"state_machine_timers,omitempty"` + // The shard clock's timestamp at the time the first valid task was created for this mutable state (either for a new + // mutable state or when rebuilding from events). 
The field should be updated whenever we refresh tasks, marking + // older generation tasks obsolete. + // This field is used for task staleness checks when mutable state is rebuilt. + // NOTE: Task status is cluster specific information, so when replicating mutable state, this field needs to be + // sanitized. + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: Ignoring api-linter rules for clarity --) + // + // (-- api-linter: core::0142::time-field-type=disabled + // + // aip.dev/not-precedent: This is a vector clock, not a timestamp --) + TaskGenerationShardClockTimestamp int64 `protobuf:"varint,91,opt,name=task_generation_shard_clock_timestamp,json=taskGenerationShardClockTimestamp,proto3" json:"task_generation_shard_clock_timestamp,omitempty"` + WorkflowTaskLastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,92,opt,name=workflow_task_last_update_versioned_transition,json=workflowTaskLastUpdateVersionedTransition,proto3" json:"workflow_task_last_update_versioned_transition,omitempty"` + VisibilityLastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,93,opt,name=visibility_last_update_versioned_transition,json=visibilityLastUpdateVersionedTransition,proto3" json:"visibility_last_update_versioned_transition,omitempty"` + SignalRequestIdsLastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,94,opt,name=signal_request_ids_last_update_versioned_transition,json=signalRequestIdsLastUpdateVersionedTransition,proto3" json:"signal_request_ids_last_update_versioned_transition,omitempty"` + SubStateMachineTombstoneBatches []*StateMachineTombstoneBatch `protobuf:"bytes,95,rep,name=sub_state_machine_tombstone_batches,json=subStateMachineTombstoneBatches,proto3" json:"sub_state_machine_tombstone_batches,omitempty"` + // The workflow has been reset. 
+ WorkflowWasReset bool `protobuf:"varint,96,opt,name=workflow_was_reset,json=workflowWasReset,proto3" json:"workflow_was_reset,omitempty"` + // Reset Run ID points to the new nun when this execution is reset. If the execution is reset multiple times, it points to the latest run. + ResetRunId string `protobuf:"bytes,97,opt,name=reset_run_id,json=resetRunId,proto3" json:"reset_run_id,omitempty"` + // When present, it means the workflow execution is versioned, or is transitioning from + // unversioned workers to versioned ones. + // Note: Deployment objects inside versioning info are immutable, never change their fields. + // (-- api-linter: core::0203::immutable=disabled + // + // aip.dev/not-precedent: field_behavior annotation is not yet used in this repo --) + VersioningInfo *v12.WorkflowExecutionVersioningInfo `protobuf:"bytes,98,opt,name=versioning_info,json=versioningInfo,proto3" json:"versioning_info,omitempty"` + // This is the run id when the WorkflowExecutionStarted event was written. + // A workflow reset changes the execution run_id, but preserves this field so that we have a reference to the original workflow execution that was reset. 
+ OriginalExecutionRunId string `protobuf:"bytes,99,opt,name=original_execution_run_id,json=originalExecutionRunId,proto3" json:"original_execution_run_id,omitempty"` + // These two fields are to record the transition history when the transition history is cleaned up due to disabling transition history + // Should be deprecated once the transition history is fully launched + PreviousTransitionHistory []*VersionedTransition `protobuf:"bytes,100,rep,name=previous_transition_history,json=previousTransitionHistory,proto3" json:"previous_transition_history,omitempty"` + LastTransitionHistoryBreakPoint *VersionedTransition `protobuf:"bytes,101,opt,name=last_transition_history_break_point,json=lastTransitionHistoryBreakPoint,proto3" json:"last_transition_history_break_point,omitempty"` + // This is a set of child workflows that were initialized after the reset point in the parent workflow. + // The children are identified by the key "workflow_type:workflow_id". When the parent starts to make progress after reset, it uses this data to + // determine the right start policy to apply to the child. This list will include children initiated in continue-as-new runs. + ChildrenInitializedPostResetPoint map[string]*ResetChildInfo `protobuf:"bytes,102,rep,name=children_initialized_post_reset_point,json=childrenInitializedPostResetPoint,proto3" json:"children_initialized_post_reset_point,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The worker deployment that completed the last WFT. + WorkerDeploymentName string `protobuf:"bytes,103,opt,name=worker_deployment_name,json=workerDeploymentName,proto3" json:"worker_deployment_name,omitempty"` + // Priority contains metadata that controls relative ordering of task processing + // when tasks are backed up in a queue. 
+ Priority *v13.Priority `protobuf:"bytes,104,opt,name=priority,proto3" json:"priority,omitempty"` + // Run ID of the execution that supersedes this one (via terminate or continue-as-new). + SuccessorRunId string `protobuf:"bytes,105,opt,name=successor_run_id,json=successorRunId,proto3" json:"successor_run_id,omitempty"` + // Pause info contains the details of the request to pause the workflow. + PauseInfo *WorkflowPauseInfo `protobuf:"bytes,106,opt,name=pause_info,json=pauseInfo,proto3" json:"pause_info,omitempty"` + // Last workflow task failure category and cause are used to track the last workflow task failure category and cause. + // + // Types that are valid to be assigned to LastWorkflowTaskFailure: + // + // *WorkflowExecutionInfo_LastWorkflowTaskFailureCause + // *WorkflowExecutionInfo_LastWorkflowTaskTimedOutType + LastWorkflowTaskFailure isWorkflowExecutionInfo_LastWorkflowTaskFailure `protobuf_oneof:"last_workflow_task_failure"` + // The last target version for which the server set targetDeploymentVersionChanged + // to true on a workflow task started event. Updated on each workflow task start, + // set only when the server decides to set the targetDeploymentVersionChanged flag + // to true. + // + // This is a wrapper message to distinguish "never notified" (nil wrapper) from + // "notified about an unversioned target" (non-nil wrapper with nil deployment_version). + // + // Read at continue-as-new time: if set, it becomes the declined_target_version_upgrade + // for the next run. If nil, the existing declined value is preserved (CaN chain). + LastNotifiedTargetVersion *LastNotifiedTargetVersion `protobuf:"bytes,113,opt,name=last_notified_target_version,json=lastNotifiedTargetVersion,proto3" json:"last_notified_target_version,omitempty"` + // The target version that the SDK previously declined to upgrade to. Inherited + // from a previous run via continue-as-new or retry. 
At CaN time, computed as: + // + // if last_notified_target_version != nil → use that (latest signal was declined) + // else → preserve existing declined value (CaN chain, never re-signaled) + // + // Wrapper distinguishes "never declined" (nil) from "declined unversioned" (non-nil, nil version). + DeclinedTargetVersionUpgrade *v17.DeclinedTargetVersionUpgrade `protobuf:"bytes,114,opt,name=declined_target_version_upgrade,json=declinedTargetVersionUpgrade,proto3" json:"declined_target_version_upgrade,omitempty"` + // Time skipping info that contains the config and runtime history of the time skipping for the workflow. + TimeSkippingInfo *TimeSkippingInfo `protobuf:"bytes,115,opt,name=time_skipping_info,json=timeSkippingInfo,proto3" json:"time_skipping_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WorkflowExecutionInfo) Reset() { *x = WorkflowExecutionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WorkflowExecutionInfo) String() string { @@ -265,7 +394,7 @@ func (*WorkflowExecutionInfo) ProtoMessage() {} func (x *WorkflowExecutionInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -364,9 +493,9 @@ func (x *WorkflowExecutionInfo) GetDefaultWorkflowTaskTimeout() *durationpb.Dura return nil } -func (x *WorkflowExecutionInfo) GetLastEventTaskId() int64 { +func (x *WorkflowExecutionInfo) GetLastRunningClock() int64 { if x != nil { - return x.LastEventTaskId + 
return x.LastRunningClock } return 0 } @@ -378,9 +507,9 @@ func (x *WorkflowExecutionInfo) GetLastFirstEventId() int64 { return 0 } -func (x *WorkflowExecutionInfo) GetLastWorkflowTaskStartedEventId() int64 { +func (x *WorkflowExecutionInfo) GetLastCompletedWorkflowTaskStartedEventId() int64 { if x != nil { - return x.LastWorkflowTaskStartedEventId + return x.LastCompletedWorkflowTaskStartedEventId } return 0 } @@ -476,6 +605,20 @@ func (x *WorkflowExecutionInfo) GetWorkflowTaskSuggestContinueAsNew() bool { return false } +func (x *WorkflowExecutionInfo) GetWorkflowTaskSuggestContinueAsNewReasons() []v11.SuggestContinueAsNewReason { + if x != nil { + return x.WorkflowTaskSuggestContinueAsNewReasons + } + return nil +} + +func (x *WorkflowExecutionInfo) GetWorkflowTaskTargetWorkerDeploymentVersionChanged() bool { + if x != nil { + return x.WorkflowTaskTargetWorkerDeploymentVersionChanged + } + return false +} + func (x *WorkflowExecutionInfo) GetWorkflowTaskHistorySizeBytes() int64 { if x != nil { return x.WorkflowTaskHistorySizeBytes @@ -483,6 +626,34 @@ func (x *WorkflowExecutionInfo) GetWorkflowTaskHistorySizeBytes() int64 { return 0 } +func (x *WorkflowExecutionInfo) GetWorkflowTaskBuildId() string { + if x != nil { + return x.WorkflowTaskBuildId + } + return "" +} + +func (x *WorkflowExecutionInfo) GetWorkflowTaskBuildIdRedirectCounter() int64 { + if x != nil { + return x.WorkflowTaskBuildIdRedirectCounter + } + return 0 +} + +func (x *WorkflowExecutionInfo) GetWorkflowTaskStamp() int32 { + if x != nil { + return x.WorkflowTaskStamp + } + return 0 +} + +func (x *WorkflowExecutionInfo) GetWorkflowTaskAttemptsSinceLastSuccess() int32 { + if x != nil { + return x.WorkflowTaskAttemptsSinceLastSuccess + } + return 0 +} + func (x *WorkflowExecutionInfo) GetCancelRequested() bool { if x != nil { return x.CancelRequested @@ -623,28 +794,28 @@ func (x *WorkflowExecutionInfo) GetUpdateCount() int64 { return 0 } -func (x *WorkflowExecutionInfo) GetAutoResetPoints() 
*v11.ResetPoints { +func (x *WorkflowExecutionInfo) GetAutoResetPoints() *v12.ResetPoints { if x != nil { return x.AutoResetPoints } return nil } -func (x *WorkflowExecutionInfo) GetSearchAttributes() map[string]*v12.Payload { +func (x *WorkflowExecutionInfo) GetSearchAttributes() map[string]*v13.Payload { if x != nil { return x.SearchAttributes } return nil } -func (x *WorkflowExecutionInfo) GetMemo() map[string]*v12.Payload { +func (x *WorkflowExecutionInfo) GetMemo() map[string]*v13.Payload { if x != nil { return x.Memo } return nil } -func (x *WorkflowExecutionInfo) GetVersionHistories() *v13.VersionHistories { +func (x *WorkflowExecutionInfo) GetVersionHistories() *v14.VersionHistories { if x != nil { return x.VersionHistories } @@ -700,7 +871,7 @@ func (x *WorkflowExecutionInfo) GetNewExecutionRunId() string { return "" } -func (x *WorkflowExecutionInfo) GetParentClock() *v14.VectorClock { +func (x *WorkflowExecutionInfo) GetParentClock() *v15.VectorClock { if x != nil { return x.ParentClock } @@ -735,389 +906,301 @@ func (x *WorkflowExecutionInfo) GetCloseTime() *timestamppb.Timestamp { return nil } -func (x *WorkflowExecutionInfo) GetCloseVisibilityTaskCompleted() bool { +func (x *WorkflowExecutionInfo) GetRelocatableAttributesRemoved() bool { if x != nil { - return x.CloseVisibilityTaskCompleted + return x.RelocatableAttributesRemoved } return false } -func (x *WorkflowExecutionInfo) GetBaseExecutionInfo() *v15.BaseExecutionInfo { +func (x *WorkflowExecutionInfo) GetBaseExecutionInfo() *v16.BaseExecutionInfo { if x != nil { return x.BaseExecutionInfo } return nil } -func (x *WorkflowExecutionInfo) GetWorkerVersionStamp() *v12.WorkerVersionStamp { +func (x *WorkflowExecutionInfo) GetMostRecentWorkerVersionStamp() *v13.WorkerVersionStamp { if x != nil { - return x.WorkerVersionStamp + return x.MostRecentWorkerVersionStamp } return nil } -func (x *WorkflowExecutionInfo) GetUpdateInfos() map[string]*v16.UpdateInfo { +func (x *WorkflowExecutionInfo) 
GetAssignedBuildId() string { if x != nil { - return x.UpdateInfos + return x.AssignedBuildId } - return nil + return "" } -func (x *WorkflowExecutionInfo) GetTransitionHistory() []*VersionedTransition { +func (x *WorkflowExecutionInfo) GetInheritedBuildId() string { if x != nil { - return x.TransitionHistory + return x.InheritedBuildId } - return nil + return "" } -func (x *WorkflowExecutionInfo) GetSubStateMachinesByType() map[int32]*StateMachineMap { +func (x *WorkflowExecutionInfo) GetBuildIdRedirectCounter() int64 { if x != nil { - return x.SubStateMachinesByType + return x.BuildIdRedirectCounter } - return nil -} - -type ExecutionStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - HistorySize int64 `protobuf:"varint,1,opt,name=history_size,json=historySize,proto3" json:"history_size,omitempty"` + return 0 } -func (x *ExecutionStats) Reset() { - *x = ExecutionStats{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *WorkflowExecutionInfo) GetUpdateInfos() map[string]*UpdateInfo { + if x != nil { + return x.UpdateInfos } + return nil } -func (x *ExecutionStats) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExecutionStats) ProtoMessage() {} - -func (x *ExecutionStats) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *WorkflowExecutionInfo) GetTransitionHistory() []*VersionedTransition { + if x != nil { + return x.TransitionHistory } - return mi.MessageOf(x) -} - -// Deprecated: Use ExecutionStats.ProtoReflect.Descriptor instead. 
-func (*ExecutionStats) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{2} + return nil } -func (x *ExecutionStats) GetHistorySize() int64 { +func (x *WorkflowExecutionInfo) GetSubStateMachinesByType() map[string]*StateMachineMap { if x != nil { - return x.HistorySize + return x.SubStateMachinesByType } - return 0 -} - -// execution_state column -type WorkflowExecutionState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - CreateRequestId string `protobuf:"bytes,1,opt,name=create_request_id,json=createRequestId,proto3" json:"create_request_id,omitempty"` - RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - State v1.WorkflowExecutionState `protobuf:"varint,3,opt,name=state,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"state,omitempty"` - Status v17.WorkflowExecutionStatus `protobuf:"varint,4,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` + return nil } -func (x *WorkflowExecutionState) Reset() { - *x = WorkflowExecutionState{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *WorkflowExecutionInfo) GetWorkflowExecutionTimerTaskStatus() int32 { + if x != nil { + return x.WorkflowExecutionTimerTaskStatus } + return 0 } -func (x *WorkflowExecutionState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WorkflowExecutionState) ProtoMessage() {} - -func (x *WorkflowExecutionState) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - 
ms.StoreMessageInfo(mi) - } - return ms +func (x *WorkflowExecutionInfo) GetRootWorkflowId() string { + if x != nil { + return x.RootWorkflowId } - return mi.MessageOf(x) -} - -// Deprecated: Use WorkflowExecutionState.ProtoReflect.Descriptor instead. -func (*WorkflowExecutionState) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{3} + return "" } -func (x *WorkflowExecutionState) GetCreateRequestId() string { +func (x *WorkflowExecutionInfo) GetRootRunId() string { if x != nil { - return x.CreateRequestId + return x.RootRunId } return "" } -func (x *WorkflowExecutionState) GetRunId() string { +func (x *WorkflowExecutionInfo) GetStateMachineTimers() []*StateMachineTimerGroup { if x != nil { - return x.RunId + return x.StateMachineTimers } - return "" + return nil } -func (x *WorkflowExecutionState) GetState() v1.WorkflowExecutionState { +func (x *WorkflowExecutionInfo) GetTaskGenerationShardClockTimestamp() int64 { if x != nil { - return x.State + return x.TaskGenerationShardClockTimestamp } - return v1.WorkflowExecutionState(0) + return 0 } -func (x *WorkflowExecutionState) GetStatus() v17.WorkflowExecutionStatus { +func (x *WorkflowExecutionInfo) GetWorkflowTaskLastUpdateVersionedTransition() *VersionedTransition { if x != nil { - return x.Status + return x.WorkflowTaskLastUpdateVersionedTransition } - return v17.WorkflowExecutionStatus(0) + return nil } -// transfer column -type TransferTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType 
`protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - TargetNamespaceId string `protobuf:"bytes,5,opt,name=target_namespace_id,json=targetNamespaceId,proto3" json:"target_namespace_id,omitempty"` - TargetWorkflowId string `protobuf:"bytes,6,opt,name=target_workflow_id,json=targetWorkflowId,proto3" json:"target_workflow_id,omitempty"` - TargetRunId string `protobuf:"bytes,7,opt,name=target_run_id,json=targetRunId,proto3" json:"target_run_id,omitempty"` - TaskQueue string `protobuf:"bytes,8,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - TargetChildWorkflowOnly bool `protobuf:"varint,9,opt,name=target_child_workflow_only,json=targetChildWorkflowOnly,proto3" json:"target_child_workflow_only,omitempty"` - ScheduledEventId int64 `protobuf:"varint,10,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - Version int64 `protobuf:"varint,11,opt,name=version,proto3" json:"version,omitempty"` - TaskId int64 `protobuf:"varint,12,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` - // (-- api-linter: core::0140::prepositions=disabled - // - // aip.dev/not-precedent: "after" is used to indicate sequence of actions. 
--) - DeleteAfterClose bool `protobuf:"varint,15,opt,name=delete_after_close,json=deleteAfterClose,proto3" json:"delete_after_close,omitempty"` - // Types that are assignable to TaskDetails: - // - // *TransferTaskInfo_CloseExecutionTaskDetails_ - TaskDetails isTransferTaskInfo_TaskDetails `protobuf_oneof:"task_details"` +func (x *WorkflowExecutionInfo) GetVisibilityLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.VisibilityLastUpdateVersionedTransition + } + return nil } -func (x *TransferTaskInfo) Reset() { - *x = TransferTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *WorkflowExecutionInfo) GetSignalRequestIdsLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.SignalRequestIdsLastUpdateVersionedTransition } + return nil } -func (x *TransferTaskInfo) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *WorkflowExecutionInfo) GetSubStateMachineTombstoneBatches() []*StateMachineTombstoneBatch { + if x != nil { + return x.SubStateMachineTombstoneBatches + } + return nil } -func (*TransferTaskInfo) ProtoMessage() {} - -func (x *TransferTaskInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *WorkflowExecutionInfo) GetWorkflowWasReset() bool { + if x != nil { + return x.WorkflowWasReset } - return mi.MessageOf(x) + return false } -// Deprecated: Use TransferTaskInfo.ProtoReflect.Descriptor instead. 
-func (*TransferTaskInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{4} +func (x *WorkflowExecutionInfo) GetResetRunId() string { + if x != nil { + return x.ResetRunId + } + return "" } -func (x *TransferTaskInfo) GetNamespaceId() string { +func (x *WorkflowExecutionInfo) GetVersioningInfo() *v12.WorkflowExecutionVersioningInfo { if x != nil { - return x.NamespaceId + return x.VersioningInfo } - return "" + return nil } -func (x *TransferTaskInfo) GetWorkflowId() string { +func (x *WorkflowExecutionInfo) GetOriginalExecutionRunId() string { if x != nil { - return x.WorkflowId + return x.OriginalExecutionRunId } return "" } -func (x *TransferTaskInfo) GetRunId() string { +func (x *WorkflowExecutionInfo) GetPreviousTransitionHistory() []*VersionedTransition { if x != nil { - return x.RunId + return x.PreviousTransitionHistory } - return "" + return nil } -func (x *TransferTaskInfo) GetTaskType() v1.TaskType { +func (x *WorkflowExecutionInfo) GetLastTransitionHistoryBreakPoint() *VersionedTransition { if x != nil { - return x.TaskType + return x.LastTransitionHistoryBreakPoint } - return v1.TaskType(0) + return nil } -func (x *TransferTaskInfo) GetTargetNamespaceId() string { +func (x *WorkflowExecutionInfo) GetChildrenInitializedPostResetPoint() map[string]*ResetChildInfo { if x != nil { - return x.TargetNamespaceId + return x.ChildrenInitializedPostResetPoint } - return "" + return nil } -func (x *TransferTaskInfo) GetTargetWorkflowId() string { +func (x *WorkflowExecutionInfo) GetWorkerDeploymentName() string { if x != nil { - return x.TargetWorkflowId + return x.WorkerDeploymentName } return "" } -func (x *TransferTaskInfo) GetTargetRunId() string { +func (x *WorkflowExecutionInfo) GetPriority() *v13.Priority { if x != nil { - return x.TargetRunId + return x.Priority } - return "" + return nil } -func (x *TransferTaskInfo) GetTaskQueue() string { +func (x *WorkflowExecutionInfo) 
GetSuccessorRunId() string { if x != nil { - return x.TaskQueue + return x.SuccessorRunId } return "" } -func (x *TransferTaskInfo) GetTargetChildWorkflowOnly() bool { +func (x *WorkflowExecutionInfo) GetPauseInfo() *WorkflowPauseInfo { if x != nil { - return x.TargetChildWorkflowOnly + return x.PauseInfo } - return false + return nil } -func (x *TransferTaskInfo) GetScheduledEventId() int64 { +func (x *WorkflowExecutionInfo) GetLastWorkflowTaskFailure() isWorkflowExecutionInfo_LastWorkflowTaskFailure { if x != nil { - return x.ScheduledEventId + return x.LastWorkflowTaskFailure } - return 0 + return nil } -func (x *TransferTaskInfo) GetVersion() int64 { +func (x *WorkflowExecutionInfo) GetLastWorkflowTaskFailureCause() v11.WorkflowTaskFailedCause { if x != nil { - return x.Version + if x, ok := x.LastWorkflowTaskFailure.(*WorkflowExecutionInfo_LastWorkflowTaskFailureCause); ok { + return x.LastWorkflowTaskFailureCause + } } - return 0 + return v11.WorkflowTaskFailedCause(0) } -func (x *TransferTaskInfo) GetTaskId() int64 { +func (x *WorkflowExecutionInfo) GetLastWorkflowTaskTimedOutType() v11.TimeoutType { if x != nil { - return x.TaskId + if x, ok := x.LastWorkflowTaskFailure.(*WorkflowExecutionInfo_LastWorkflowTaskTimedOutType); ok { + return x.LastWorkflowTaskTimedOutType + } } - return 0 + return v11.TimeoutType(0) } -func (x *TransferTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { +func (x *WorkflowExecutionInfo) GetLastNotifiedTargetVersion() *LastNotifiedTargetVersion { if x != nil { - return x.VisibilityTime + return x.LastNotifiedTargetVersion } return nil } -func (x *TransferTaskInfo) GetDeleteAfterClose() bool { +func (x *WorkflowExecutionInfo) GetDeclinedTargetVersionUpgrade() *v17.DeclinedTargetVersionUpgrade { if x != nil { - return x.DeleteAfterClose + return x.DeclinedTargetVersionUpgrade } - return false + return nil } -func (m *TransferTaskInfo) GetTaskDetails() isTransferTaskInfo_TaskDetails { - if m != nil { - return m.TaskDetails +func 
(x *WorkflowExecutionInfo) GetTimeSkippingInfo() *TimeSkippingInfo { + if x != nil { + return x.TimeSkippingInfo } return nil } -func (x *TransferTaskInfo) GetCloseExecutionTaskDetails() *TransferTaskInfo_CloseExecutionTaskDetails { - if x, ok := x.GetTaskDetails().(*TransferTaskInfo_CloseExecutionTaskDetails_); ok { - return x.CloseExecutionTaskDetails - } - return nil +type isWorkflowExecutionInfo_LastWorkflowTaskFailure interface { + isWorkflowExecutionInfo_LastWorkflowTaskFailure() } -type isTransferTaskInfo_TaskDetails interface { - isTransferTaskInfo_TaskDetails() +type WorkflowExecutionInfo_LastWorkflowTaskFailureCause struct { + LastWorkflowTaskFailureCause v11.WorkflowTaskFailedCause `protobuf:"varint,107,opt,name=last_workflow_task_failure_cause,json=lastWorkflowTaskFailureCause,proto3,enum=temporal.api.enums.v1.WorkflowTaskFailedCause,oneof"` } -type TransferTaskInfo_CloseExecutionTaskDetails_ struct { - CloseExecutionTaskDetails *TransferTaskInfo_CloseExecutionTaskDetails `protobuf:"bytes,16,opt,name=close_execution_task_details,json=closeExecutionTaskDetails,proto3,oneof"` +type WorkflowExecutionInfo_LastWorkflowTaskTimedOutType struct { + LastWorkflowTaskTimedOutType v11.TimeoutType `protobuf:"varint,108,opt,name=last_workflow_task_timed_out_type,json=lastWorkflowTaskTimedOutType,proto3,enum=temporal.api.enums.v1.TimeoutType,oneof"` } -func (*TransferTaskInfo_CloseExecutionTaskDetails_) isTransferTaskInfo_TaskDetails() {} +func (*WorkflowExecutionInfo_LastWorkflowTaskFailureCause) isWorkflowExecutionInfo_LastWorkflowTaskFailure() { +} -// replication column -type ReplicationTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (*WorkflowExecutionInfo_LastWorkflowTaskTimedOutType) isWorkflowExecutionInfo_LastWorkflowTaskFailure() { +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string 
`protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - Version int64 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` - FirstEventId int64 `protobuf:"varint,6,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` - NextEventId int64 `protobuf:"varint,7,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` - ScheduledEventId int64 `protobuf:"varint,8,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - BranchToken []byte `protobuf:"bytes,11,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` - NewRunBranchToken []byte `protobuf:"bytes,13,opt,name=new_run_branch_token,json=newRunBranchToken,proto3" json:"new_run_branch_token,omitempty"` - TaskId int64 `protobuf:"varint,15,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,16,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` - NewRunId string `protobuf:"bytes,17,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` +type TimeSkippingInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Current time-skipping configuration applied to the workflow. 
+ Config *v12.TimeSkippingConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + // Total skipped duration for the current workflow execution run, including any + AccumulatedSkippedDuration *durationpb.Duration `protobuf:"bytes,2,opt,name=accumulated_skipped_duration,json=accumulatedSkippedDuration,proto3" json:"accumulated_skipped_duration,omitempty"` + // The current bound based on elapsed duration for time skipping. + CurrentElapsedDurationBound *TimeSkippingBoundInfo `protobuf:"bytes,3,opt,name=current_elapsed_duration_bound,json=currentElapsedDurationBound,proto3" json:"current_elapsed_duration_bound,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ReplicationTaskInfo) Reset() { - *x = ReplicationTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *TimeSkippingInfo) Reset() { + *x = TimeSkippingInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ReplicationTaskInfo) String() string { +func (x *TimeSkippingInfo) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ReplicationTaskInfo) ProtoMessage() {} +func (*TimeSkippingInfo) ProtoMessage() {} -func (x *ReplicationTaskInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { +func (x *TimeSkippingInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1127,136 +1210,254 @@ func (x *ReplicationTaskInfo) ProtoReflect() protoreflect.Message { 
return mi.MessageOf(x) } -// Deprecated: Use ReplicationTaskInfo.ProtoReflect.Descriptor instead. -func (*ReplicationTaskInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{5} +// Deprecated: Use TimeSkippingInfo.ProtoReflect.Descriptor instead. +func (*TimeSkippingInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{2} } -func (x *ReplicationTaskInfo) GetNamespaceId() string { +func (x *TimeSkippingInfo) GetConfig() *v12.TimeSkippingConfig { if x != nil { - return x.NamespaceId + return x.Config } - return "" + return nil } -func (x *ReplicationTaskInfo) GetWorkflowId() string { +func (x *TimeSkippingInfo) GetAccumulatedSkippedDuration() *durationpb.Duration { if x != nil { - return x.WorkflowId + return x.AccumulatedSkippedDuration } - return "" + return nil } -func (x *ReplicationTaskInfo) GetRunId() string { +func (x *TimeSkippingInfo) GetCurrentElapsedDurationBound() *TimeSkippingBoundInfo { if x != nil { - return x.RunId + return x.CurrentElapsedDurationBound } - return "" + return nil } -func (x *ReplicationTaskInfo) GetTaskType() v1.TaskType { +type TimeSkippingBoundInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Target time for a bound, expressed in virtual time. + TargetTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=target_time,json=targetTime,proto3" json:"target_time,omitempty"` + // Indicates whether this bound has already been reached, used for idempotency checks. + HasReached bool `protobuf:"varint,2,opt,name=has_reached,json=hasReached,proto3" json:"has_reached,omitempty"` + // Event ID of the WorkflowExecutionStartedEvent (always 1) or the most recent + // WorkflowExecutionOptionsUpdatedEvent that introduced the current time-skipping + // configuration. 
This is used as the task event ID for the time-skipping timer task, + // enabling reset and replication-conflict resolution to identify obsolete bound tasks + // via the standard staleness check. + SourceEventId int64 `protobuf:"varint,3,opt,name=source_event_id,json=sourceEventId,proto3" json:"source_event_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimeSkippingBoundInfo) Reset() { + *x = TimeSkippingBoundInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimeSkippingBoundInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimeSkippingBoundInfo) ProtoMessage() {} + +func (x *TimeSkippingBoundInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[3] if x != nil { - return x.TaskType + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return v1.TaskType(0) + return mi.MessageOf(x) } -func (x *ReplicationTaskInfo) GetVersion() int64 { +// Deprecated: Use TimeSkippingBoundInfo.ProtoReflect.Descriptor instead. 
+func (*TimeSkippingBoundInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{3} +} + +func (x *TimeSkippingBoundInfo) GetTargetTime() *timestamppb.Timestamp { if x != nil { - return x.Version + return x.TargetTime } - return 0 + return nil } -func (x *ReplicationTaskInfo) GetFirstEventId() int64 { +func (x *TimeSkippingBoundInfo) GetHasReached() bool { if x != nil { - return x.FirstEventId + return x.HasReached } - return 0 + return false } -func (x *ReplicationTaskInfo) GetNextEventId() int64 { +func (x *TimeSkippingBoundInfo) GetSourceEventId() int64 { if x != nil { - return x.NextEventId + return x.SourceEventId } return 0 } -func (x *ReplicationTaskInfo) GetScheduledEventId() int64 { +// Internal wrapper message to distinguish "never notified" (nil wrapper) from +// "notified about an unversioned target" (non-nil wrapper with nil deployment_version). +// Used only within server persistence; never flows to the public API. +type LastNotifiedTargetVersion struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentVersion *v18.WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=deployment_version,json=deploymentVersion,proto3" json:"deployment_version,omitempty"` + // Revision number of the task queue routing config at the time the + // notification was sent. Carried forward to DeclinedTargetVersionUpgrade + // at continue-as-new time. 
+ RevisionNumber int64 `protobuf:"varint,2,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LastNotifiedTargetVersion) Reset() { + *x = LastNotifiedTargetVersion{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LastNotifiedTargetVersion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LastNotifiedTargetVersion) ProtoMessage() {} + +func (x *LastNotifiedTargetVersion) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[4] if x != nil { - return x.ScheduledEventId + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *ReplicationTaskInfo) GetBranchToken() []byte { +// Deprecated: Use LastNotifiedTargetVersion.ProtoReflect.Descriptor instead. 
+func (*LastNotifiedTargetVersion) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{4} +} + +func (x *LastNotifiedTargetVersion) GetDeploymentVersion() *v18.WorkerDeploymentVersion { if x != nil { - return x.BranchToken + return x.DeploymentVersion } return nil } -func (x *ReplicationTaskInfo) GetNewRunBranchToken() []byte { +func (x *LastNotifiedTargetVersion) GetRevisionNumber() int64 { if x != nil { - return x.NewRunBranchToken + return x.RevisionNumber } - return nil + return 0 } -func (x *ReplicationTaskInfo) GetTaskId() int64 { +type ExecutionStats struct { + state protoimpl.MessageState `protogen:"open.v1"` + HistorySize int64 `protobuf:"varint,1,opt,name=history_size,json=historySize,proto3" json:"history_size,omitempty"` + // Total size in bytes of all external payloads referenced in the entire history tree of the execution, not just the current branch. + // This number doesn't include payloads in buffered events. + ExternalPayloadSize int64 `protobuf:"varint,2,opt,name=external_payload_size,json=externalPayloadSize,proto3" json:"external_payload_size,omitempty"` + // Total count of external payloads referenced in the entire history tree of the execution, not just the current branch. + // This number doesn't include payloads in buffered events. 
+ ExternalPayloadCount int64 `protobuf:"varint,3,opt,name=external_payload_count,json=externalPayloadCount,proto3" json:"external_payload_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ExecutionStats) Reset() { + *x = ExecutionStats{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ExecutionStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecutionStats) ProtoMessage() {} + +func (x *ExecutionStats) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[5] if x != nil { - return x.TaskId + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecutionStats.ProtoReflect.Descriptor instead. 
+func (*ExecutionStats) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{5} +} + +func (x *ExecutionStats) GetHistorySize() int64 { + if x != nil { + return x.HistorySize } return 0 } -func (x *ReplicationTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { +func (x *ExecutionStats) GetExternalPayloadSize() int64 { if x != nil { - return x.VisibilityTime + return x.ExternalPayloadSize } - return nil + return 0 } -func (x *ReplicationTaskInfo) GetNewRunId() string { +func (x *ExecutionStats) GetExternalPayloadCount() int64 { if x != nil { - return x.NewRunId + return x.ExternalPayloadCount } - return "" + return 0 } -// visibility_task_data column -type VisibilityTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// execution_state column +type WorkflowExecutionState struct { + state protoimpl.MessageState `protogen:"open.v1"` + CreateRequestId string `protobuf:"bytes,1,opt,name=create_request_id,json=createRequestId,proto3" json:"create_request_id,omitempty"` + RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + State v1.WorkflowExecutionState `protobuf:"varint,3,opt,name=state,proto3,enum=temporal.server.api.enums.v1.WorkflowExecutionState" json:"state,omitempty"` + Status v11.WorkflowExecutionStatus `protobuf:"varint,4,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,5,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + StartTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Request IDs that are attached to the workflow execution. 
It can be the request ID that started + // the workflow execution or request IDs that were attached to an existing running workflow + // execution via StartWorkflowExecutionRequest.OnConflictOptions. + RequestIds map[string]*RequestIDInfo `protobuf:"bytes,7,rep,name=request_ids,json=requestIds,proto3" json:"request_ids,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - Version int64 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` - TaskId int64 `protobuf:"varint,6,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` - CloseVisibilityTaskId int64 `protobuf:"varint,10,opt,name=close_visibility_task_id,json=closeVisibilityTaskId,proto3" json:"close_visibility_task_id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *VisibilityTaskInfo) Reset() { - *x = VisibilityTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *WorkflowExecutionState) Reset() { + *x = WorkflowExecutionState{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *VisibilityTaskInfo) String() 
string { +func (x *WorkflowExecutionState) String() string { return protoimpl.X.MessageStringOf(x) } -func (*VisibilityTaskInfo) ProtoMessage() {} +func (*WorkflowExecutionState) ProtoMessage() {} -func (x *VisibilityTaskInfo) ProtoReflect() protoreflect.Message { +func (x *WorkflowExecutionState) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1266,109 +1467,84 @@ func (x *VisibilityTaskInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use VisibilityTaskInfo.ProtoReflect.Descriptor instead. -func (*VisibilityTaskInfo) Descriptor() ([]byte, []int) { +// Deprecated: Use WorkflowExecutionState.ProtoReflect.Descriptor instead. +func (*WorkflowExecutionState) Descriptor() ([]byte, []int) { return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{6} } -func (x *VisibilityTaskInfo) GetNamespaceId() string { - if x != nil { - return x.NamespaceId - } - return "" -} - -func (x *VisibilityTaskInfo) GetWorkflowId() string { +func (x *WorkflowExecutionState) GetCreateRequestId() string { if x != nil { - return x.WorkflowId + return x.CreateRequestId } return "" } -func (x *VisibilityTaskInfo) GetRunId() string { +func (x *WorkflowExecutionState) GetRunId() string { if x != nil { return x.RunId } return "" } -func (x *VisibilityTaskInfo) GetTaskType() v1.TaskType { +func (x *WorkflowExecutionState) GetState() v1.WorkflowExecutionState { if x != nil { - return x.TaskType + return x.State } - return v1.TaskType(0) + return v1.WorkflowExecutionState(0) } -func (x *VisibilityTaskInfo) GetVersion() int64 { +func (x *WorkflowExecutionState) GetStatus() v11.WorkflowExecutionStatus { if x != nil { - return x.Version + return x.Status } - return 0 + return v11.WorkflowExecutionStatus(0) 
} -func (x *VisibilityTaskInfo) GetTaskId() int64 { +func (x *WorkflowExecutionState) GetLastUpdateVersionedTransition() *VersionedTransition { if x != nil { - return x.TaskId + return x.LastUpdateVersionedTransition } - return 0 + return nil } -func (x *VisibilityTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { +func (x *WorkflowExecutionState) GetStartTime() *timestamppb.Timestamp { if x != nil { - return x.VisibilityTime + return x.StartTime } return nil } -func (x *VisibilityTaskInfo) GetCloseVisibilityTaskId() int64 { +func (x *WorkflowExecutionState) GetRequestIds() map[string]*RequestIDInfo { if x != nil { - return x.CloseVisibilityTaskId + return x.RequestIds } - return 0 + return nil } -// timer column -type TimerTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type RequestIDInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + EventType v11.EventType `protobuf:"varint,1,opt,name=event_type,json=eventType,proto3,enum=temporal.api.enums.v1.EventType" json:"event_type,omitempty"` + EventId int64 `protobuf:"varint,2,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - TimeoutType v17.TimeoutType `protobuf:"varint,5,opt,name=timeout_type,json=timeoutType,proto3,enum=temporal.api.enums.v1.TimeoutType" json:"timeout_type,omitempty"` - WorkflowBackoffType v1.WorkflowBackoffType 
`protobuf:"varint,6,opt,name=workflow_backoff_type,json=workflowBackoffType,proto3,enum=temporal.server.api.enums.v1.WorkflowBackoffType" json:"workflow_backoff_type,omitempty"` - Version int64 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` - ScheduleAttempt int32 `protobuf:"varint,8,opt,name=schedule_attempt,json=scheduleAttempt,proto3" json:"schedule_attempt,omitempty"` - EventId int64 `protobuf:"varint,9,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` - TaskId int64 `protobuf:"varint,10,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` - BranchToken []byte `protobuf:"bytes,12,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` - // If this is true, we can bypass archival before deleting. Only defined for DeleteHistoryEventTasks. - AlreadyArchived bool `protobuf:"varint,13,opt,name=already_archived,json=alreadyArchived,proto3" json:"already_archived,omitempty"` - // If task addresses a sub-statemachine (e.g. callback), this field will be set. 
- StateMachineInfo *StateMachineTaskInfo `protobuf:"bytes,14,opt,name=state_machine_info,json=stateMachineInfo,proto3" json:"state_machine_info,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *TimerTaskInfo) Reset() { - *x = TimerTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *RequestIDInfo) Reset() { + *x = RequestIDInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *TimerTaskInfo) String() string { +func (x *RequestIDInfo) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TimerTaskInfo) ProtoMessage() {} +func (*RequestIDInfo) ProtoMessage() {} -func (x *TimerTaskInfo) ProtoReflect() protoreflect.Message { +func (x *RequestIDInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1378,141 +1554,2172 @@ func (x *TimerTaskInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TimerTaskInfo.ProtoReflect.Descriptor instead. -func (*TimerTaskInfo) Descriptor() ([]byte, []int) { +// Deprecated: Use RequestIDInfo.ProtoReflect.Descriptor instead. 
+func (*RequestIDInfo) Descriptor() ([]byte, []int) { return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{7} } -func (x *TimerTaskInfo) GetNamespaceId() string { +func (x *RequestIDInfo) GetEventType() v11.EventType { if x != nil { - return x.NamespaceId + return x.EventType } - return "" + return v11.EventType(0) } -func (x *TimerTaskInfo) GetWorkflowId() string { +func (x *RequestIDInfo) GetEventId() int64 { if x != nil { - return x.WorkflowId + return x.EventId } - return "" + return 0 } -func (x *TimerTaskInfo) GetRunId() string { +// transfer column +type TransferTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + TargetNamespaceId string `protobuf:"bytes,5,opt,name=target_namespace_id,json=targetNamespaceId,proto3" json:"target_namespace_id,omitempty"` + TargetWorkflowId string `protobuf:"bytes,6,opt,name=target_workflow_id,json=targetWorkflowId,proto3" json:"target_workflow_id,omitempty"` + TargetRunId string `protobuf:"bytes,7,opt,name=target_run_id,json=targetRunId,proto3" json:"target_run_id,omitempty"` + TaskQueue string `protobuf:"bytes,8,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TargetChildWorkflowOnly bool `protobuf:"varint,9,opt,name=target_child_workflow_only,json=targetChildWorkflowOnly,proto3" json:"target_child_workflow_only,omitempty"` + ScheduledEventId int64 `protobuf:"varint,10,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + Version int64 
`protobuf:"varint,11,opt,name=version,proto3" json:"version,omitempty"` + TaskId int64 `protobuf:"varint,12,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "after" is used to indicate sequence of actions. --) + DeleteAfterClose bool `protobuf:"varint,15,opt,name=delete_after_close,json=deleteAfterClose,proto3" json:"delete_after_close,omitempty"` + // Types that are valid to be assigned to TaskDetails: + // + // *TransferTaskInfo_CloseExecutionTaskDetails_ + // *TransferTaskInfo_ChasmTaskInfo + TaskDetails isTransferTaskInfo_TaskDetails `protobuf_oneof:"task_details"` + // Stamp represents the "version" of the entity's internal state for which the transfer task was created. + // It increases monotonically when the entity's options are modified. + // It is used to check if a task is still relevant to the entity's corresponding state machine. 
+ Stamp int32 `protobuf:"varint,17,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TransferTaskInfo) Reset() { + *x = TransferTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TransferTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransferTaskInfo) ProtoMessage() {} + +func (x *TransferTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransferTaskInfo.ProtoReflect.Descriptor instead. +func (*TransferTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{8} +} + +func (x *TransferTaskInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *TransferTaskInfo) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *TransferTaskInfo) GetRunId() string { if x != nil { return x.RunId } return "" } -func (x *TimerTaskInfo) GetTaskType() v1.TaskType { +func (x *TransferTaskInfo) GetTaskType() v1.TaskType { if x != nil { return x.TaskType } return v1.TaskType(0) } -func (x *TimerTaskInfo) GetTimeoutType() v17.TimeoutType { +func (x *TransferTaskInfo) GetTargetNamespaceId() string { if x != nil { - return x.TimeoutType + return x.TargetNamespaceId } - return v17.TimeoutType(0) + return "" } -func (x *TimerTaskInfo) GetWorkflowBackoffType() v1.WorkflowBackoffType { +func (x *TransferTaskInfo) GetTargetWorkflowId() string { if x != nil { - return x.WorkflowBackoffType + return x.TargetWorkflowId } 
- return v1.WorkflowBackoffType(0) + return "" } -func (x *TimerTaskInfo) GetVersion() int64 { +func (x *TransferTaskInfo) GetTargetRunId() string { + if x != nil { + return x.TargetRunId + } + return "" +} + +func (x *TransferTaskInfo) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *TransferTaskInfo) GetTargetChildWorkflowOnly() bool { + if x != nil { + return x.TargetChildWorkflowOnly + } + return false +} + +func (x *TransferTaskInfo) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 +} + +func (x *TransferTaskInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *TransferTaskInfo) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x *TransferTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +func (x *TransferTaskInfo) GetDeleteAfterClose() bool { + if x != nil { + return x.DeleteAfterClose + } + return false +} + +func (x *TransferTaskInfo) GetTaskDetails() isTransferTaskInfo_TaskDetails { + if x != nil { + return x.TaskDetails + } + return nil +} + +func (x *TransferTaskInfo) GetCloseExecutionTaskDetails() *TransferTaskInfo_CloseExecutionTaskDetails { + if x != nil { + if x, ok := x.TaskDetails.(*TransferTaskInfo_CloseExecutionTaskDetails_); ok { + return x.CloseExecutionTaskDetails + } + } + return nil +} + +func (x *TransferTaskInfo) GetChasmTaskInfo() *ChasmTaskInfo { + if x != nil { + if x, ok := x.TaskDetails.(*TransferTaskInfo_ChasmTaskInfo); ok { + return x.ChasmTaskInfo + } + } + return nil +} + +func (x *TransferTaskInfo) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +type isTransferTaskInfo_TaskDetails interface { + isTransferTaskInfo_TaskDetails() +} + +type TransferTaskInfo_CloseExecutionTaskDetails_ struct { + CloseExecutionTaskDetails *TransferTaskInfo_CloseExecutionTaskDetails 
`protobuf:"bytes,16,opt,name=close_execution_task_details,json=closeExecutionTaskDetails,proto3,oneof"` +} + +type TransferTaskInfo_ChasmTaskInfo struct { + // If the task addresses a CHASM component, this field will be set. + ChasmTaskInfo *ChasmTaskInfo `protobuf:"bytes,18,opt,name=chasm_task_info,json=chasmTaskInfo,proto3,oneof"` +} + +func (*TransferTaskInfo_CloseExecutionTaskDetails_) isTransferTaskInfo_TaskDetails() {} + +func (*TransferTaskInfo_ChasmTaskInfo) isTransferTaskInfo_TaskDetails() {} + +// replication column +type ReplicationTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + Version int64 `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"` + FirstEventId int64 `protobuf:"varint,6,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` + NextEventId int64 `protobuf:"varint,7,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` + ScheduledEventId int64 `protobuf:"varint,8,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + BranchToken []byte `protobuf:"bytes,11,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + NewRunBranchToken []byte `protobuf:"bytes,13,opt,name=new_run_branch_token,json=newRunBranchToken,proto3" json:"new_run_branch_token,omitempty"` + TaskId int64 `protobuf:"varint,15,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + VisibilityTime *timestamppb.Timestamp 
`protobuf:"bytes,16,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + NewRunId string `protobuf:"bytes,17,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` + Priority v1.TaskPriority `protobuf:"varint,18,opt,name=priority,proto3,enum=temporal.server.api.enums.v1.TaskPriority" json:"priority,omitempty"` + VersionedTransition *VersionedTransition `protobuf:"bytes,19,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + // A list of event-based replication tasks that, together, are equivalent + // to this state-based task. + // TODO: Remove this field when state-based replication is stable and + // doesn't need to be disabled. + TaskEquivalents []*ReplicationTaskInfo `protobuf:"bytes,20,rep,name=task_equivalents,json=taskEquivalents,proto3" json:"task_equivalents,omitempty"` + LastVersionHistoryItem *v14.VersionHistoryItem `protobuf:"bytes,21,opt,name=last_version_history_item,json=lastVersionHistoryItem,proto3" json:"last_version_history_item,omitempty"` + IsFirstTask bool `protobuf:"varint,22,opt,name=is_first_task,json=isFirstTask,proto3" json:"is_first_task,omitempty"` + TargetClusters []string `protobuf:"bytes,23,rep,name=target_clusters,json=targetClusters,proto3" json:"target_clusters,omitempty"` + IsForceReplication bool `protobuf:"varint,24,opt,name=is_force_replication,json=isForceReplication,proto3" json:"is_force_replication,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,25,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicationTaskInfo) Reset() { + *x = ReplicationTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*ReplicationTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationTaskInfo) ProtoMessage() {} + +func (x *ReplicationTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationTaskInfo.ProtoReflect.Descriptor instead. +func (*ReplicationTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{9} +} + +func (x *ReplicationTaskInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ReplicationTaskInfo) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *ReplicationTaskInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *ReplicationTaskInfo) GetTaskType() v1.TaskType { + if x != nil { + return x.TaskType + } + return v1.TaskType(0) +} + +func (x *ReplicationTaskInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ReplicationTaskInfo) GetFirstEventId() int64 { + if x != nil { + return x.FirstEventId + } + return 0 +} + +func (x *ReplicationTaskInfo) GetNextEventId() int64 { + if x != nil { + return x.NextEventId + } + return 0 +} + +func (x *ReplicationTaskInfo) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 +} + +func (x *ReplicationTaskInfo) GetBranchToken() []byte { + if x != nil { + return x.BranchToken + } + return nil +} + +func (x *ReplicationTaskInfo) GetNewRunBranchToken() []byte { + if x != nil { + return x.NewRunBranchToken + } + return nil +} + +func (x *ReplicationTaskInfo) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x 
*ReplicationTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +func (x *ReplicationTaskInfo) GetNewRunId() string { + if x != nil { + return x.NewRunId + } + return "" +} + +func (x *ReplicationTaskInfo) GetPriority() v1.TaskPriority { + if x != nil { + return x.Priority + } + return v1.TaskPriority(0) +} + +func (x *ReplicationTaskInfo) GetVersionedTransition() *VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} + +func (x *ReplicationTaskInfo) GetTaskEquivalents() []*ReplicationTaskInfo { + if x != nil { + return x.TaskEquivalents + } + return nil +} + +func (x *ReplicationTaskInfo) GetLastVersionHistoryItem() *v14.VersionHistoryItem { + if x != nil { + return x.LastVersionHistoryItem + } + return nil +} + +func (x *ReplicationTaskInfo) GetIsFirstTask() bool { + if x != nil { + return x.IsFirstTask + } + return false +} + +func (x *ReplicationTaskInfo) GetTargetClusters() []string { + if x != nil { + return x.TargetClusters + } + return nil +} + +func (x *ReplicationTaskInfo) GetIsForceReplication() bool { + if x != nil { + return x.IsForceReplication + } + return false +} + +func (x *ReplicationTaskInfo) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +// visibility_task_data column +type VisibilityTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + Version int64 `protobuf:"varint,5,opt,name=version,proto3" 
json:"version,omitempty"` + TaskId int64 `protobuf:"varint,6,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + CloseVisibilityTaskId int64 `protobuf:"varint,10,opt,name=close_visibility_task_id,json=closeVisibilityTaskId,proto3" json:"close_visibility_task_id,omitempty"` + CloseTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` + // Types that are valid to be assigned to TaskDetails: + // + // *VisibilityTaskInfo_ChasmTaskInfo + TaskDetails isVisibilityTaskInfo_TaskDetails `protobuf_oneof:"task_details"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VisibilityTaskInfo) Reset() { + *x = VisibilityTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VisibilityTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VisibilityTaskInfo) ProtoMessage() {} + +func (x *VisibilityTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VisibilityTaskInfo.ProtoReflect.Descriptor instead. 
+func (*VisibilityTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{10} +} + +func (x *VisibilityTaskInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *VisibilityTaskInfo) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *VisibilityTaskInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *VisibilityTaskInfo) GetTaskType() v1.TaskType { + if x != nil { + return x.TaskType + } + return v1.TaskType(0) +} + +func (x *VisibilityTaskInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *VisibilityTaskInfo) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x *VisibilityTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +func (x *VisibilityTaskInfo) GetCloseVisibilityTaskId() int64 { + if x != nil { + return x.CloseVisibilityTaskId + } + return 0 +} + +func (x *VisibilityTaskInfo) GetCloseTime() *timestamppb.Timestamp { + if x != nil { + return x.CloseTime + } + return nil +} + +func (x *VisibilityTaskInfo) GetTaskDetails() isVisibilityTaskInfo_TaskDetails { + if x != nil { + return x.TaskDetails + } + return nil +} + +func (x *VisibilityTaskInfo) GetChasmTaskInfo() *ChasmTaskInfo { + if x != nil { + if x, ok := x.TaskDetails.(*VisibilityTaskInfo_ChasmTaskInfo); ok { + return x.ChasmTaskInfo + } + } + return nil +} + +type isVisibilityTaskInfo_TaskDetails interface { + isVisibilityTaskInfo_TaskDetails() +} + +type VisibilityTaskInfo_ChasmTaskInfo struct { + // If the task addresses a CHASM component, this field will be set. 
+ ChasmTaskInfo *ChasmTaskInfo `protobuf:"bytes,12,opt,name=chasm_task_info,json=chasmTaskInfo,proto3,oneof"` +} + +func (*VisibilityTaskInfo_ChasmTaskInfo) isVisibilityTaskInfo_TaskDetails() {} + +// timer column +type TimerTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + TimeoutType v11.TimeoutType `protobuf:"varint,5,opt,name=timeout_type,json=timeoutType,proto3,enum=temporal.api.enums.v1.TimeoutType" json:"timeout_type,omitempty"` + WorkflowBackoffType v1.WorkflowBackoffType `protobuf:"varint,6,opt,name=workflow_backoff_type,json=workflowBackoffType,proto3,enum=temporal.server.api.enums.v1.WorkflowBackoffType" json:"workflow_backoff_type,omitempty"` + Version int64 `protobuf:"varint,7,opt,name=version,proto3" json:"version,omitempty"` + ScheduleAttempt int32 `protobuf:"varint,8,opt,name=schedule_attempt,json=scheduleAttempt,proto3" json:"schedule_attempt,omitempty"` + EventId int64 `protobuf:"varint,9,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + TaskId int64 `protobuf:"varint,10,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + BranchToken []byte `protobuf:"bytes,12,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + // If this is true, we can bypass archival before deleting. Only defined for DeleteHistoryEventTasks. 
+ AlreadyArchived bool `protobuf:"varint,13,opt,name=already_archived,json=alreadyArchived,proto3" json:"already_archived,omitempty"` + // Number of transitions on the corresponding mutable state object. Used to verify that a task is not referencing a + // stale state or, in some situations, that the task itself is not stale. + // If task addresses a sub-statemachine (e.g. callback), this field will be set. + MutableStateTransitionCount int64 `protobuf:"varint,14,opt,name=mutable_state_transition_count,json=mutableStateTransitionCount,proto3" json:"mutable_state_transition_count,omitempty"` + // If specified, the task is a for a workflow chain instead of a specific workflow run. + // A workflow chain is identified by the run_id of the first workflow in the chain. + FirstRunId string `protobuf:"bytes,15,opt,name=first_run_id,json=firstRunId,proto3" json:"first_run_id,omitempty"` + // Stamp represents the "version" of the entity's internal state for which the timer task was created. + // It increases monotonically when the entity's options are modified. + // It is used to check if a task is still relevant to the entity's corresponding state machine. 
+ Stamp int32 `protobuf:"varint,16,opt,name=stamp,proto3" json:"stamp,omitempty"` + // Types that are valid to be assigned to TaskDetails: + // + // *TimerTaskInfo_ChasmTaskInfo + TaskDetails isTimerTaskInfo_TaskDetails `protobuf_oneof:"task_details"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimerTaskInfo) Reset() { + *x = TimerTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimerTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimerTaskInfo) ProtoMessage() {} + +func (x *TimerTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimerTaskInfo.ProtoReflect.Descriptor instead. 
+func (*TimerTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{11} +} + +func (x *TimerTaskInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *TimerTaskInfo) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *TimerTaskInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *TimerTaskInfo) GetTaskType() v1.TaskType { + if x != nil { + return x.TaskType + } + return v1.TaskType(0) +} + +func (x *TimerTaskInfo) GetTimeoutType() v11.TimeoutType { + if x != nil { + return x.TimeoutType + } + return v11.TimeoutType(0) +} + +func (x *TimerTaskInfo) GetWorkflowBackoffType() v1.WorkflowBackoffType { + if x != nil { + return x.WorkflowBackoffType + } + return v1.WorkflowBackoffType(0) +} + +func (x *TimerTaskInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *TimerTaskInfo) GetScheduleAttempt() int32 { + if x != nil { + return x.ScheduleAttempt + } + return 0 +} + +func (x *TimerTaskInfo) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *TimerTaskInfo) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x *TimerTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +func (x *TimerTaskInfo) GetBranchToken() []byte { + if x != nil { + return x.BranchToken + } + return nil +} + +func (x *TimerTaskInfo) GetAlreadyArchived() bool { + if x != nil { + return x.AlreadyArchived + } + return false +} + +func (x *TimerTaskInfo) GetMutableStateTransitionCount() int64 { + if x != nil { + return x.MutableStateTransitionCount + } + return 0 +} + +func (x *TimerTaskInfo) GetFirstRunId() string { + if x != nil { + return x.FirstRunId + } + return "" +} + +func (x *TimerTaskInfo) GetStamp() int32 { + if x != nil { + return 
x.Stamp + } + return 0 +} + +func (x *TimerTaskInfo) GetTaskDetails() isTimerTaskInfo_TaskDetails { + if x != nil { + return x.TaskDetails + } + return nil +} + +func (x *TimerTaskInfo) GetChasmTaskInfo() *ChasmTaskInfo { + if x != nil { + if x, ok := x.TaskDetails.(*TimerTaskInfo_ChasmTaskInfo); ok { + return x.ChasmTaskInfo + } + } + return nil +} + +type isTimerTaskInfo_TaskDetails interface { + isTimerTaskInfo_TaskDetails() +} + +type TimerTaskInfo_ChasmTaskInfo struct { + // If the task addresses a CHASM component, this field will be set. + ChasmTaskInfo *ChasmTaskInfo `protobuf:"bytes,17,opt,name=chasm_task_info,json=chasmTaskInfo,proto3,oneof"` +} + +func (*TimerTaskInfo_ChasmTaskInfo) isTimerTaskInfo_TaskDetails() {} + +type ArchivalTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskId int64 `protobuf:"varint,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,4,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,5,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + Version int64 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ArchivalTaskInfo) Reset() { + *x = ArchivalTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ArchivalTaskInfo) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*ArchivalTaskInfo) ProtoMessage() {} + +func (x *ArchivalTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArchivalTaskInfo.ProtoReflect.Descriptor instead. +func (*ArchivalTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{12} +} + +func (x *ArchivalTaskInfo) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x *ArchivalTaskInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ArchivalTaskInfo) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *ArchivalTaskInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *ArchivalTaskInfo) GetTaskType() v1.TaskType { + if x != nil { + return x.TaskType + } + return v1.TaskType(0) +} + +func (x *ArchivalTaskInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ArchivalTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +type OutboundTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + 
TaskId int64 `protobuf:"varint,5,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + // Destination of this task (e.g. protocol+host+port for callbacks). + // Outbound tasks are grouped by this field (and the namespace ID) when scheduling. + Destination string `protobuf:"bytes,7,opt,name=destination,proto3" json:"destination,omitempty"` + // Types that are valid to be assigned to TaskDetails: + // + // *OutboundTaskInfo_StateMachineInfo + // *OutboundTaskInfo_ChasmTaskInfo + // *OutboundTaskInfo_WorkerCommandsTask + TaskDetails isOutboundTaskInfo_TaskDetails `protobuf_oneof:"task_details"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OutboundTaskInfo) Reset() { + *x = OutboundTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OutboundTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutboundTaskInfo) ProtoMessage() {} + +func (x *OutboundTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutboundTaskInfo.ProtoReflect.Descriptor instead. 
+func (*OutboundTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{13} +} + +func (x *OutboundTaskInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *OutboundTaskInfo) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *OutboundTaskInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *OutboundTaskInfo) GetTaskType() v1.TaskType { + if x != nil { + return x.TaskType + } + return v1.TaskType(0) +} + +func (x *OutboundTaskInfo) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +func (x *OutboundTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { + if x != nil { + return x.VisibilityTime + } + return nil +} + +func (x *OutboundTaskInfo) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +func (x *OutboundTaskInfo) GetTaskDetails() isOutboundTaskInfo_TaskDetails { + if x != nil { + return x.TaskDetails + } + return nil +} + +func (x *OutboundTaskInfo) GetStateMachineInfo() *StateMachineTaskInfo { + if x != nil { + if x, ok := x.TaskDetails.(*OutboundTaskInfo_StateMachineInfo); ok { + return x.StateMachineInfo + } + } + return nil +} + +func (x *OutboundTaskInfo) GetChasmTaskInfo() *ChasmTaskInfo { + if x != nil { + if x, ok := x.TaskDetails.(*OutboundTaskInfo_ChasmTaskInfo); ok { + return x.ChasmTaskInfo + } + } + return nil +} + +func (x *OutboundTaskInfo) GetWorkerCommandsTask() *WorkerCommandsTask { + if x != nil { + if x, ok := x.TaskDetails.(*OutboundTaskInfo_WorkerCommandsTask); ok { + return x.WorkerCommandsTask + } + } + return nil +} + +type isOutboundTaskInfo_TaskDetails interface { + isOutboundTaskInfo_TaskDetails() +} + +type OutboundTaskInfo_StateMachineInfo struct { + // If task addresses a sub-statemachine (e.g. callback), this field will be set. 
+ StateMachineInfo *StateMachineTaskInfo `protobuf:"bytes,8,opt,name=state_machine_info,json=stateMachineInfo,proto3,oneof"` +} + +type OutboundTaskInfo_ChasmTaskInfo struct { + // If the task addresses a CHASM component, this field will be set. + ChasmTaskInfo *ChasmTaskInfo `protobuf:"bytes,9,opt,name=chasm_task_info,json=chasmTaskInfo,proto3,oneof"` +} + +type OutboundTaskInfo_WorkerCommandsTask struct { + // If the task is a worker commands task. + WorkerCommandsTask *WorkerCommandsTask `protobuf:"bytes,10,opt,name=worker_commands_task,json=workerCommandsTask,proto3,oneof"` +} + +func (*OutboundTaskInfo_StateMachineInfo) isOutboundTaskInfo_TaskDetails() {} + +func (*OutboundTaskInfo_ChasmTaskInfo) isOutboundTaskInfo_TaskDetails() {} + +func (*OutboundTaskInfo_WorkerCommandsTask) isOutboundTaskInfo_TaskDetails() {} + +// WorkerCommandsTask contains worker commands to dispatch via Nexus. +type WorkerCommandsTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + Commands []*v19.WorkerCommand `protobuf:"bytes,1,rep,name=commands,proto3" json:"commands,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerCommandsTask) Reset() { + *x = WorkerCommandsTask{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerCommandsTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerCommandsTask) ProtoMessage() {} + +func (x *WorkerCommandsTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerCommandsTask.ProtoReflect.Descriptor instead. 
+func (*WorkerCommandsTask) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{14} +} + +func (x *WorkerCommandsTask) GetCommands() []*v19.WorkerCommand { + if x != nil { + return x.Commands + } + return nil +} + +type NexusInvocationTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusInvocationTaskInfo) Reset() { + *x = NexusInvocationTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusInvocationTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusInvocationTaskInfo) ProtoMessage() {} + +func (x *NexusInvocationTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusInvocationTaskInfo.ProtoReflect.Descriptor instead. 
+func (*NexusInvocationTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{15} +} + +func (x *NexusInvocationTaskInfo) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type NexusCancelationTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusCancelationTaskInfo) Reset() { + *x = NexusCancelationTaskInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusCancelationTaskInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusCancelationTaskInfo) ProtoMessage() {} + +func (x *NexusCancelationTaskInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusCancelationTaskInfo.ProtoReflect.Descriptor instead. 
+func (*NexusCancelationTaskInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{16} +} + +func (x *NexusCancelationTaskInfo) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +// activity_map column +type ActivityInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + ScheduledEventBatchId int64 `protobuf:"varint,2,opt,name=scheduled_event_batch_id,json=scheduledEventBatchId,proto3" json:"scheduled_event_batch_id,omitempty"` + ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + StartedEventId int64 `protobuf:"varint,5,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + StartedTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + ActivityId string `protobuf:"bytes,8,opt,name=activity_id,json=activityId,proto3" json:"activity_id,omitempty"` + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToCloseTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3" json:"schedule_to_close_timeout,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. 
--) + StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` + HeartbeatTimeout *durationpb.Duration `protobuf:"bytes,13,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3" json:"heartbeat_timeout,omitempty"` + CancelRequested bool `protobuf:"varint,14,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` + CancelRequestId int64 `protobuf:"varint,15,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` + TimerTaskStatus int32 `protobuf:"varint,16,opt,name=timer_task_status,json=timerTaskStatus,proto3" json:"timer_task_status,omitempty"` + Attempt int32 `protobuf:"varint,17,opt,name=attempt,proto3" json:"attempt,omitempty"` + TaskQueue string `protobuf:"bytes,18,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + StartedIdentity string `protobuf:"bytes,19,opt,name=started_identity,json=startedIdentity,proto3" json:"started_identity,omitempty"` + HasRetryPolicy bool `protobuf:"varint,20,opt,name=has_retry_policy,json=hasRetryPolicy,proto3" json:"has_retry_policy,omitempty"` + RetryInitialInterval *durationpb.Duration `protobuf:"bytes,21,opt,name=retry_initial_interval,json=retryInitialInterval,proto3" json:"retry_initial_interval,omitempty"` + RetryMaximumInterval *durationpb.Duration `protobuf:"bytes,22,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3" json:"retry_maximum_interval,omitempty"` + RetryMaximumAttempts int32 `protobuf:"varint,23,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` + RetryExpirationTime *timestamppb.Timestamp `protobuf:"bytes,24,opt,name=retry_expiration_time,json=retryExpirationTime,proto3" json:"retry_expiration_time,omitempty"` + RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" 
json:"retry_backoff_coefficient,omitempty"` + RetryNonRetryableErrorTypes []string `protobuf:"bytes,26,rep,name=retry_non_retryable_error_types,json=retryNonRetryableErrorTypes,proto3" json:"retry_non_retryable_error_types,omitempty"` + RetryLastFailure *v110.Failure `protobuf:"bytes,27,opt,name=retry_last_failure,json=retryLastFailure,proto3" json:"retry_last_failure,omitempty"` + RetryLastWorkerIdentity string `protobuf:"bytes,28,opt,name=retry_last_worker_identity,json=retryLastWorkerIdentity,proto3" json:"retry_last_worker_identity,omitempty"` + ScheduledEventId int64 `protobuf:"varint,30,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + LastHeartbeatDetails *v13.Payloads `protobuf:"bytes,31,opt,name=last_heartbeat_details,json=lastHeartbeatDetails,proto3" json:"last_heartbeat_details,omitempty"` + LastHeartbeatUpdateTime *timestamppb.Timestamp `protobuf:"bytes,32,opt,name=last_heartbeat_update_time,json=lastHeartbeatUpdateTime,proto3" json:"last_heartbeat_update_time,omitempty"` + // When true, it means the activity is assigned to the build ID of its workflow (only set for old versioning) + // Deprecated. use `use_workflow_build_id` + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + UseCompatibleVersion bool `protobuf:"varint,33,opt,name=use_compatible_version,json=useCompatibleVersion,proto3" json:"use_compatible_version,omitempty"` + ActivityType *v13.ActivityType `protobuf:"bytes,34,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` + // Absence of `assigned_build_id` generally means this task is on an "unversioned" task queue. + // In rare cases, it can also mean that the task queue is versioned but we failed to write activity's + // independently-assigned build ID to the database. This case heals automatically once the task is dispatched. + // Deprecated. Clean up with versioning-2. 
[cleanup-old-wv] + // + // Types that are valid to be assigned to BuildIdInfo: + // + // *ActivityInfo_UseWorkflowBuildIdInfo_ + // *ActivityInfo_LastIndependentlyAssignedBuildId + BuildIdInfo isActivityInfo_BuildIdInfo `protobuf_oneof:"build_id_info"` + // The version stamp of the worker to whom this activity was most-recently dispatched + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + LastWorkerVersionStamp *v13.WorkerVersionStamp `protobuf:"bytes,37,opt,name=last_worker_version_stamp,json=lastWorkerVersionStamp,proto3" json:"last_worker_version_stamp,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,38,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + // The first time the activity was scheduled. + FirstScheduledTime *timestamppb.Timestamp `protobuf:"bytes,39,opt,name=first_scheduled_time,json=firstScheduledTime,proto3" json:"first_scheduled_time,omitempty"` + // The last time an activity attempt completion was recorded by the server. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,40,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // Stamp represents the “version” of the activity's internal state and can/will be changed with Activity API. + // It increases monotonically when the activity's options are modified. + // It is used to check if an activity task is still relevant to the corresponding activity state machine. + Stamp int32 `protobuf:"varint,41,opt,name=stamp,proto3" json:"stamp,omitempty"` + // Paused state. When activity is paused it will not advance until unpaused. + // Iw will not be scheduled, timer tasks will not be processed, etc. + // Note: it still can be cancelled/completed. + Paused bool `protobuf:"varint,42,opt,name=paused,proto3" json:"paused,omitempty"` + // The deployment this activity was dispatched to most recently. 
Present only if the activity + // was dispatched to a versioned worker. + // Deprecated. Replaced by last_worker_deployment_version. + LastStartedDeployment *v18.Deployment `protobuf:"bytes,43,opt,name=last_started_deployment,json=lastStartedDeployment,proto3" json:"last_started_deployment,omitempty"` + // The deployment this activity was dispatched to most recently. Present only if the activity + // was dispatched to a versioned worker. + // Deprecated. Clean up with versioning-3.1. [cleanup-old-wv] + LastWorkerDeploymentVersion string `protobuf:"bytes,44,opt,name=last_worker_deployment_version,json=lastWorkerDeploymentVersion,proto3" json:"last_worker_deployment_version,omitempty"` + // The deployment version this activity was dispatched to most recently. Present only if the activity + // was dispatched to a versioned worker. + LastDeploymentVersion *v18.WorkerDeploymentVersion `protobuf:"bytes,49,opt,name=last_deployment_version,json=lastDeploymentVersion,proto3" json:"last_deployment_version,omitempty"` + // Priority metadata. If this message is not present, or any fields are not + // present, they inherit the values from the workflow. 
+ Priority *v13.Priority `protobuf:"bytes,45,opt,name=priority,proto3" json:"priority,omitempty"` + PauseInfo *ActivityInfo_PauseInfo `protobuf:"bytes,46,opt,name=pause_info,json=pauseInfo,proto3" json:"pause_info,omitempty"` + // set to true if there was an activity reset while activity is still running on the worker + ActivityReset bool `protobuf:"varint,47,opt,name=activity_reset,json=activityReset,proto3" json:"activity_reset,omitempty"` + // set to true if reset heartbeat flag was set with an activity reset + ResetHeartbeats bool `protobuf:"varint,48,opt,name=reset_heartbeats,json=resetHeartbeats,proto3" json:"reset_heartbeats,omitempty"` + StartVersion int64 `protobuf:"varint,50,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + // A dedicated per-worker Nexus task queue on which the server sends control + // tasks (e.g. activity cancellation) to this specific worker instance. + WorkerControlTaskQueue string `protobuf:"bytes,51,opt,name=worker_control_task_queue,json=workerControlTaskQueue,proto3" json:"worker_control_task_queue,omitempty"` + // The shard clock at the time this activity was started (RecordActivityTaskStarted). + // Matching uses this clock to build the task token sent to the worker. Stored here so + // that history can later reconstruct the same task token (e.g. for cancel worker commands). + // + // IMPORTANT: The clock approach requires history to reconstruct the token using + // the same fields and logic as matching — if NewActivityTaskToken changes, both + // call sites must stay in sync or the tokens will silently diverge. An alternative + // is to store the full serialized task token (~150-300 bytes), which avoids + // reconstruction entirely and is immune to token format changes. We chose the + // clock approach to keep the per-activity memory footprint minimal (~24 bytes). + // + // Replication: This field is part of ActivityInfo and is automatically replicated + // via state-based replication. 
No special handling is needed. + StartedClock *v15.VectorClock `protobuf:"bytes,52,opt,name=started_clock,json=startedClock,proto3" json:"started_clock,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityInfo) Reset() { + *x = ActivityInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityInfo) ProtoMessage() {} + +func (x *ActivityInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityInfo.ProtoReflect.Descriptor instead. +func (*ActivityInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{17} +} + +func (x *ActivityInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ActivityInfo) GetScheduledEventBatchId() int64 { + if x != nil { + return x.ScheduledEventBatchId + } + return 0 +} + +func (x *ActivityInfo) GetScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledTime + } + return nil +} + +func (x *ActivityInfo) GetStartedEventId() int64 { + if x != nil { + return x.StartedEventId + } + return 0 +} + +func (x *ActivityInfo) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *ActivityInfo) GetActivityId() string { + if x != nil { + return x.ActivityId + } + return "" +} + +func (x *ActivityInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *ActivityInfo) GetScheduleToStartTimeout() 
*durationpb.Duration { + if x != nil { + return x.ScheduleToStartTimeout + } + return nil +} + +func (x *ActivityInfo) GetScheduleToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToCloseTimeout + } + return nil +} + +func (x *ActivityInfo) GetStartToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.StartToCloseTimeout + } + return nil +} + +func (x *ActivityInfo) GetHeartbeatTimeout() *durationpb.Duration { + if x != nil { + return x.HeartbeatTimeout + } + return nil +} + +func (x *ActivityInfo) GetCancelRequested() bool { + if x != nil { + return x.CancelRequested + } + return false +} + +func (x *ActivityInfo) GetCancelRequestId() int64 { + if x != nil { + return x.CancelRequestId + } + return 0 +} + +func (x *ActivityInfo) GetTimerTaskStatus() int32 { + if x != nil { + return x.TimerTaskStatus + } + return 0 +} + +func (x *ActivityInfo) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *ActivityInfo) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *ActivityInfo) GetStartedIdentity() string { + if x != nil { + return x.StartedIdentity + } + return "" +} + +func (x *ActivityInfo) GetHasRetryPolicy() bool { + if x != nil { + return x.HasRetryPolicy + } + return false +} + +func (x *ActivityInfo) GetRetryInitialInterval() *durationpb.Duration { + if x != nil { + return x.RetryInitialInterval + } + return nil +} + +func (x *ActivityInfo) GetRetryMaximumInterval() *durationpb.Duration { + if x != nil { + return x.RetryMaximumInterval + } + return nil +} + +func (x *ActivityInfo) GetRetryMaximumAttempts() int32 { + if x != nil { + return x.RetryMaximumAttempts + } + return 0 +} + +func (x *ActivityInfo) GetRetryExpirationTime() *timestamppb.Timestamp { + if x != nil { + return x.RetryExpirationTime + } + return nil +} + +func (x *ActivityInfo) GetRetryBackoffCoefficient() float64 { + if x != nil { + return x.RetryBackoffCoefficient + } + return 0 +} + 
+func (x *ActivityInfo) GetRetryNonRetryableErrorTypes() []string { + if x != nil { + return x.RetryNonRetryableErrorTypes + } + return nil +} + +func (x *ActivityInfo) GetRetryLastFailure() *v110.Failure { + if x != nil { + return x.RetryLastFailure + } + return nil +} + +func (x *ActivityInfo) GetRetryLastWorkerIdentity() string { + if x != nil { + return x.RetryLastWorkerIdentity + } + return "" +} + +func (x *ActivityInfo) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 +} + +func (x *ActivityInfo) GetLastHeartbeatDetails() *v13.Payloads { + if x != nil { + return x.LastHeartbeatDetails + } + return nil +} + +func (x *ActivityInfo) GetLastHeartbeatUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.LastHeartbeatUpdateTime + } + return nil +} + +func (x *ActivityInfo) GetUseCompatibleVersion() bool { + if x != nil { + return x.UseCompatibleVersion + } + return false +} + +func (x *ActivityInfo) GetActivityType() *v13.ActivityType { + if x != nil { + return x.ActivityType + } + return nil +} + +func (x *ActivityInfo) GetBuildIdInfo() isActivityInfo_BuildIdInfo { + if x != nil { + return x.BuildIdInfo + } + return nil +} + +func (x *ActivityInfo) GetUseWorkflowBuildIdInfo() *ActivityInfo_UseWorkflowBuildIdInfo { + if x != nil { + if x, ok := x.BuildIdInfo.(*ActivityInfo_UseWorkflowBuildIdInfo_); ok { + return x.UseWorkflowBuildIdInfo + } + } + return nil +} + +func (x *ActivityInfo) GetLastIndependentlyAssignedBuildId() string { + if x != nil { + if x, ok := x.BuildIdInfo.(*ActivityInfo_LastIndependentlyAssignedBuildId); ok { + return x.LastIndependentlyAssignedBuildId + } + } + return "" +} + +func (x *ActivityInfo) GetLastWorkerVersionStamp() *v13.WorkerVersionStamp { + if x != nil { + return x.LastWorkerVersionStamp + } + return nil +} + +func (x *ActivityInfo) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +func (x 
*ActivityInfo) GetFirstScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.FirstScheduledTime + } + return nil +} + +func (x *ActivityInfo) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.LastAttemptCompleteTime + } + return nil +} + +func (x *ActivityInfo) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +func (x *ActivityInfo) GetPaused() bool { + if x != nil { + return x.Paused + } + return false +} + +func (x *ActivityInfo) GetLastStartedDeployment() *v18.Deployment { + if x != nil { + return x.LastStartedDeployment + } + return nil +} + +func (x *ActivityInfo) GetLastWorkerDeploymentVersion() string { + if x != nil { + return x.LastWorkerDeploymentVersion + } + return "" +} + +func (x *ActivityInfo) GetLastDeploymentVersion() *v18.WorkerDeploymentVersion { + if x != nil { + return x.LastDeploymentVersion + } + return nil +} + +func (x *ActivityInfo) GetPriority() *v13.Priority { + if x != nil { + return x.Priority + } + return nil +} + +func (x *ActivityInfo) GetPauseInfo() *ActivityInfo_PauseInfo { + if x != nil { + return x.PauseInfo + } + return nil +} + +func (x *ActivityInfo) GetActivityReset() bool { + if x != nil { + return x.ActivityReset + } + return false +} + +func (x *ActivityInfo) GetResetHeartbeats() bool { + if x != nil { + return x.ResetHeartbeats + } + return false +} + +func (x *ActivityInfo) GetStartVersion() int64 { + if x != nil { + return x.StartVersion + } + return 0 +} + +func (x *ActivityInfo) GetWorkerControlTaskQueue() string { + if x != nil { + return x.WorkerControlTaskQueue + } + return "" +} + +func (x *ActivityInfo) GetStartedClock() *v15.VectorClock { + if x != nil { + return x.StartedClock + } + return nil +} + +type isActivityInfo_BuildIdInfo interface { + isActivityInfo_BuildIdInfo() +} + +type ActivityInfo_UseWorkflowBuildIdInfo_ struct { + // When present, it means this activity is assigned to the build ID of its workflow. + // Deprecated. 
Clean up with versioning-2. [cleanup-old-wv] + UseWorkflowBuildIdInfo *ActivityInfo_UseWorkflowBuildIdInfo `protobuf:"bytes,35,opt,name=use_workflow_build_id_info,json=useWorkflowBuildIdInfo,proto3,oneof"` +} + +type ActivityInfo_LastIndependentlyAssignedBuildId struct { + // This means the activity is independently versioned and not bound to the build ID of its workflow. + // If the task fails and is scheduled again, the assigned build ID may change according to the latest versioning + // rules. This value also updates if a redirect rule is applied to the activity task to reflect the build ID + // of the worker who received the task. + // Deprecated. Clean up with versioning-2. [cleanup-old-wv] + LastIndependentlyAssignedBuildId string `protobuf:"bytes,36,opt,name=last_independently_assigned_build_id,json=lastIndependentlyAssignedBuildId,proto3,oneof"` +} + +func (*ActivityInfo_UseWorkflowBuildIdInfo_) isActivityInfo_BuildIdInfo() {} + +func (*ActivityInfo_LastIndependentlyAssignedBuildId) isActivityInfo_BuildIdInfo() {} + +// timer_map column +type TimerInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + StartedEventId int64 `protobuf:"varint,2,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + ExpiryTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiry_time,json=expiryTime,proto3" json:"expiry_time,omitempty"` + TaskStatus int64 `protobuf:"varint,4,opt,name=task_status,json=taskStatus,proto3" json:"task_status,omitempty"` + // timerId serves the purpose of indicating whether a timer task is generated for this timer info. 
+ TimerId string `protobuf:"bytes,5,opt,name=timer_id,json=timerId,proto3" json:"timer_id,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,6,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TimerInfo) Reset() { + *x = TimerInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TimerInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TimerInfo) ProtoMessage() {} + +func (x *TimerInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TimerInfo.ProtoReflect.Descriptor instead. 
+func (*TimerInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{18} +} + +func (x *TimerInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *TimerInfo) GetStartedEventId() int64 { + if x != nil { + return x.StartedEventId + } + return 0 +} + +func (x *TimerInfo) GetExpiryTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpiryTime + } + return nil +} + +func (x *TimerInfo) GetTaskStatus() int64 { + if x != nil { + return x.TaskStatus + } + return 0 +} + +func (x *TimerInfo) GetTimerId() string { + if x != nil { + return x.TimerId + } + return "" +} + +func (x *TimerInfo) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +// child_executions_map column +type ChildExecutionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + InitiatedEventBatchId int64 `protobuf:"varint,2,opt,name=initiated_event_batch_id,json=initiatedEventBatchId,proto3" json:"initiated_event_batch_id,omitempty"` + StartedEventId int64 `protobuf:"varint,3,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` + StartedWorkflowId string `protobuf:"bytes,5,opt,name=started_workflow_id,json=startedWorkflowId,proto3" json:"started_workflow_id,omitempty"` + StartedRunId string `protobuf:"bytes,6,opt,name=started_run_id,json=startedRunId,proto3" json:"started_run_id,omitempty"` + CreateRequestId string `protobuf:"bytes,8,opt,name=create_request_id,json=createRequestId,proto3" json:"create_request_id,omitempty"` + Namespace string `protobuf:"bytes,9,opt,name=namespace,proto3" json:"namespace,omitempty"` + WorkflowTypeName string `protobuf:"bytes,10,opt,name=workflow_type_name,json=workflowTypeName,proto3" json:"workflow_type_name,omitempty"` + ParentClosePolicy 
v11.ParentClosePolicy `protobuf:"varint,11,opt,name=parent_close_policy,json=parentClosePolicy,proto3,enum=temporal.api.enums.v1.ParentClosePolicy" json:"parent_close_policy,omitempty"` + InitiatedEventId int64 `protobuf:"varint,12,opt,name=initiated_event_id,json=initiatedEventId,proto3" json:"initiated_event_id,omitempty"` + Clock *v15.VectorClock `protobuf:"bytes,13,opt,name=clock,proto3" json:"clock,omitempty"` + NamespaceId string `protobuf:"bytes,14,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,15,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + Priority *v13.Priority `protobuf:"bytes,16,opt,name=priority,proto3" json:"priority,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ChildExecutionInfo) Reset() { + *x = ChildExecutionInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ChildExecutionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ChildExecutionInfo) ProtoMessage() {} + +func (x *ChildExecutionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ChildExecutionInfo.ProtoReflect.Descriptor instead. 
+func (*ChildExecutionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{19} +} + +func (x *ChildExecutionInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *ChildExecutionInfo) GetInitiatedEventBatchId() int64 { + if x != nil { + return x.InitiatedEventBatchId + } + return 0 +} + +func (x *ChildExecutionInfo) GetStartedEventId() int64 { + if x != nil { + return x.StartedEventId + } + return 0 +} + +func (x *ChildExecutionInfo) GetStartedWorkflowId() string { + if x != nil { + return x.StartedWorkflowId + } + return "" +} + +func (x *ChildExecutionInfo) GetStartedRunId() string { + if x != nil { + return x.StartedRunId + } + return "" +} + +func (x *ChildExecutionInfo) GetCreateRequestId() string { + if x != nil { + return x.CreateRequestId + } + return "" +} + +func (x *ChildExecutionInfo) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ChildExecutionInfo) GetWorkflowTypeName() string { + if x != nil { + return x.WorkflowTypeName + } + return "" +} + +func (x *ChildExecutionInfo) GetParentClosePolicy() v11.ParentClosePolicy { + if x != nil { + return x.ParentClosePolicy + } + return v11.ParentClosePolicy(0) +} + +func (x *ChildExecutionInfo) GetInitiatedEventId() int64 { + if x != nil { + return x.InitiatedEventId + } + return 0 +} + +func (x *ChildExecutionInfo) GetClock() *v15.VectorClock { + if x != nil { + return x.Clock + } + return nil +} + +func (x *ChildExecutionInfo) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ChildExecutionInfo) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +func (x *ChildExecutionInfo) GetPriority() *v13.Priority { + if x != nil { + return x.Priority + } + return nil +} + +// request_cancel_map column +type RequestCancelInfo struct { + state 
protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + InitiatedEventBatchId int64 `protobuf:"varint,2,opt,name=initiated_event_batch_id,json=initiatedEventBatchId,proto3" json:"initiated_event_batch_id,omitempty"` + CancelRequestId string `protobuf:"bytes,3,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` + InitiatedEventId int64 `protobuf:"varint,4,opt,name=initiated_event_id,json=initiatedEventId,proto3" json:"initiated_event_id,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,5,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelInfo) Reset() { + *x = RequestCancelInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelInfo) ProtoMessage() {} + +func (x *RequestCancelInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelInfo.ProtoReflect.Descriptor instead. 
+func (*RequestCancelInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{20} +} + +func (x *RequestCancelInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *RequestCancelInfo) GetInitiatedEventBatchId() int64 { + if x != nil { + return x.InitiatedEventBatchId + } + return 0 +} + +func (x *RequestCancelInfo) GetCancelRequestId() string { + if x != nil { + return x.CancelRequestId + } + return "" +} + +func (x *RequestCancelInfo) GetInitiatedEventId() int64 { + if x != nil { + return x.InitiatedEventId + } + return 0 +} + +func (x *RequestCancelInfo) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +// signal_map column +type SignalInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + InitiatedEventBatchId int64 `protobuf:"varint,2,opt,name=initiated_event_batch_id,json=initiatedEventBatchId,proto3" json:"initiated_event_batch_id,omitempty"` + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + InitiatedEventId int64 `protobuf:"varint,7,opt,name=initiated_event_id,json=initiatedEventId,proto3" json:"initiated_event_id,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,9,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SignalInfo) Reset() { + *x = SignalInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SignalInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*SignalInfo) ProtoMessage() {} + +func (x *SignalInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignalInfo.ProtoReflect.Descriptor instead. +func (*SignalInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{21} +} + +func (x *SignalInfo) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +func (x *SignalInfo) GetInitiatedEventBatchId() int64 { if x != nil { - return x.Version + return x.InitiatedEventBatchId } return 0 } -func (x *TimerTaskInfo) GetScheduleAttempt() int32 { +func (x *SignalInfo) GetRequestId() string { if x != nil { - return x.ScheduleAttempt + return x.RequestId } - return 0 + return "" } -func (x *TimerTaskInfo) GetEventId() int64 { +func (x *SignalInfo) GetInitiatedEventId() int64 { if x != nil { - return x.EventId + return x.InitiatedEventId } return 0 } -func (x *TimerTaskInfo) GetTaskId() int64 { +func (x *SignalInfo) GetLastUpdateVersionedTransition() *VersionedTransition { if x != nil { - return x.TaskId + return x.LastUpdateVersionedTransition } - return 0 + return nil } -func (x *TimerTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { +// checksum column +type Checksum struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Flavor v1.ChecksumFlavor `protobuf:"varint,2,opt,name=flavor,proto3,enum=temporal.server.api.enums.v1.ChecksumFlavor" json:"flavor,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Checksum) Reset() { + *x = Checksum{} + mi 
:= &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Checksum) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Checksum) ProtoMessage() {} + +func (x *Checksum) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[22] if x != nil { - return x.VisibilityTime + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *TimerTaskInfo) GetBranchToken() []byte { +// Deprecated: Use Checksum.ProtoReflect.Descriptor instead. +func (*Checksum) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{22} +} + +func (x *Checksum) GetVersion() int32 { if x != nil { - return x.BranchToken + return x.Version } - return nil + return 0 } -func (x *TimerTaskInfo) GetAlreadyArchived() bool { +func (x *Checksum) GetFlavor() v1.ChecksumFlavor { if x != nil { - return x.AlreadyArchived + return x.Flavor } - return false + return v1.ChecksumFlavor(0) } -func (x *TimerTaskInfo) GetStateMachineInfo() *StateMachineTaskInfo { +func (x *Checksum) GetValue() []byte { if x != nil { - return x.StateMachineInfo + return x.Value } return nil } -type ArchivalTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Callback struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *Callback_Nexus_ + // *Callback_Hsm + Variant isCallback_Variant `protobuf_oneof:"variant"` + Links []*v13.Link `protobuf:"bytes,100,rep,name=links,proto3" json:"links,omitempty"` unknownFields protoimpl.UnknownFields - - TaskId int64 `protobuf:"varint,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - NamespaceId string 
`protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,4,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType `protobuf:"varint,5,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - Version int64 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *ArchivalTaskInfo) Reset() { - *x = ArchivalTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *Callback) Reset() { + *x = Callback{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ArchivalTaskInfo) String() string { +func (x *Callback) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ArchivalTaskInfo) ProtoMessage() {} +func (*Callback) ProtoMessage() {} -func (x *ArchivalTaskInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Callback) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[23] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1522,96 +3729,89 @@ func (x *ArchivalTaskInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
ArchivalTaskInfo.ProtoReflect.Descriptor instead. -func (*ArchivalTaskInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{8} +// Deprecated: Use Callback.ProtoReflect.Descriptor instead. +func (*Callback) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{23} } -func (x *ArchivalTaskInfo) GetTaskId() int64 { +func (x *Callback) GetVariant() isCallback_Variant { if x != nil { - return x.TaskId + return x.Variant } - return 0 + return nil } -func (x *ArchivalTaskInfo) GetNamespaceId() string { +func (x *Callback) GetNexus() *Callback_Nexus { if x != nil { - return x.NamespaceId + if x, ok := x.Variant.(*Callback_Nexus_); ok { + return x.Nexus + } } - return "" + return nil } -func (x *ArchivalTaskInfo) GetWorkflowId() string { +func (x *Callback) GetHsm() *Callback_HSM { if x != nil { - return x.WorkflowId + if x, ok := x.Variant.(*Callback_Hsm); ok { + return x.Hsm + } } - return "" + return nil } -func (x *ArchivalTaskInfo) GetRunId() string { +func (x *Callback) GetLinks() []*v13.Link { if x != nil { - return x.RunId + return x.Links } - return "" + return nil } -func (x *ArchivalTaskInfo) GetTaskType() v1.TaskType { - if x != nil { - return x.TaskType - } - return v1.TaskType(0) +type isCallback_Variant interface { + isCallback_Variant() } -func (x *ArchivalTaskInfo) GetVersion() int64 { - if x != nil { - return x.Version - } - return 0 +type Callback_Nexus_ struct { + Nexus *Callback_Nexus `protobuf:"bytes,2,opt,name=nexus,proto3,oneof"` } -func (x *ArchivalTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { - if x != nil { - return x.VisibilityTime - } - return nil +type Callback_Hsm struct { + Hsm *Callback_HSM `protobuf:"bytes,3,opt,name=hsm,proto3,oneof"` } -type OutboundTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (*Callback_Nexus_) 
isCallback_Variant() {} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - TaskId int64 `protobuf:"varint,5,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` - // Destination of this task (e.g. protocol+host+port for callbacks). - // Outbound tasks are grouped by this field (and the namespace ID) when scheduling. - Destination string `protobuf:"bytes,7,opt,name=destination,proto3" json:"destination,omitempty"` - // If task addresses a sub-statemachine (e.g. callback), this field will be set. - StateMachineInfo *StateMachineTaskInfo `protobuf:"bytes,8,opt,name=state_machine_info,json=stateMachineInfo,proto3" json:"state_machine_info,omitempty"` +func (*Callback_Hsm) isCallback_Variant() {} + +type HSMCompletionCallbackArg struct { + state protoimpl.MessageState `protogen:"open.v1"` + // namespace ID of the workflow that just completed. + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // ID of the workflow that just completed. + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // run ID of the workflow that just completed. + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // Last event of the completed workflow. 
+ LastEvent *v17.HistoryEvent `protobuf:"bytes,4,opt,name=last_event,json=lastEvent,proto3" json:"last_event,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *OutboundTaskInfo) Reset() { - *x = OutboundTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *HSMCompletionCallbackArg) Reset() { + *x = HSMCompletionCallbackArg{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *OutboundTaskInfo) String() string { +func (x *HSMCompletionCallbackArg) String() string { return protoimpl.X.MessageStringOf(x) } -func (*OutboundTaskInfo) ProtoMessage() {} +func (*HSMCompletionCallbackArg) ProtoMessage() {} -func (x *OutboundTaskInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { +func (x *HSMCompletionCallbackArg) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[24] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1621,396 +3821,483 @@ func (x *OutboundTaskInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use OutboundTaskInfo.ProtoReflect.Descriptor instead. -func (*OutboundTaskInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{9} +// Deprecated: Use HSMCompletionCallbackArg.ProtoReflect.Descriptor instead. 
+func (*HSMCompletionCallbackArg) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{24} } -func (x *OutboundTaskInfo) GetNamespaceId() string { +func (x *HSMCompletionCallbackArg) GetNamespaceId() string { if x != nil { return x.NamespaceId } return "" } -func (x *OutboundTaskInfo) GetWorkflowId() string { +func (x *HSMCompletionCallbackArg) GetWorkflowId() string { if x != nil { return x.WorkflowId } return "" } -func (x *OutboundTaskInfo) GetRunId() string { +func (x *HSMCompletionCallbackArg) GetRunId() string { if x != nil { return x.RunId } return "" } -func (x *OutboundTaskInfo) GetTaskType() v1.TaskType { +func (x *HSMCompletionCallbackArg) GetLastEvent() *v17.HistoryEvent { if x != nil { - return x.TaskType + return x.LastEvent } - return v1.TaskType(0) + return nil } -func (x *OutboundTaskInfo) GetTaskId() int64 { - if x != nil { - return x.TaskId - } - return 0 +type CallbackInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Information on how this callback should be invoked (e.g. its URL and type). + Callback *Callback `protobuf:"bytes,1,opt,name=callback,proto3" json:"callback,omitempty"` + // Trigger for this callback. + Trigger *CallbackInfo_Trigger `protobuf:"bytes,2,opt,name=trigger,proto3" json:"trigger,omitempty"` + // The time when the callback was registered. + RegistrationTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=registration_time,json=registrationTime,proto3" json:"registration_time,omitempty"` + State v1.CallbackState `protobuf:"varint,4,opt,name=state,proto3,enum=temporal.server.api.enums.v1.CallbackState" json:"state,omitempty"` + // The number of attempts made to deliver the callback. + // This number represents a minimum bound since the attempt is incremented after the callback request completes. + Attempt int32 `protobuf:"varint,5,opt,name=attempt,proto3" json:"attempt,omitempty"` + // The time when the last attempt completed. 
+ LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // The last attempt's failure, if any. + LastAttemptFailure *v110.Failure `protobuf:"bytes,7,opt,name=last_attempt_failure,json=lastAttemptFailure,proto3" json:"last_attempt_failure,omitempty"` + // The time when the next attempt is scheduled. + NextAttemptScheduleTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=next_attempt_schedule_time,json=nextAttemptScheduleTime,proto3" json:"next_attempt_schedule_time,omitempty"` + // Request ID that added the callback. + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *OutboundTaskInfo) GetVisibilityTime() *timestamppb.Timestamp { - if x != nil { - return x.VisibilityTime - } - return nil +func (x *CallbackInfo) Reset() { + *x = CallbackInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *OutboundTaskInfo) GetDestination() string { - if x != nil { - return x.Destination - } - return "" +func (x *CallbackInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *OutboundTaskInfo) GetStateMachineInfo() *StateMachineTaskInfo { +func (*CallbackInfo) ProtoMessage() {} + +func (x *CallbackInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[25] if x != nil { - return x.StateMachineInfo + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -// activity_map column -type ActivityInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields 
protoimpl.UnknownFields - - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - ScheduledEventBatchId int64 `protobuf:"varint,2,opt,name=scheduled_event_batch_id,json=scheduledEventBatchId,proto3" json:"scheduled_event_batch_id,omitempty"` - ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` - StartedEventId int64 `protobuf:"varint,5,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` - StartedTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` - ActivityId string `protobuf:"bytes,8,opt,name=activity_id,json=activityId,proto3" json:"activity_id,omitempty"` - RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // (-- api-linter: core::0140::prepositions=disabled - // - // aip.dev/not-precedent: "to" is used to indicate interval. --) - ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` - // (-- api-linter: core::0140::prepositions=disabled - // - // aip.dev/not-precedent: "to" is used to indicate interval. --) - ScheduleToCloseTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3" json:"schedule_to_close_timeout,omitempty"` - // (-- api-linter: core::0140::prepositions=disabled - // - // aip.dev/not-precedent: "to" is used to indicate interval. 
--) - StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` - HeartbeatTimeout *durationpb.Duration `protobuf:"bytes,13,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3" json:"heartbeat_timeout,omitempty"` - CancelRequested bool `protobuf:"varint,14,opt,name=cancel_requested,json=cancelRequested,proto3" json:"cancel_requested,omitempty"` - CancelRequestId int64 `protobuf:"varint,15,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` - TimerTaskStatus int32 `protobuf:"varint,16,opt,name=timer_task_status,json=timerTaskStatus,proto3" json:"timer_task_status,omitempty"` - Attempt int32 `protobuf:"varint,17,opt,name=attempt,proto3" json:"attempt,omitempty"` - TaskQueue string `protobuf:"bytes,18,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - StartedIdentity string `protobuf:"bytes,19,opt,name=started_identity,json=startedIdentity,proto3" json:"started_identity,omitempty"` - HasRetryPolicy bool `protobuf:"varint,20,opt,name=has_retry_policy,json=hasRetryPolicy,proto3" json:"has_retry_policy,omitempty"` - RetryInitialInterval *durationpb.Duration `protobuf:"bytes,21,opt,name=retry_initial_interval,json=retryInitialInterval,proto3" json:"retry_initial_interval,omitempty"` - RetryMaximumInterval *durationpb.Duration `protobuf:"bytes,22,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3" json:"retry_maximum_interval,omitempty"` - RetryMaximumAttempts int32 `protobuf:"varint,23,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` - RetryExpirationTime *timestamppb.Timestamp `protobuf:"bytes,24,opt,name=retry_expiration_time,json=retryExpirationTime,proto3" json:"retry_expiration_time,omitempty"` - RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" 
json:"retry_backoff_coefficient,omitempty"` - RetryNonRetryableErrorTypes []string `protobuf:"bytes,26,rep,name=retry_non_retryable_error_types,json=retryNonRetryableErrorTypes,proto3" json:"retry_non_retryable_error_types,omitempty"` - RetryLastFailure *v18.Failure `protobuf:"bytes,27,opt,name=retry_last_failure,json=retryLastFailure,proto3" json:"retry_last_failure,omitempty"` - RetryLastWorkerIdentity string `protobuf:"bytes,28,opt,name=retry_last_worker_identity,json=retryLastWorkerIdentity,proto3" json:"retry_last_worker_identity,omitempty"` - ScheduledEventId int64 `protobuf:"varint,30,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` - LastHeartbeatDetails *v12.Payloads `protobuf:"bytes,31,opt,name=last_heartbeat_details,json=lastHeartbeatDetails,proto3" json:"last_heartbeat_details,omitempty"` - LastHeartbeatUpdateTime *timestamppb.Timestamp `protobuf:"bytes,32,opt,name=last_heartbeat_update_time,json=lastHeartbeatUpdateTime,proto3" json:"last_heartbeat_update_time,omitempty"` - UseCompatibleVersion bool `protobuf:"varint,33,opt,name=use_compatible_version,json=useCompatibleVersion,proto3" json:"use_compatible_version,omitempty"` - ActivityType *v12.ActivityType `protobuf:"bytes,34,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` +// Deprecated: Use CallbackInfo.ProtoReflect.Descriptor instead. 
+func (*CallbackInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{25} } -func (x *ActivityInfo) Reset() { - *x = ActivityInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *CallbackInfo) GetCallback() *Callback { + if x != nil { + return x.Callback } + return nil } -func (x *ActivityInfo) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *CallbackInfo) GetTrigger() *CallbackInfo_Trigger { + if x != nil { + return x.Trigger + } + return nil } -func (*ActivityInfo) ProtoMessage() {} - -func (x *ActivityInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *CallbackInfo) GetRegistrationTime() *timestamppb.Timestamp { + if x != nil { + return x.RegistrationTime } - return mi.MessageOf(x) + return nil } -// Deprecated: Use ActivityInfo.ProtoReflect.Descriptor instead. 
-func (*ActivityInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{10} +func (x *CallbackInfo) GetState() v1.CallbackState { + if x != nil { + return x.State + } + return v1.CallbackState(0) } -func (x *ActivityInfo) GetVersion() int64 { +func (x *CallbackInfo) GetAttempt() int32 { if x != nil { - return x.Version + return x.Attempt } return 0 } -func (x *ActivityInfo) GetScheduledEventBatchId() int64 { +func (x *CallbackInfo) GetLastAttemptCompleteTime() *timestamppb.Timestamp { if x != nil { - return x.ScheduledEventBatchId + return x.LastAttemptCompleteTime } - return 0 + return nil } -func (x *ActivityInfo) GetScheduledTime() *timestamppb.Timestamp { +func (x *CallbackInfo) GetLastAttemptFailure() *v110.Failure { if x != nil { - return x.ScheduledTime + return x.LastAttemptFailure } return nil } -func (x *ActivityInfo) GetStartedEventId() int64 { +func (x *CallbackInfo) GetNextAttemptScheduleTime() *timestamppb.Timestamp { if x != nil { - return x.StartedEventId + return x.NextAttemptScheduleTime } - return 0 + return nil } -func (x *ActivityInfo) GetStartedTime() *timestamppb.Timestamp { +func (x *CallbackInfo) GetRequestId() string { if x != nil { - return x.StartedTime + return x.RequestId } - return nil + return "" } -func (x *ActivityInfo) GetActivityId() string { +// NexusOperationInfo contains the state of a nexus operation. +type NexusOperationInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Endpoint name. + // Resolved the endpoint registry for this workflow's namespace. + Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // Service name. + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + // Operation name. + Operation string `protobuf:"bytes,3,opt,name=operation,proto3" json:"operation,omitempty"` + // Token for fetching the scheduled event. 
+ ScheduledEventToken []byte `protobuf:"bytes,5,opt,name=scheduled_event_token,json=scheduledEventToken,proto3" json:"scheduled_event_token,omitempty"` + // Operation token. Only set for asynchronous operations after a successful StartOperation call. + OperationToken string `protobuf:"bytes,6,opt,name=operation_token,json=operationToken,proto3" json:"operation_token,omitempty"` + // Schedule-to-close timeout for this operation. + // This is the only timeout settable by a workflow. + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "since" is needed here. --) + ScheduleToCloseTimeout *durationpb.Duration `protobuf:"bytes,7,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3" json:"schedule_to_close_timeout,omitempty"` + // The time when the operation was scheduled. + ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + // Unique request ID allocated for all retry attempts of the StartOperation request. + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + State v1.NexusOperationState `protobuf:"varint,10,opt,name=state,proto3,enum=temporal.server.api.enums.v1.NexusOperationState" json:"state,omitempty"` + // The number of attempts made to deliver the start operation request. + // This number represents a minimum bound since the attempt is incremented after the request completes. + Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` + // The time when the last attempt completed. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // The last attempt's failure, if any. 
+ LastAttemptFailure *v110.Failure `protobuf:"bytes,13,opt,name=last_attempt_failure,json=lastAttemptFailure,proto3" json:"last_attempt_failure,omitempty"` + // The time when the next attempt is scheduled. + NextAttemptScheduleTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=next_attempt_schedule_time,json=nextAttemptScheduleTime,proto3" json:"next_attempt_schedule_time,omitempty"` + // Endpoint ID, the name is also stored here (field 1) but we use the ID internally to avoid failing operation + // requests when an endpoint is renamed. + EndpointId string `protobuf:"bytes,15,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` + // Schedule-to-start timeout for this operation. + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,16,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` + // Start-to-close timeout for this operation. + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,17,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` + // Time the operation was started (only available for async operations). 
+ StartedTime *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusOperationInfo) Reset() { + *x = NexusOperationInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusOperationInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusOperationInfo) ProtoMessage() {} + +func (x *NexusOperationInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[26] if x != nil { - return x.ActivityId + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusOperationInfo.ProtoReflect.Descriptor instead. 
+func (*NexusOperationInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{26} +} + +func (x *NexusOperationInfo) GetEndpoint() string { + if x != nil { + return x.Endpoint } return "" } -func (x *ActivityInfo) GetRequestId() string { +func (x *NexusOperationInfo) GetService() string { if x != nil { - return x.RequestId + return x.Service } return "" } -func (x *ActivityInfo) GetScheduleToStartTimeout() *durationpb.Duration { +func (x *NexusOperationInfo) GetOperation() string { if x != nil { - return x.ScheduleToStartTimeout + return x.Operation } - return nil + return "" } -func (x *ActivityInfo) GetScheduleToCloseTimeout() *durationpb.Duration { +func (x *NexusOperationInfo) GetScheduledEventToken() []byte { if x != nil { - return x.ScheduleToCloseTimeout + return x.ScheduledEventToken } return nil } -func (x *ActivityInfo) GetStartToCloseTimeout() *durationpb.Duration { +func (x *NexusOperationInfo) GetOperationToken() string { if x != nil { - return x.StartToCloseTimeout + return x.OperationToken } - return nil + return "" } -func (x *ActivityInfo) GetHeartbeatTimeout() *durationpb.Duration { +func (x *NexusOperationInfo) GetScheduleToCloseTimeout() *durationpb.Duration { if x != nil { - return x.HeartbeatTimeout + return x.ScheduleToCloseTimeout } return nil } -func (x *ActivityInfo) GetCancelRequested() bool { +func (x *NexusOperationInfo) GetScheduledTime() *timestamppb.Timestamp { if x != nil { - return x.CancelRequested + return x.ScheduledTime } - return false + return nil } -func (x *ActivityInfo) GetCancelRequestId() int64 { +func (x *NexusOperationInfo) GetRequestId() string { if x != nil { - return x.CancelRequestId + return x.RequestId } - return 0 + return "" } -func (x *ActivityInfo) GetTimerTaskStatus() int32 { +func (x *NexusOperationInfo) GetState() v1.NexusOperationState { if x != nil { - return x.TimerTaskStatus + return x.State } - return 0 + return 
v1.NexusOperationState(0) } -func (x *ActivityInfo) GetAttempt() int32 { +func (x *NexusOperationInfo) GetAttempt() int32 { if x != nil { return x.Attempt } return 0 } -func (x *ActivityInfo) GetTaskQueue() string { +func (x *NexusOperationInfo) GetLastAttemptCompleteTime() *timestamppb.Timestamp { if x != nil { - return x.TaskQueue + return x.LastAttemptCompleteTime } - return "" + return nil } -func (x *ActivityInfo) GetStartedIdentity() string { +func (x *NexusOperationInfo) GetLastAttemptFailure() *v110.Failure { if x != nil { - return x.StartedIdentity + return x.LastAttemptFailure } - return "" + return nil } -func (x *ActivityInfo) GetHasRetryPolicy() bool { +func (x *NexusOperationInfo) GetNextAttemptScheduleTime() *timestamppb.Timestamp { if x != nil { - return x.HasRetryPolicy + return x.NextAttemptScheduleTime } - return false + return nil } -func (x *ActivityInfo) GetRetryInitialInterval() *durationpb.Duration { +func (x *NexusOperationInfo) GetEndpointId() string { if x != nil { - return x.RetryInitialInterval + return x.EndpointId } - return nil + return "" } -func (x *ActivityInfo) GetRetryMaximumInterval() *durationpb.Duration { +func (x *NexusOperationInfo) GetScheduleToStartTimeout() *durationpb.Duration { if x != nil { - return x.RetryMaximumInterval + return x.ScheduleToStartTimeout } return nil } -func (x *ActivityInfo) GetRetryMaximumAttempts() int32 { +func (x *NexusOperationInfo) GetStartToCloseTimeout() *durationpb.Duration { if x != nil { - return x.RetryMaximumAttempts + return x.StartToCloseTimeout } - return 0 + return nil } -func (x *ActivityInfo) GetRetryExpirationTime() *timestamppb.Timestamp { +func (x *NexusOperationInfo) GetStartedTime() *timestamppb.Timestamp { if x != nil { - return x.RetryExpirationTime + return x.StartedTime } return nil } -func (x *ActivityInfo) GetRetryBackoffCoefficient() float64 { - if x != nil { - return x.RetryBackoffCoefficient - } - return 0 +// NexusOperationCancellationInfo contains the state of a 
nexus operation cancelation. +type NexusOperationCancellationInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The time when cancelation was requested. + RequestedTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=requested_time,json=requestedTime,proto3" json:"requested_time,omitempty"` + State v11.NexusOperationCancellationState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.api.enums.v1.NexusOperationCancellationState" json:"state,omitempty"` + // The number of attempts made to deliver the cancel operation request. + // This number represents a minimum bound since the attempt is incremented after the request completes. + Attempt int32 `protobuf:"varint,3,opt,name=attempt,proto3" json:"attempt,omitempty"` + // The time when the last attempt completed. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // The last attempt's failure, if any. + LastAttemptFailure *v110.Failure `protobuf:"bytes,5,opt,name=last_attempt_failure,json=lastAttemptFailure,proto3" json:"last_attempt_failure,omitempty"` + // The time when the next attempt is scheduled. + NextAttemptScheduleTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_attempt_schedule_time,json=nextAttemptScheduleTime,proto3" json:"next_attempt_schedule_time,omitempty"` + // The event ID of the NEXUS_OPERATION_CANCEL_REQUESTED event for this cancelation. 
+ RequestedEventId int64 `protobuf:"varint,7,opt,name=requested_event_id,json=requestedEventId,proto3" json:"requested_event_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusOperationCancellationInfo) Reset() { + *x = NexusOperationCancellationInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusOperationCancellationInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *ActivityInfo) GetRetryNonRetryableErrorTypes() []string { +func (*NexusOperationCancellationInfo) ProtoMessage() {} + +func (x *NexusOperationCancellationInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[27] if x != nil { - return x.RetryNonRetryableErrorTypes + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) +} + +// Deprecated: Use NexusOperationCancellationInfo.ProtoReflect.Descriptor instead. 
+func (*NexusOperationCancellationInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{27} } -func (x *ActivityInfo) GetRetryLastFailure() *v18.Failure { +func (x *NexusOperationCancellationInfo) GetRequestedTime() *timestamppb.Timestamp { if x != nil { - return x.RetryLastFailure + return x.RequestedTime } return nil } -func (x *ActivityInfo) GetRetryLastWorkerIdentity() string { +func (x *NexusOperationCancellationInfo) GetState() v11.NexusOperationCancellationState { if x != nil { - return x.RetryLastWorkerIdentity + return x.State } - return "" + return v11.NexusOperationCancellationState(0) } -func (x *ActivityInfo) GetScheduledEventId() int64 { +func (x *NexusOperationCancellationInfo) GetAttempt() int32 { if x != nil { - return x.ScheduledEventId + return x.Attempt } return 0 } -func (x *ActivityInfo) GetLastHeartbeatDetails() *v12.Payloads { +func (x *NexusOperationCancellationInfo) GetLastAttemptCompleteTime() *timestamppb.Timestamp { if x != nil { - return x.LastHeartbeatDetails + return x.LastAttemptCompleteTime } return nil } -func (x *ActivityInfo) GetLastHeartbeatUpdateTime() *timestamppb.Timestamp { +func (x *NexusOperationCancellationInfo) GetLastAttemptFailure() *v110.Failure { if x != nil { - return x.LastHeartbeatUpdateTime + return x.LastAttemptFailure } return nil } -func (x *ActivityInfo) GetUseCompatibleVersion() bool { +func (x *NexusOperationCancellationInfo) GetNextAttemptScheduleTime() *timestamppb.Timestamp { if x != nil { - return x.UseCompatibleVersion + return x.NextAttemptScheduleTime } - return false + return nil } -func (x *ActivityInfo) GetActivityType() *v12.ActivityType { +func (x *NexusOperationCancellationInfo) GetRequestedEventId() int64 { if x != nil { - return x.ActivityType + return x.RequestedEventId } - return nil + return 0 } -// timer_map column -type TimerInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields 
protoimpl.UnknownFields - - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - StartedEventId int64 `protobuf:"varint,2,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` - ExpiryTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expiry_time,json=expiryTime,proto3" json:"expiry_time,omitempty"` - TaskStatus int64 `protobuf:"varint,4,opt,name=task_status,json=taskStatus,proto3" json:"task_status,omitempty"` - // timerId serves the purpose of indicating whether a timer task is generated for this timer info. - TimerId string `protobuf:"bytes,5,opt,name=timer_id,json=timerId,proto3" json:"timer_id,omitempty"` +// ResetChildInfo contains the state and actions to be performed on children when a parent workflow resumes after reset. +type ResetChildInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // If true, the parent workflow should terminate the child before starting it. + ShouldTerminateAndStart bool `protobuf:"varint,1,opt,name=should_terminate_and_start,json=shouldTerminateAndStart,proto3" json:"should_terminate_and_start,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *TimerInfo) Reset() { - *x = TimerInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *ResetChildInfo) Reset() { + *x = ResetChildInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *TimerInfo) String() string { +func (x *ResetChildInfo) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TimerInfo) ProtoMessage() {} +func (*ResetChildInfo) ProtoMessage() {} -func (x *TimerInfo) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_persistence_v1_executions_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ResetChildInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[28] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2020,84 +4307,115 @@ func (x *TimerInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use TimerInfo.ProtoReflect.Descriptor instead. -func (*TimerInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{11} +// Deprecated: Use ResetChildInfo.ProtoReflect.Descriptor instead. +func (*ResetChildInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{28} } -func (x *TimerInfo) GetVersion() int64 { +func (x *ResetChildInfo) GetShouldTerminateAndStart() bool { if x != nil { - return x.Version + return x.ShouldTerminateAndStart } - return 0 + return false } -func (x *TimerInfo) GetStartedEventId() int64 { +type WorkflowPauseInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The time when the workflow was paused. + PauseTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=pause_time,json=pauseTime,proto3" json:"pause_time,omitempty"` + // The identity of the actor that paused the workflow. + Identity string `protobuf:"bytes,2,opt,name=identity,proto3" json:"identity,omitempty"` + // The reason for pausing the workflow. 
+ Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // A unique identifier for this pause request (for idempotency checks) + RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkflowPauseInfo) Reset() { + *x = WorkflowPauseInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkflowPauseInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowPauseInfo) ProtoMessage() {} + +func (x *WorkflowPauseInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[29] if x != nil { - return x.StartedEventId + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *TimerInfo) GetExpiryTime() *timestamppb.Timestamp { +// Deprecated: Use WorkflowPauseInfo.ProtoReflect.Descriptor instead. 
+func (*WorkflowPauseInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{29} +} + +func (x *WorkflowPauseInfo) GetPauseTime() *timestamppb.Timestamp { if x != nil { - return x.ExpiryTime + return x.PauseTime } return nil } -func (x *TimerInfo) GetTaskStatus() int64 { +func (x *WorkflowPauseInfo) GetIdentity() string { if x != nil { - return x.TaskStatus + return x.Identity } - return 0 + return "" } -func (x *TimerInfo) GetTimerId() string { +func (x *WorkflowPauseInfo) GetReason() string { if x != nil { - return x.TimerId + return x.Reason } return "" } -// child_executions_map column -type ChildExecutionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *WorkflowPauseInfo) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - InitiatedEventBatchId int64 `protobuf:"varint,2,opt,name=initiated_event_batch_id,json=initiatedEventBatchId,proto3" json:"initiated_event_batch_id,omitempty"` - StartedEventId int64 `protobuf:"varint,3,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` - StartedWorkflowId string `protobuf:"bytes,5,opt,name=started_workflow_id,json=startedWorkflowId,proto3" json:"started_workflow_id,omitempty"` - StartedRunId string `protobuf:"bytes,6,opt,name=started_run_id,json=startedRunId,proto3" json:"started_run_id,omitempty"` - CreateRequestId string `protobuf:"bytes,8,opt,name=create_request_id,json=createRequestId,proto3" json:"create_request_id,omitempty"` - Namespace string `protobuf:"bytes,9,opt,name=namespace,proto3" json:"namespace,omitempty"` - WorkflowTypeName string `protobuf:"bytes,10,opt,name=workflow_type_name,json=workflowTypeName,proto3" json:"workflow_type_name,omitempty"` - ParentClosePolicy v17.ParentClosePolicy 
`protobuf:"varint,11,opt,name=parent_close_policy,json=parentClosePolicy,proto3,enum=temporal.api.enums.v1.ParentClosePolicy" json:"parent_close_policy,omitempty"` - InitiatedEventId int64 `protobuf:"varint,12,opt,name=initiated_event_id,json=initiatedEventId,proto3" json:"initiated_event_id,omitempty"` - Clock *v14.VectorClock `protobuf:"bytes,13,opt,name=clock,proto3" json:"clock,omitempty"` - NamespaceId string `protobuf:"bytes,14,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` +type TransferTaskInfo_CloseExecutionTaskDetails struct { + state protoimpl.MessageState `protogen:"open.v1"` + // can_skip_visibility_archival is set to true when we can guarantee that visibility records will be archived + // by some other task, so this task doesn't need to worry about it. + CanSkipVisibilityArchival bool `protobuf:"varint,1,opt,name=can_skip_visibility_archival,json=canSkipVisibilityArchival,proto3" json:"can_skip_visibility_archival,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ChildExecutionInfo) Reset() { - *x = ChildExecutionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *TransferTaskInfo_CloseExecutionTaskDetails) Reset() { + *x = TransferTaskInfo_CloseExecutionTaskDetails{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ChildExecutionInfo) String() string { +func (x *TransferTaskInfo_CloseExecutionTaskDetails) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ChildExecutionInfo) ProtoMessage() {} +func (*TransferTaskInfo_CloseExecutionTaskDetails) ProtoMessage() {} -func (x *ChildExecutionInfo) ProtoReflect() protoreflect.Message { - mi := 
&file_temporal_server_api_persistence_v1_executions_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { +func (x *TransferTaskInfo_CloseExecutionTaskDetails) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[38] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2107,125 +4425,193 @@ func (x *ChildExecutionInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ChildExecutionInfo.ProtoReflect.Descriptor instead. -func (*ChildExecutionInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{12} +// Deprecated: Use TransferTaskInfo_CloseExecutionTaskDetails.ProtoReflect.Descriptor instead. +func (*TransferTaskInfo_CloseExecutionTaskDetails) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{8, 0} } -func (x *ChildExecutionInfo) GetVersion() int64 { +func (x *TransferTaskInfo_CloseExecutionTaskDetails) GetCanSkipVisibilityArchival() bool { if x != nil { - return x.Version + return x.CanSkipVisibilityArchival } - return 0 + return false } -func (x *ChildExecutionInfo) GetInitiatedEventBatchId() int64 { +// Deprecated. Clean up with versioning-2. 
[cleanup-old-wv] +type ActivityInfo_UseWorkflowBuildIdInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // build ID of the wf when this activity started last time (which is the build ID of + // the worker who received this activity) + LastUsedBuildId string `protobuf:"bytes,1,opt,name=last_used_build_id,json=lastUsedBuildId,proto3" json:"last_used_build_id,omitempty"` + // workflows redirect_counter value when this activity started last time + LastRedirectCounter int64 `protobuf:"varint,2,opt,name=last_redirect_counter,json=lastRedirectCounter,proto3" json:"last_redirect_counter,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityInfo_UseWorkflowBuildIdInfo) Reset() { + *x = ActivityInfo_UseWorkflowBuildIdInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityInfo_UseWorkflowBuildIdInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityInfo_UseWorkflowBuildIdInfo) ProtoMessage() {} + +func (x *ActivityInfo_UseWorkflowBuildIdInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[39] if x != nil { - return x.InitiatedEventBatchId + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *ChildExecutionInfo) GetStartedEventId() int64 { - if x != nil { - return x.StartedEventId - } - return 0 +// Deprecated: Use ActivityInfo_UseWorkflowBuildIdInfo.ProtoReflect.Descriptor instead. 
+func (*ActivityInfo_UseWorkflowBuildIdInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{17, 0} } -func (x *ChildExecutionInfo) GetStartedWorkflowId() string { +func (x *ActivityInfo_UseWorkflowBuildIdInfo) GetLastUsedBuildId() string { if x != nil { - return x.StartedWorkflowId + return x.LastUsedBuildId } return "" } -func (x *ChildExecutionInfo) GetStartedRunId() string { +func (x *ActivityInfo_UseWorkflowBuildIdInfo) GetLastRedirectCounter() int64 { if x != nil { - return x.StartedRunId + return x.LastRedirectCounter } - return "" + return 0 } -func (x *ChildExecutionInfo) GetCreateRequestId() string { - if x != nil { - return x.CreateRequestId - } - return "" +type ActivityInfo_PauseInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The time when the activity was paused. + PauseTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=pause_time,json=pauseTime,proto3" json:"pause_time,omitempty"` + // Types that are valid to be assigned to PausedBy: + // + // *ActivityInfo_PauseInfo_Manual_ + // *ActivityInfo_PauseInfo_RuleId + PausedBy isActivityInfo_PauseInfo_PausedBy `protobuf_oneof:"paused_by"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *ChildExecutionInfo) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" +func (x *ActivityInfo_PauseInfo) Reset() { + *x = ActivityInfo_PauseInfo{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *ChildExecutionInfo) GetWorkflowTypeName() string { +func (x *ActivityInfo_PauseInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityInfo_PauseInfo) ProtoMessage() {} + +func (x *ActivityInfo_PauseInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[40] if x 
!= nil { - return x.WorkflowTypeName + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityInfo_PauseInfo.ProtoReflect.Descriptor instead. +func (*ActivityInfo_PauseInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{17, 1} } -func (x *ChildExecutionInfo) GetParentClosePolicy() v17.ParentClosePolicy { +func (x *ActivityInfo_PauseInfo) GetPauseTime() *timestamppb.Timestamp { if x != nil { - return x.ParentClosePolicy + return x.PauseTime } - return v17.ParentClosePolicy(0) + return nil } -func (x *ChildExecutionInfo) GetInitiatedEventId() int64 { +func (x *ActivityInfo_PauseInfo) GetPausedBy() isActivityInfo_PauseInfo_PausedBy { if x != nil { - return x.InitiatedEventId + return x.PausedBy } - return 0 + return nil } -func (x *ChildExecutionInfo) GetClock() *v14.VectorClock { +func (x *ActivityInfo_PauseInfo) GetManual() *ActivityInfo_PauseInfo_Manual { if x != nil { - return x.Clock + if x, ok := x.PausedBy.(*ActivityInfo_PauseInfo_Manual_); ok { + return x.Manual + } } return nil } -func (x *ChildExecutionInfo) GetNamespaceId() string { +func (x *ActivityInfo_PauseInfo) GetRuleId() string { if x != nil { - return x.NamespaceId + if x, ok := x.PausedBy.(*ActivityInfo_PauseInfo_RuleId); ok { + return x.RuleId + } } return "" } -// request_cancel_map column -type RequestCancelInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type isActivityInfo_PauseInfo_PausedBy interface { + isActivityInfo_PauseInfo_PausedBy() +} - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - InitiatedEventBatchId int64 `protobuf:"varint,2,opt,name=initiated_event_batch_id,json=initiatedEventBatchId,proto3" json:"initiated_event_batch_id,omitempty"` - CancelRequestId 
string `protobuf:"bytes,3,opt,name=cancel_request_id,json=cancelRequestId,proto3" json:"cancel_request_id,omitempty"` - InitiatedEventId int64 `protobuf:"varint,4,opt,name=initiated_event_id,json=initiatedEventId,proto3" json:"initiated_event_id,omitempty"` +type ActivityInfo_PauseInfo_Manual_ struct { + // activity was paused by the manual intervention + Manual *ActivityInfo_PauseInfo_Manual `protobuf:"bytes,2,opt,name=manual,proto3,oneof"` } -func (x *RequestCancelInfo) Reset() { - *x = RequestCancelInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +type ActivityInfo_PauseInfo_RuleId struct { + // Id of the rule that paused the activity. + RuleId string `protobuf:"bytes,3,opt,name=rule_id,json=ruleId,proto3,oneof"` } -func (x *RequestCancelInfo) String() string { +func (*ActivityInfo_PauseInfo_Manual_) isActivityInfo_PauseInfo_PausedBy() {} + +func (*ActivityInfo_PauseInfo_RuleId) isActivityInfo_PauseInfo_PausedBy() {} + +type ActivityInfo_PauseInfo_Manual struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The identity of the actor that paused the activity. + Identity string `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"` + // Reason for pausing the activity. 
+ Reason string `protobuf:"bytes,2,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityInfo_PauseInfo_Manual) Reset() { + *x = ActivityInfo_PauseInfo_Manual{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityInfo_PauseInfo_Manual) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RequestCancelInfo) ProtoMessage() {} +func (*ActivityInfo_PauseInfo_Manual) ProtoMessage() {} -func (x *RequestCancelInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { +func (x *ActivityInfo_PauseInfo_Manual) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[41] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2235,69 +4621,54 @@ func (x *RequestCancelInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RequestCancelInfo.ProtoReflect.Descriptor instead. -func (*RequestCancelInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{13} -} - -func (x *RequestCancelInfo) GetVersion() int64 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *RequestCancelInfo) GetInitiatedEventBatchId() int64 { - if x != nil { - return x.InitiatedEventBatchId - } - return 0 +// Deprecated: Use ActivityInfo_PauseInfo_Manual.ProtoReflect.Descriptor instead. 
+func (*ActivityInfo_PauseInfo_Manual) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{17, 1, 0} } -func (x *RequestCancelInfo) GetCancelRequestId() string { +func (x *ActivityInfo_PauseInfo_Manual) GetIdentity() string { if x != nil { - return x.CancelRequestId + return x.Identity } return "" } -func (x *RequestCancelInfo) GetInitiatedEventId() int64 { +func (x *ActivityInfo_PauseInfo_Manual) GetReason() string { if x != nil { - return x.InitiatedEventId + return x.Reason } - return 0 + return "" } -// signal_map column -type SignalInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Callback_Nexus struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Callback URL. + // (-- api-linter: core::0140::uri=disabled + // + // aip.dev/not-precedent: Not respecting aip here. --) + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + // Header to attach to callback request. 
+ Header map[string]string `protobuf:"bytes,2,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - InitiatedEventBatchId int64 `protobuf:"varint,2,opt,name=initiated_event_batch_id,json=initiatedEventBatchId,proto3" json:"initiated_event_batch_id,omitempty"` - RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - InitiatedEventId int64 `protobuf:"varint,7,opt,name=initiated_event_id,json=initiatedEventId,proto3" json:"initiated_event_id,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *SignalInfo) Reset() { - *x = SignalInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *Callback_Nexus) Reset() { + *x = Callback_Nexus{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *SignalInfo) String() string { +func (x *Callback_Nexus) String() string { return protoimpl.X.MessageStringOf(x) } -func (*SignalInfo) ProtoMessage() {} +func (*Callback_Nexus) ProtoMessage() {} -func (x *SignalInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Callback_Nexus) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[42] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2307,68 +4678,59 @@ func (x *SignalInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use 
SignalInfo.ProtoReflect.Descriptor instead. -func (*SignalInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{14} -} - -func (x *SignalInfo) GetVersion() int64 { - if x != nil { - return x.Version - } - return 0 -} - -func (x *SignalInfo) GetInitiatedEventBatchId() int64 { - if x != nil { - return x.InitiatedEventBatchId - } - return 0 +// Deprecated: Use Callback_Nexus.ProtoReflect.Descriptor instead. +func (*Callback_Nexus) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{23, 0} } -func (x *SignalInfo) GetRequestId() string { +func (x *Callback_Nexus) GetUrl() string { if x != nil { - return x.RequestId + return x.Url } return "" } -func (x *SignalInfo) GetInitiatedEventId() int64 { +func (x *Callback_Nexus) GetHeader() map[string]string { if x != nil { - return x.InitiatedEventId + return x.Header } - return 0 + return nil } -// checksum column -type Checksum struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Callback_HSM struct { + state protoimpl.MessageState `protogen:"open.v1"` + // namespace id of the target state machine. + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // ID of the workflow that the target state machine is attached to. + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // Run id of said workflow. + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // A reference to the state machine. + Ref *StateMachineRef `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"` + // The method name to invoke. 
Methods must be explicitly registered for the target state machine in the state + // machine registry, and accept an argument type of HistoryEvent that is the completion event of the completed + // workflow. + Method string `protobuf:"bytes,5,opt,name=method,proto3" json:"method,omitempty"` unknownFields protoimpl.UnknownFields - - Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Flavor v1.ChecksumFlavor `protobuf:"varint,2,opt,name=flavor,proto3,enum=temporal.server.api.enums.v1.ChecksumFlavor" json:"flavor,omitempty"` - Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *Checksum) Reset() { - *x = Checksum{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *Callback_HSM) Reset() { + *x = Callback_HSM{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *Checksum) String() string { +func (x *Callback_HSM) String() string { return protoimpl.X.MessageStringOf(x) } -func (*Checksum) ProtoMessage() {} +func (*Callback_HSM) ProtoMessage() {} -func (x *Checksum) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { +func (x *Callback_HSM) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[43] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2378,58 +4740,69 @@ func (x *Checksum) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use Checksum.ProtoReflect.Descriptor instead. 
-func (*Checksum) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{15} +// Deprecated: Use Callback_HSM.ProtoReflect.Descriptor instead. +func (*Callback_HSM) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{23, 1} } -func (x *Checksum) GetVersion() int32 { +func (x *Callback_HSM) GetNamespaceId() string { if x != nil { - return x.Version + return x.NamespaceId } - return 0 + return "" } -func (x *Checksum) GetFlavor() v1.ChecksumFlavor { +func (x *Callback_HSM) GetWorkflowId() string { if x != nil { - return x.Flavor + return x.WorkflowId } - return v1.ChecksumFlavor(0) + return "" } -func (x *Checksum) GetValue() []byte { +func (x *Callback_HSM) GetRunId() string { if x != nil { - return x.Value + return x.RunId + } + return "" +} + +func (x *Callback_HSM) GetRef() *StateMachineRef { + if x != nil { + return x.Ref } return nil } -type CallbackInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *Callback_HSM) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} - PublicInfo *v11.CallbackInfo `protobuf:"bytes,1,opt,name=public_info,json=publicInfo,proto3" json:"public_info,omitempty"` +// Trigger for when the workflow is closed. 
+type CallbackInfo_WorkflowClosed struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *CallbackInfo) Reset() { - *x = CallbackInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CallbackInfo_WorkflowClosed) Reset() { + *x = CallbackInfo_WorkflowClosed{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *CallbackInfo) String() string { +func (x *CallbackInfo_WorkflowClosed) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CallbackInfo) ProtoMessage() {} +func (*CallbackInfo_WorkflowClosed) ProtoMessage() {} -func (x *CallbackInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { +func (x *CallbackInfo_WorkflowClosed) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[45] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2439,46 +4812,37 @@ func (x *CallbackInfo) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CallbackInfo.ProtoReflect.Descriptor instead. -func (*CallbackInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{16} -} - -func (x *CallbackInfo) GetPublicInfo() *v11.CallbackInfo { - if x != nil { - return x.PublicInfo - } - return nil +// Deprecated: Use CallbackInfo_WorkflowClosed.ProtoReflect.Descriptor instead. 
+func (*CallbackInfo_WorkflowClosed) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{25, 0} } -type TransferTaskInfo_CloseExecutionTaskDetails struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type CallbackInfo_Trigger struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *CallbackInfo_Trigger_WorkflowClosed + Variant isCallbackInfo_Trigger_Variant `protobuf_oneof:"variant"` unknownFields protoimpl.UnknownFields - - // can_skip_visibility_archival is set to true when we can guarantee that visibility records will be archived - // by some other task, so this task doesn't need to worry about it. - CanSkipVisibilityArchival bool `protobuf:"varint,1,opt,name=can_skip_visibility_archival,json=canSkipVisibilityArchival,proto3" json:"can_skip_visibility_archival,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *TransferTaskInfo_CloseExecutionTaskDetails) Reset() { - *x = TransferTaskInfo_CloseExecutionTaskDetails{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *CallbackInfo_Trigger) Reset() { + *x = CallbackInfo_Trigger{} + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *TransferTaskInfo_CloseExecutionTaskDetails) String() string { +func (x *CallbackInfo_Trigger) String() string { return protoimpl.X.MessageStringOf(x) } -func (*TransferTaskInfo_CloseExecutionTaskDetails) ProtoMessage() {} +func (*CallbackInfo_Trigger) ProtoMessage() {} -func (x *TransferTaskInfo_CloseExecutionTaskDetails) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[23] - if 
protoimpl.UnsafeEnabled && x != nil { +func (x *CallbackInfo_Trigger) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_executions_proto_msgTypes[46] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2488,1017 +4852,798 @@ func (x *TransferTaskInfo_CloseExecutionTaskDetails) ProtoReflect() protoreflect return mi.MessageOf(x) } -// Deprecated: Use TransferTaskInfo_CloseExecutionTaskDetails.ProtoReflect.Descriptor instead. -func (*TransferTaskInfo_CloseExecutionTaskDetails) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{4, 0} +// Deprecated: Use CallbackInfo_Trigger.ProtoReflect.Descriptor instead. +func (*CallbackInfo_Trigger) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP(), []int{25, 1} } -func (x *TransferTaskInfo_CloseExecutionTaskDetails) GetCanSkipVisibilityArchival() bool { +func (x *CallbackInfo_Trigger) GetVariant() isCallbackInfo_Trigger_Variant { if x != nil { - return x.CanSkipVisibilityArchival + return x.Variant } - return false + return nil } -var File_temporal_server_api_persistence_v1_executions_proto protoreflect.FileDescriptor +func (x *CallbackInfo_Trigger) GetWorkflowClosed() *CallbackInfo_WorkflowClosed { + if x != nil { + if x, ok := x.Variant.(*CallbackInfo_Trigger_WorkflowClosed); ok { + return x.WorkflowClosed + } + } + return nil +} + +type isCallbackInfo_Trigger_Variant interface { + isCallbackInfo_Trigger_Variant() +} -var file_temporal_server_api_persistence_v1_executions_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 
0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 
0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x35, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0x2f, 0x68, 0x73, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2b, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x75, 0x70, 
0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xcf, 0x05, 0x0a, 0x09, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, - 0x08, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x6f, 0x77, 0x6e, - 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x74, 0x6f, 0x6c, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x6e, 0x63, - 0x65, 0x5f, 0x72, 0x65, 0x6e, 0x65, 0x77, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x73, 0x74, - 0x6f, 0x6c, 0x65, 0x6e, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x6e, 0x65, 0x77, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x88, - 0x01, 0x0a, 0x19, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x6c, - 0x71, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x49, 0x2e, 0x74, 0x65, 0x6d, 
0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x6c, 0x71, 0x41, 0x63, 0x6b, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x44, 0x6c, 0x71, 0x41, 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x65, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, - 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x61, 0x72, 0x64, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x1a, 0x51, 0x0a, 0x1b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, - 0x6c, 0x71, 0x41, 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x76, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, 
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, - 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x08, 0x10, - 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x0b, - 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, - 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x22, 0xd0, 0x2b, 0x0a, 0x15, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x57, 
0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, - 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, - 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x16, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x1a, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 
0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x4f, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, - 0x75, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x60, 0x0a, 0x1d, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1a, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x12, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, - 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x31, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, - 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4f, 0x0a, 0x23, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x72, - 0x74, 
0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x1e, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, - 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x36, 0x0a, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x1c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x46, 0x0a, 0x1e, 0x77, 
0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x51, 0x0a, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x36, 0x0a, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x65, 0x6d, - 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x17, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x1c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 
0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x19, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x70, 0x0a, 0x25, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, - 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1e, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x21, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x72, 0x69, 0x67, 0x69, - 0x6e, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1f, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x15, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x60, 0x0a, 0x12, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x44, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, - 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 
0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x53, 0x0a, 0x25, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x5f, - 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x5f, 0x61, 0x73, 0x5f, 0x6e, 0x65, 0x77, 0x18, 0x45, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, - 0x6b, 0x53, 0x75, 0x67, 0x67, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x41, - 0x73, 0x4e, 0x65, 0x77, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x46, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x1c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x2d, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x65, 0x64, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x63, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x73, 0x74, 0x69, 0x63, - 0x6b, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x21, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x65, 0x0a, 0x20, 0x73, 0x74, 0x69, 0x63, 
0x6b, 0x79, 0x5f, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1c, 0x73, 0x74, 0x69, 0x63, 0x6b, 0x79, 0x53, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x6f, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, - 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, - 0x18, 0x23, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x53, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x53, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x38, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x26, 0x20, 0x01, 0x28, 0x05, 
- 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x41, 0x74, 0x74, - 0x65, 0x6d, 0x70, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x63, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, - 0x69, 0x65, 0x6e, 0x74, 0x18, 0x27, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x43, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, - 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6b, 0x0a, 0x22, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x1f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x1f, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x29, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x1b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4e, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x61, - 0x62, 0x6c, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2c, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x68, 0x61, 0x73, 0x52, 0x65, 0x74, - 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, 0x0d, 0x63, - 0x72, 0x6f, 
0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x2b, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x63, 0x72, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x47, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x61, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x36, 0x0a, 0x15, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x48, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x63, 0x68, - 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x49, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x75, 0x73, - 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x45, 0x0a, 0x1d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x4a, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x1a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x36, 0x0a, 0x15, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, - 0x6e, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x4b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, - 0x73, 0x69, 0x67, 0x6e, 0x61, 
0x6c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x4d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x61, 0x75, - 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x73, 0x65, 0x74, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x33, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0f, 0x61, 0x75, 0x74, 0x6f, 0x52, 0x65, - 0x73, 0x65, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x80, 0x01, 0x0a, - 0x11, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x34, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x04, 0x6d, 0x65, 0x6d, 0x6f, 0x18, - 0x35, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 
0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x6d, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, - 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, - 0x65, 0x73, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, 0x66, - 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x37, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x66, 0x69, 0x72, 0x73, 0x74, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x5f, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x73, 0x18, 0x38, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x1c, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 
0x39, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x19, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x78, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x3a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x69, 0x72, 0x73, 0x74, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x78, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, - 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x3b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x33, 0x0a, 0x14, 0x6e, 0x65, 0x77, 0x5f, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x3d, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x6e, 0x65, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x50, 0x0a, 0x0c, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x3e, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, - 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x3f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x37, 0x0a, 0x16, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, - 0x72, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x40, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, - 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x76, 0x69, - 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, - 0x41, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x56, 0x69, 0x73, 0x69, 0x62, - 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, - 0x0a, 0x0a, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x42, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x6c, 0x6f, - 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x49, 0x0a, 0x1f, 0x63, 0x6c, 0x6f, - 0x73, 0x65, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x61, 0x73, - 
0x6b, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x43, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x1c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x66, 0x0a, 0x13, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x4c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x62, 0x61, - 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x4e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x68, 0x00, 0x12, 0x71, - 0x0a, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x4f, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x4a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 
0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x12, 0x74, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x50, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, - 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x95, 0x01, 0x0a, 0x1a, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x51, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x55, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x75, 0x62, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x42, 0x79, 0x54, 0x79, 0x70, 0x65, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x73, 0x75, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x73, 0x42, 0x79, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x6c, - 0x0a, 0x15, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 
0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x60, 0x0a, 0x09, 0x4d, 0x65, 0x6d, 0x6f, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, - 0x01, 0x1a, 0x71, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x75, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x86, 0x01, - 0x0a, 0x1b, 0x53, 0x75, 0x62, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, - 0x73, 0x42, 0x79, 0x54, 0x79, 0x70, 0x65, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4d, 0x61, 0x70, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, - 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, - 0x08, 0x10, 0x10, 0x11, 0x4a, 0x04, 0x08, 0x2c, 0x10, 0x2d, 0x4a, 0x04, 0x08, 0x2d, 0x10, 0x2e, 0x4a, - 0x04, 0x08, 0x2f, 0x10, 0x30, 0x4a, 0x04, 0x08, 0x30, 0x10, 0x31, 0x4a, 0x04, 0x08, 0x31, 0x10, 0x32, - 0x4a, 0x04, 0x08, 0x32, 0x10, 0x33, 0x22, 0x37, 0x0a, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0c, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x68, 0x69, 0x73, - 0x74, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xff, 0x01, 0x0a, 0x16, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 
0x4e, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xac, - 0x07, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 
0x76, - 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, - 0x1a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x17, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4f, 0x6e, 0x6c, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 
0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x61, - 0x66, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x10, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x66, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x6f, 0x73, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x95, 0x01, 0x0a, 0x1c, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x66, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x43, 0x6c, 0x6f, 0x73, - 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x44, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x48, 0x00, 0x52, 0x19, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x1a, 0x60, 
0x0a, 0x19, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x43, - 0x0a, 0x1c, 0x63, 0x61, 0x6e, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x53, 0x6b, 0x69, 0x70, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0e, - 0x0a, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x4a, 0x04, 0x08, - 0x0e, 0x10, 0x0f, 0x22, 0xe3, 0x04, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, - 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 
0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x66, - 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0c, 0x66, 0x69, 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x72, 0x61, - 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x33, 0x0a, 0x14, 0x6e, - 0x65, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x6e, 0x65, 0x77, 0x52, 0x75, 0x6e, 0x42, - 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, - 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, - 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 
0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, - 0x0a, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6e, 0x65, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, - 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, - 0x08, 0x0e, 0x10, 0x0f, 0x22, 0x91, 0x03, 0x0a, 0x12, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, - 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, - 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 
0x61, 0x73, 0x6b, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x63, 0x6c, - 0x6f, 0x73, 0x65, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x61, - 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x63, 0x6c, 0x6f, 0x73, - 0x65, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, - 0x89, 0x06, 0x0a, 0x0d, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, - 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 
- 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x49, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x69, 0x0a, 0x15, 0x77, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x61, - 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x54, 0x79, 0x70, 0x65, 0x52, 0x13, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x41, 0x74, - 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, - 0x18, 0x0a, 0x20, 
0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x62, 0x72, 0x61, 0x6e, 0x63, - 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x61, 0x6c, 0x72, - 0x65, 0x61, 0x64, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x64, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, - 0x65, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x12, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6d, - 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc6, 0x02, 0x0a, 0x10, - 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 
0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x76, - 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x76, - 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xba, 0x03, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, - 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, - 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, - 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x24, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x12, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, 
0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, - 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x10, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, - 0x68, 0x00, 0x22, 0xc7, 0x0f, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, - 0x0a, 0x18, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 
0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, 0x0a, 0x19, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x16, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x54, 0x6f, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, 0x0a, 0x19, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x16, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x54, 0x6f, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x52, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x6f, 0x5f, 0x63, 0x6c, 0x6f, - 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, - 0x6f, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4a, 0x0a, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x10, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x54, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x63, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2e, 0x0a, 0x11, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x72, - 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, - 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x61, - 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x51, 0x75, 
0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x13, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x68, 0x61, 0x73, 0x5f, 0x72, 0x65, - 0x74, 0x72, 0x79, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0e, 0x68, 0x61, 0x73, 0x52, 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x53, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x53, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4d, - 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x38, 0x0a, 0x16, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x14, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x41, 0x74, 0x74, 0x65, - 0x6d, 0x70, 0x74, 0x73, 0x42, 0x02, 0x68, 
0x00, 0x12, 0x52, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x13, 0x72, 0x65, 0x74, 0x72, 0x79, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x19, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, - 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x63, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, - 0x6e, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x01, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x61, - 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x43, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x1f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, - 0x72, 0x65, 0x74, 0x72, 0x79, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1b, 0x72, 0x65, 0x74, 0x72, 0x79, - 0x4e, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x52, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x72, - 0x79, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x52, 0x10, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, - 0x75, 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x1a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 
0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x72, 0x65, 0x74, 0x72, 0x79, 0x4c, - 0x61, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x5a, 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x73, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x1a, 0x6c, 0x61, 0x73, - 0x74, 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x38, 0x0a, 0x16, 0x75, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, - 0x6c, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x14, 0x75, 0x73, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 
0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x1d, - 0x10, 0x1e, 0x22, 0xdc, 0x01, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x49, 0x64, 
- 0x42, 0x02, 0x68, 0x00, 0x22, 0x87, 0x05, 0x0a, 0x12, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, - 0x0a, 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x65, 0x64, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x09, 0x52, 
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x30, 0x0a, 0x12, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5c, - 0x0a, 0x13, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x5f, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, - 0x2e, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x0d, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, - 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x07, 0x10, 
0x08, 0x22, 0xd0, 0x01, 0x0a, 0x11, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, - 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x2e, 0x0a, 0x11, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x61, 0x6e, 0x63, 0x65, - 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, - 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, - 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xd4, 0x01, 0x0a, - 0x0a, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x18, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x30, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, - 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, - 0x0a, 0x06, 0x66, 0x6c, 0x61, 0x76, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x46, 0x6c, 0x61, 0x76, 0x6f, 0x72, 0x52, 0x06, 0x66, 0x6c, 0x61, 0x76, 0x6f, 0x72, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x5b, 0x0a, 0x0c, - 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4b, 0x0a, 0x0b, 0x70, - 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, - 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 
0x63, 0x49, 0x6e, 0x66, 0x6f, - 0x42, 0x02, 0x68, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +type CallbackInfo_Trigger_WorkflowClosed struct { + WorkflowClosed *CallbackInfo_WorkflowClosed `protobuf:"bytes,1,opt,name=workflow_closed,json=workflowClosed,proto3,oneof"` } +func (*CallbackInfo_Trigger_WorkflowClosed) isCallbackInfo_Trigger_Variant() {} + +var File_temporal_server_api_persistence_v1_executions_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_executions_proto_rawDesc = "" + + "\n" + + "3temporal/server/api/persistence/v1/executions.proto\x12\"temporal.server.api.persistence.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a(temporal/api/deployment/v1/message.proto\x1a\"temporal/api/enums/v1/common.proto\x1a&temporal/api/enums/v1/event_type.proto\x1a(temporal/api/enums/v1/failed_cause.proto\x1a$temporal/api/enums/v1/workflow.proto\x1a%temporal/api/failure/v1/message.proto\x1a%temporal/api/history/v1/message.proto\x1a$temporal/api/worker/v1/message.proto\x1a&temporal/api/workflow/v1/message.proto\x1a*temporal/server/api/clock/v1/message.proto\x1a)temporal/server/api/enums/v1/common.proto\x1a(temporal/server/api/enums/v1/nexus.proto\x1a'temporal/server/api/enums/v1/task.proto\x1a+temporal/server/api/enums/v1/workflow.proto\x1a5temporal/server/api/enums/v1/workflow_task_type.proto\x1a,temporal/server/api/history/v1/message.proto\x1a.temporal/server/api/persistence/v1/chasm.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\x1a/temporal/server/api/persistence/v1/queues.proto\x1a/temporal/server/api/pers
istence/v1/update.proto\x1a-temporal/server/api/workflow/v1/message.proto\"\xa3\x05\n" + + "\tShardInfo\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x12\x19\n" + + "\brange_id\x18\x02 \x01(\x03R\arangeId\x12\x14\n" + + "\x05owner\x18\x03 \x01(\tR\x05owner\x12,\n" + + "\x12stolen_since_renew\x18\x06 \x01(\x05R\x10stolenSinceRenew\x12;\n" + + "\vupdate_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "updateTime\x12\x84\x01\n" + + "\x19replication_dlq_ack_level\x18\r \x03(\v2I.temporal.server.api.persistence.v1.ShardInfo.ReplicationDlqAckLevelEntryR\x16replicationDlqAckLevel\x12a\n" + + "\fqueue_states\x18\x11 \x03(\v2>.temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntryR\vqueueStates\x1aI\n" + + "\x1bReplicationDlqAckLevelEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01\x1an\n" + + "\x10QueueStatesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12D\n" + + "\x05value\x18\x02 \x01(\v2..temporal.server.api.persistence.v1.QueueStateR\x05value:\x028\x01J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "J\x04\b\n" + + "\x10\vJ\x04\b\v\x10\fJ\x04\b\f\x10\rJ\x04\b\x0e\x10\x0fJ\x04\b\x0f\x10\x10J\x04\b\x10\x10\x11\"\xbfC\n" + + "\x15WorkflowExecutionInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12.\n" + + "\x13parent_namespace_id\x18\x03 \x01(\tR\x11parentNamespaceId\x12,\n" + + "\x12parent_workflow_id\x18\x04 \x01(\tR\x10parentWorkflowId\x12\"\n" + + "\rparent_run_id\x18\x05 \x01(\tR\vparentRunId\x12.\n" + + "\x13parent_initiated_id\x18\x06 \x01(\x03R\x11parentInitiatedId\x129\n" + + "\x19completion_event_batch_id\x18\a \x01(\x03R\x16completionEventBatchId\x12\x1d\n" + + "\n" + + "task_queue\x18\t \x01(\tR\ttaskQueue\x12,\n" + + "\x12workflow_type_name\x18\n" + + " \x01(\tR\x10workflowTypeName\x12W\n" + + "\x1aworkflow_execution_timeout\x18\v 
\x01(\v2\x19.google.protobuf.DurationR\x18workflowExecutionTimeout\x12K\n" + + "\x14workflow_run_timeout\x18\f \x01(\v2\x19.google.protobuf.DurationR\x12workflowRunTimeout\x12\\\n" + + "\x1ddefault_workflow_task_timeout\x18\r \x01(\v2\x19.google.protobuf.DurationR\x1adefaultWorkflowTaskTimeout\x12,\n" + + "\x12last_running_clock\x18\x11 \x01(\x03R\x10lastRunningClock\x12-\n" + + "\x13last_first_event_id\x18\x12 \x01(\x03R\x10lastFirstEventId\x12^\n" + + "-last_completed_workflow_task_started_event_id\x18\x13 \x01(\x03R'lastCompletedWorkflowTaskStartedEventId\x129\n" + + "\n" + + "start_time\x18\x14 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x12D\n" + + "\x10last_update_time\x18\x15 \x01(\v2\x1a.google.protobuf.TimestampR\x0elastUpdateTime\x122\n" + + "\x15workflow_task_version\x18\x16 \x01(\x03R\x13workflowTaskVersion\x12F\n" + + " workflow_task_scheduled_event_id\x18\x17 \x01(\x03R\x1cworkflowTaskScheduledEventId\x12B\n" + + "\x1eworkflow_task_started_event_id\x18\x18 \x01(\x03R\x1aworkflowTaskStartedEventId\x12M\n" + + "\x15workflow_task_timeout\x18\x19 \x01(\v2\x19.google.protobuf.DurationR\x13workflowTaskTimeout\x122\n" + + "\x15workflow_task_attempt\x18\x1a \x01(\x05R\x13workflowTaskAttempt\x12W\n" + + "\x1aworkflow_task_started_time\x18\x1b \x01(\v2\x1a.google.protobuf.TimestampR\x17workflowTaskStartedTime\x12[\n" + + "\x1cworkflow_task_scheduled_time\x18\x1c \x01(\v2\x1a.google.protobuf.TimestampR\x19workflowTaskScheduledTime\x12l\n" + + "%workflow_task_original_scheduled_time\x18\x1e \x01(\v2\x1a.google.protobuf.TimestampR!workflowTaskOriginalScheduledTime\x127\n" + + "\x18workflow_task_request_id\x18\x1f \x01(\tR\x15workflowTaskRequestId\x12\\\n" + + "\x12workflow_task_type\x18D \x01(\x0e2..temporal.server.api.enums.v1.WorkflowTaskTypeR\x10workflowTaskType\x12O\n" + + "%workflow_task_suggest_continue_as_new\x18E \x01(\bR workflowTaskSuggestContinueAsNew\x12\x91\x01\n" + + "-workflow_task_suggest_continue_as_new_reasons\x18n 
\x03(\x0e21.temporal.api.enums.v1.SuggestContinueAsNewReasonR'workflowTaskSuggestContinueAsNewReasons\x12p\n" + + "6workflow_task_target_worker_deployment_version_changed\x18p \x01(\bR0workflowTaskTargetWorkerDeploymentVersionChanged\x12F\n" + + " workflow_task_history_size_bytes\x18F \x01(\x03R\x1cworkflowTaskHistorySizeBytes\x123\n" + + "\x16workflow_task_build_id\x18X \x01(\tR\x13workflowTaskBuildId\x12S\n" + + "'workflow_task_build_id_redirect_counter\x18Y \x01(\x03R\"workflowTaskBuildIdRedirectCounter\x12.\n" + + "\x13workflow_task_stamp\x18m \x01(\x05R\x11workflowTaskStamp\x12W\n" + + ")workflow_task_attempts_since_last_success\x18o \x01(\x05R$workflowTaskAttemptsSinceLastSuccess\x12)\n" + + "\x10cancel_requested\x18\x1d \x01(\bR\x0fcancelRequested\x12*\n" + + "\x11cancel_request_id\x18 \x01(\tR\x0fcancelRequestId\x12*\n" + + "\x11sticky_task_queue\x18! \x01(\tR\x0fstickyTaskQueue\x12a\n" + + " sticky_schedule_to_start_timeout\x18\" \x01(\v2\x19.google.protobuf.DurationR\x1cstickyScheduleToStartTimeout\x12\x18\n" + + "\aattempt\x18# \x01(\x05R\aattempt\x12O\n" + + "\x16retry_initial_interval\x18$ \x01(\v2\x19.google.protobuf.DurationR\x14retryInitialInterval\x12O\n" + + "\x16retry_maximum_interval\x18% \x01(\v2\x19.google.protobuf.DurationR\x14retryMaximumInterval\x124\n" + + "\x16retry_maximum_attempts\x18& \x01(\x05R\x14retryMaximumAttempts\x12:\n" + + "\x19retry_backoff_coefficient\x18' \x01(\x01R\x17retryBackoffCoefficient\x12g\n" + + "\"workflow_execution_expiration_time\x18( \x01(\v2\x1a.google.protobuf.TimestampR\x1fworkflowExecutionExpirationTime\x12D\n" + + "\x1fretry_non_retryable_error_types\x18) \x03(\tR\x1bretryNonRetryableErrorTypes\x12(\n" + + "\x10has_retry_policy\x18* \x01(\bR\x0ehasRetryPolicy\x12#\n" + + "\rcron_schedule\x18+ \x01(\tR\fcronSchedule\x12!\n" + + "\fsignal_count\x18. 
\x01(\x03R\vsignalCount\x12%\n" + + "\x0eactivity_count\x18G \x01(\x03R\ractivityCount\x122\n" + + "\x15child_execution_count\x18H \x01(\x03R\x13childExecutionCount\x12(\n" + + "\x10user_timer_count\x18I \x01(\x03R\x0euserTimerCount\x12A\n" + + "\x1drequest_cancel_external_count\x18J \x01(\x03R\x1arequestCancelExternalCount\x122\n" + + "\x15signal_external_count\x18K \x01(\x03R\x13signalExternalCount\x12!\n" + + "\fupdate_count\x18M \x01(\x03R\vupdateCount\x12Q\n" + + "\x11auto_reset_points\x183 \x01(\v2%.temporal.api.workflow.v1.ResetPointsR\x0fautoResetPoints\x12|\n" + + "\x11search_attributes\x184 \x03(\v2O.temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntryR\x10searchAttributes\x12W\n" + + "\x04memo\x185 \x03(\v2C.temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntryR\x04memo\x12]\n" + + "\x11version_histories\x186 \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistories\x123\n" + + "\x16first_execution_run_id\x187 \x01(\tR\x13firstExecutionRunId\x12[\n" + + "\x0fexecution_stats\x188 \x01(\v22.temporal.server.api.persistence.v1.ExecutionStatsR\x0eexecutionStats\x12[\n" + + "\x1cworkflow_run_expiration_time\x189 \x01(\v2\x1a.google.protobuf.TimestampR\x19workflowRunExpirationTime\x124\n" + + "\x17last_first_event_txn_id\x18: \x01(\x03R\x13lastFirstEventTxnId\x124\n" + + "\x16state_transition_count\x18; \x01(\x03R\x14stateTransitionCount\x12A\n" + + "\x0eexecution_time\x18< \x01(\v2\x1a.google.protobuf.TimestampR\rexecutionTime\x12/\n" + + "\x14new_execution_run_id\x18= \x01(\tR\x11newExecutionRunId\x12L\n" + + "\fparent_clock\x18> \x01(\v2).temporal.server.api.clock.v1.VectorClockR\vparentClock\x128\n" + + "\x18parent_initiated_version\x18? 
\x01(\x03R\x16parentInitiatedVersion\x123\n" + + "\x16close_transfer_task_id\x18@ \x01(\x03R\x13closeTransferTaskId\x127\n" + + "\x18close_visibility_task_id\x18A \x01(\x03R\x15closeVisibilityTaskId\x129\n" + + "\n" + + "close_time\x18B \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x12D\n" + + "\x1erelocatable_attributes_removed\x18C \x01(\bR\x1crelocatableAttributesRemoved\x12b\n" + + "\x13base_execution_info\x18L \x01(\v22.temporal.server.api.workflow.v1.BaseExecutionInfoR\x11baseExecutionInfo\x12r\n" + + " most_recent_worker_version_stamp\x18N \x01(\v2*.temporal.api.common.v1.WorkerVersionStampR\x1cmostRecentWorkerVersionStamp\x12*\n" + + "\x11assigned_build_id\x18U \x01(\tR\x0fassignedBuildId\x12,\n" + + "\x12inherited_build_id\x18V \x01(\tR\x10inheritedBuildId\x129\n" + + "\x19build_id_redirect_counter\x18W \x01(\x03R\x16buildIdRedirectCounter\x12m\n" + + "\fupdate_infos\x18O \x03(\v2J.temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntryR\vupdateInfos\x12f\n" + + "\x12transition_history\x18P \x03(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x11transitionHistory\x12\x91\x01\n" + + "\x1asub_state_machines_by_type\x18Q \x03(\v2U.temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntryR\x16subStateMachinesByType\x12N\n" + + "$workflow_execution_timer_task_status\x18R \x01(\x05R workflowExecutionTimerTaskStatus\x12(\n" + + "\x10root_workflow_id\x18S \x01(\tR\x0erootWorkflowId\x12\x1e\n" + + "\vroot_run_id\x18T \x01(\tR\trootRunId\x12l\n" + + "\x14state_machine_timers\x18Z \x03(\v2:.temporal.server.api.persistence.v1.StateMachineTimerGroupR\x12stateMachineTimers\x12P\n" + + "%task_generation_shard_clock_timestamp\x18[ \x01(\x03R!taskGenerationShardClockTimestamp\x12\x9a\x01\n" + + ".workflow_task_last_update_versioned_transition\x18\\ \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR)workflowTaskLastUpdateVersionedTransition\x12\x95\x01\n" + + 
"+visibility_last_update_versioned_transition\x18] \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR'visibilityLastUpdateVersionedTransition\x12\xa3\x01\n" + + "3signal_request_ids_last_update_versioned_transition\x18^ \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR-signalRequestIdsLastUpdateVersionedTransition\x12\x8c\x01\n" + + "#sub_state_machine_tombstone_batches\x18_ \x03(\v2>.temporal.server.api.persistence.v1.StateMachineTombstoneBatchR\x1fsubStateMachineTombstoneBatches\x12,\n" + + "\x12workflow_was_reset\x18` \x01(\bR\x10workflowWasReset\x12 \n" + + "\freset_run_id\x18a \x01(\tR\n" + + "resetRunId\x12b\n" + + "\x0fversioning_info\x18b \x01(\v29.temporal.api.workflow.v1.WorkflowExecutionVersioningInfoR\x0eversioningInfo\x129\n" + + "\x19original_execution_run_id\x18c \x01(\tR\x16originalExecutionRunId\x12w\n" + + "\x1bprevious_transition_history\x18d \x03(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x19previousTransitionHistory\x12\x85\x01\n" + + "#last_transition_history_break_point\x18e \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1flastTransitionHistoryBreakPoint\x12\xb2\x01\n" + + "%children_initialized_post_reset_point\x18f \x03(\v2`.temporal.server.api.persistence.v1.WorkflowExecutionInfo.ChildrenInitializedPostResetPointEntryR!childrenInitializedPostResetPoint\x124\n" + + "\x16worker_deployment_name\x18g \x01(\tR\x14workerDeploymentName\x12<\n" + + "\bpriority\x18h \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12(\n" + + "\x10successor_run_id\x18i \x01(\tR\x0esuccessorRunId\x12T\n" + + "\n" + + "pause_info\x18j \x01(\v25.temporal.server.api.persistence.v1.WorkflowPauseInfoR\tpauseInfo\x12x\n" + + " last_workflow_task_failure_cause\x18k \x01(\x0e2..temporal.api.enums.v1.WorkflowTaskFailedCauseH\x00R\x1clastWorkflowTaskFailureCause\x12m\n" + + "!last_workflow_task_timed_out_type\x18l 
\x01(\x0e2\".temporal.api.enums.v1.TimeoutTypeH\x00R\x1clastWorkflowTaskTimedOutType\x12~\n" + + "\x1clast_notified_target_version\x18q \x01(\v2=.temporal.server.api.persistence.v1.LastNotifiedTargetVersionR\x19lastNotifiedTargetVersion\x12|\n" + + "\x1fdeclined_target_version_upgrade\x18r \x01(\v25.temporal.api.history.v1.DeclinedTargetVersionUpgradeR\x1cdeclinedTargetVersionUpgrade\x12b\n" + + "\x12time_skipping_info\x18s \x01(\v24.temporal.server.api.persistence.v1.TimeSkippingInfoR\x10timeSkippingInfo\x1ad\n" + + "\x15SearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x125\n" + + "\x05value\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x05value:\x028\x01\x1aX\n" + + "\tMemoEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x125\n" + + "\x05value\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x05value:\x028\x01\x1an\n" + + "\x10UpdateInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12D\n" + + "\x05value\x18\x02 \x01(\v2..temporal.server.api.persistence.v1.UpdateInfoR\x05value:\x028\x01\x1a~\n" + + "\x1bSubStateMachinesByTypeEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12I\n" + + "\x05value\x18\x02 \x01(\v23.temporal.server.api.persistence.v1.StateMachineMapR\x05value:\x028\x01\x1a\x88\x01\n" + + "&ChildrenInitializedPostResetPointEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12H\n" + + "\x05value\x18\x02 \x01(\v22.temporal.server.api.persistence.v1.ResetChildInfoR\x05value:\x028\x01B\x1c\n" + + "\x1alast_workflow_task_failureJ\x04\b\b\x10\tJ\x04\b\x0e\x10\x0fJ\x04\b\x0f\x10\x10J\x04\b\x10\x10\x11J\x04\b,\x10-J\x04\b-\x10.J\x04\b/\x100J\x04\b0\x101J\x04\b1\x102J\x04\b2\x103\"\xb5\x02\n" + + "\x10TimeSkippingInfo\x12D\n" + + "\x06config\x18\x01 \x01(\v2,.temporal.api.workflow.v1.TimeSkippingConfigR\x06config\x12[\n" + + "\x1caccumulated_skipped_duration\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x1aaccumulatedSkippedDuration\x12~\n" + + "\x1ecurrent_elapsed_duration_bound\x18\x03 
\x01(\v29.temporal.server.api.persistence.v1.TimeSkippingBoundInfoR\x1bcurrentElapsedDurationBound\"\x9d\x01\n" + + "\x15TimeSkippingBoundInfo\x12;\n" + + "\vtarget_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "targetTime\x12\x1f\n" + + "\vhas_reached\x18\x02 \x01(\bR\n" + + "hasReached\x12&\n" + + "\x0fsource_event_id\x18\x03 \x01(\x03R\rsourceEventId\"\xa8\x01\n" + + "\x19LastNotifiedTargetVersion\x12b\n" + + "\x12deployment_version\x18\x01 \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\x11deploymentVersion\x12'\n" + + "\x0frevision_number\x18\x02 \x01(\x03R\x0erevisionNumber\"\x9d\x01\n" + + "\x0eExecutionStats\x12!\n" + + "\fhistory_size\x18\x01 \x01(\x03R\vhistorySize\x122\n" + + "\x15external_payload_size\x18\x02 \x01(\x03R\x13externalPayloadSize\x124\n" + + "\x16external_payload_count\x18\x03 \x01(\x03R\x14externalPayloadCount\"\x8c\x05\n" + + "\x16WorkflowExecutionState\x12*\n" + + "\x11create_request_id\x18\x01 \x01(\tR\x0fcreateRequestId\x12\x15\n" + + "\x06run_id\x18\x02 \x01(\tR\x05runId\x12J\n" + + "\x05state\x18\x03 \x01(\x0e24.temporal.server.api.enums.v1.WorkflowExecutionStateR\x05state\x12F\n" + + "\x06status\x18\x04 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x05 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\x129\n" + + "\n" + + "start_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x12k\n" + + "\vrequest_ids\x18\a \x03(\v2J.temporal.server.api.persistence.v1.WorkflowExecutionState.RequestIdsEntryR\n" + + "requestIds\x1ap\n" + + "\x0fRequestIdsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12G\n" + + "\x05value\x18\x02 \x01(\v21.temporal.server.api.persistence.v1.RequestIDInfoR\x05value:\x028\x01\"k\n" + + "\rRequestIDInfo\x12?\n" + + "\n" + + "event_type\x18\x01 \x01(\x0e2 .temporal.api.enums.v1.EventTypeR\teventType\x12\x19\n" + + "\bevent_id\x18\x02 
\x01(\x03R\aeventId\"\xdf\a\n" + + "\x10TransferTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x04 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12.\n" + + "\x13target_namespace_id\x18\x05 \x01(\tR\x11targetNamespaceId\x12,\n" + + "\x12target_workflow_id\x18\x06 \x01(\tR\x10targetWorkflowId\x12\"\n" + + "\rtarget_run_id\x18\a \x01(\tR\vtargetRunId\x12\x1d\n" + + "\n" + + "task_queue\x18\b \x01(\tR\ttaskQueue\x12;\n" + + "\x1atarget_child_workflow_only\x18\t \x01(\bR\x17targetChildWorkflowOnly\x12,\n" + + "\x12scheduled_event_id\x18\n" + + " \x01(\x03R\x10scheduledEventId\x12\x18\n" + + "\aversion\x18\v \x01(\x03R\aversion\x12\x17\n" + + "\atask_id\x18\f \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\x12,\n" + + "\x12delete_after_close\x18\x0f \x01(\bR\x10deleteAfterClose\x12\x91\x01\n" + + "\x1cclose_execution_task_details\x18\x10 \x01(\v2N.temporal.server.api.persistence.v1.TransferTaskInfo.CloseExecutionTaskDetailsH\x00R\x19closeExecutionTaskDetails\x12[\n" + + "\x0fchasm_task_info\x18\x12 \x01(\v21.temporal.server.api.persistence.v1.ChasmTaskInfoH\x00R\rchasmTaskInfo\x12\x14\n" + + "\x05stamp\x18\x11 \x01(\x05R\x05stamp\x1a\\\n" + + "\x19CloseExecutionTaskDetails\x12?\n" + + "\x1ccan_skip_visibility_archival\x18\x01 \x01(\bR\x19canSkipVisibilityArchivalB\x0e\n" + + "\ftask_detailsJ\x04\b\x0e\x10\x0f\"\xd8\b\n" + + "\x13ReplicationTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x04 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12\x18\n" + + "\aversion\x18\x05 \x01(\x03R\aversion\x12$\n" + + "\x0efirst_event_id\x18\x06 
\x01(\x03R\ffirstEventId\x12\"\n" + + "\rnext_event_id\x18\a \x01(\x03R\vnextEventId\x12,\n" + + "\x12scheduled_event_id\x18\b \x01(\x03R\x10scheduledEventId\x12!\n" + + "\fbranch_token\x18\v \x01(\fR\vbranchToken\x12/\n" + + "\x14new_run_branch_token\x18\r \x01(\fR\x11newRunBranchToken\x12\x17\n" + + "\atask_id\x18\x0f \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\x10 \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\x12\x1c\n" + + "\n" + + "new_run_id\x18\x11 \x01(\tR\bnewRunId\x12F\n" + + "\bpriority\x18\x12 \x01(\x0e2*.temporal.server.api.enums.v1.TaskPriorityR\bpriority\x12j\n" + + "\x14versioned_transition\x18\x13 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12b\n" + + "\x10task_equivalents\x18\x14 \x03(\v27.temporal.server.api.persistence.v1.ReplicationTaskInfoR\x0ftaskEquivalents\x12m\n" + + "\x19last_version_history_item\x18\x15 \x01(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x16lastVersionHistoryItem\x12\"\n" + + "\ris_first_task\x18\x16 \x01(\bR\visFirstTask\x12'\n" + + "\x0ftarget_clusters\x18\x17 \x03(\tR\x0etargetClusters\x120\n" + + "\x14is_force_replication\x18\x18 \x01(\bR\x12isForceReplication\x12!\n" + + "\farchetype_id\x18\x19 \x01(\rR\varchetypeIdJ\x04\b\t\x10\n" + + "J\x04\b\n" + + "\x10\vJ\x04\b\f\x10\rJ\x04\b\x0e\x10\x0f\"\x99\x04\n" + + "\x12VisibilityTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x04 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12\x18\n" + + "\aversion\x18\x05 \x01(\x03R\aversion\x12\x17\n" + + "\atask_id\x18\x06 \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\x127\n" + + "\x18close_visibility_task_id\x18\n" + + " \x01(\x03R\x15closeVisibilityTaskId\x129\n" + + "\n" + + "close_time\x18\v 
\x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\x12[\n" + + "\x0fchasm_task_info\x18\f \x01(\v21.temporal.server.api.persistence.v1.ChasmTaskInfoH\x00R\rchasmTaskInfoB\x0e\n" + + "\ftask_detailsJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\xd3\x06\n" + + "\rTimerTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x04 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12E\n" + + "\ftimeout_type\x18\x05 \x01(\x0e2\".temporal.api.enums.v1.TimeoutTypeR\vtimeoutType\x12e\n" + + "\x15workflow_backoff_type\x18\x06 \x01(\x0e21.temporal.server.api.enums.v1.WorkflowBackoffTypeR\x13workflowBackoffType\x12\x18\n" + + "\aversion\x18\a \x01(\x03R\aversion\x12)\n" + + "\x10schedule_attempt\x18\b \x01(\x05R\x0fscheduleAttempt\x12\x19\n" + + "\bevent_id\x18\t \x01(\x03R\aeventId\x12\x17\n" + + "\atask_id\x18\n" + + " \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\v \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\x12!\n" + + "\fbranch_token\x18\f \x01(\fR\vbranchToken\x12)\n" + + "\x10already_archived\x18\r \x01(\bR\x0falreadyArchived\x12C\n" + + "\x1emutable_state_transition_count\x18\x0e \x01(\x03R\x1bmutableStateTransitionCount\x12 \n" + + "\ffirst_run_id\x18\x0f \x01(\tR\n" + + "firstRunId\x12\x14\n" + + "\x05stamp\x18\x10 \x01(\x05R\x05stamp\x12[\n" + + "\x0fchasm_task_info\x18\x11 \x01(\v21.temporal.server.api.persistence.v1.ChasmTaskInfoH\x00R\rchasmTaskInfoB\x0e\n" + + "\ftask_details\"\xaa\x02\n" + + "\x10ArchivalTaskInfo\x12\x17\n" + + "\atask_id\x18\x01 \x01(\x03R\x06taskId\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x03 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x04 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x05 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12\x18\n" + + "\aversion\x18\x06 
\x01(\x03R\aversion\x12C\n" + + "\x0fvisibility_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\"\xf5\x04\n" + + "\x10OutboundTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x04 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12\x17\n" + + "\atask_id\x18\x05 \x01(\x03R\x06taskId\x12C\n" + + "\x0fvisibility_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\x12 \n" + + "\vdestination\x18\a \x01(\tR\vdestination\x12h\n" + + "\x12state_machine_info\x18\b \x01(\v28.temporal.server.api.persistence.v1.StateMachineTaskInfoH\x00R\x10stateMachineInfo\x12[\n" + + "\x0fchasm_task_info\x18\t \x01(\v21.temporal.server.api.persistence.v1.ChasmTaskInfoH\x00R\rchasmTaskInfo\x12j\n" + + "\x14worker_commands_task\x18\n" + + " \x01(\v26.temporal.server.api.persistence.v1.WorkerCommandsTaskH\x00R\x12workerCommandsTaskB\x0e\n" + + "\ftask_details\"W\n" + + "\x12WorkerCommandsTask\x12A\n" + + "\bcommands\x18\x01 \x03(\v2%.temporal.api.worker.v1.WorkerCommandR\bcommands\"3\n" + + "\x17NexusInvocationTaskInfo\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"4\n" + + "\x18NexusCancelationTaskInfo\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"\xa9\x1c\n" + + "\fActivityInfo\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x127\n" + + "\x18scheduled_event_batch_id\x18\x02 \x01(\x03R\x15scheduledEventBatchId\x12A\n" + + "\x0escheduled_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12(\n" + + "\x10started_event_id\x18\x05 \x01(\x03R\x0estartedEventId\x12=\n" + + "\fstarted_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12\x1f\n" + + "\vactivity_id\x18\b \x01(\tR\n" + + "activityId\x12\x1d\n" + + "\n" + + "request_id\x18\t \x01(\tR\trequestId\x12T\n" + + "\x19schedule_to_start_timeout\x18\n" + + " 
\x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12T\n" + + "\x19schedule_to_close_timeout\x18\v \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToCloseTimeout\x12N\n" + + "\x16start_to_close_timeout\x18\f \x01(\v2\x19.google.protobuf.DurationR\x13startToCloseTimeout\x12F\n" + + "\x11heartbeat_timeout\x18\r \x01(\v2\x19.google.protobuf.DurationR\x10heartbeatTimeout\x12)\n" + + "\x10cancel_requested\x18\x0e \x01(\bR\x0fcancelRequested\x12*\n" + + "\x11cancel_request_id\x18\x0f \x01(\x03R\x0fcancelRequestId\x12*\n" + + "\x11timer_task_status\x18\x10 \x01(\x05R\x0ftimerTaskStatus\x12\x18\n" + + "\aattempt\x18\x11 \x01(\x05R\aattempt\x12\x1d\n" + + "\n" + + "task_queue\x18\x12 \x01(\tR\ttaskQueue\x12)\n" + + "\x10started_identity\x18\x13 \x01(\tR\x0fstartedIdentity\x12(\n" + + "\x10has_retry_policy\x18\x14 \x01(\bR\x0ehasRetryPolicy\x12O\n" + + "\x16retry_initial_interval\x18\x15 \x01(\v2\x19.google.protobuf.DurationR\x14retryInitialInterval\x12O\n" + + "\x16retry_maximum_interval\x18\x16 \x01(\v2\x19.google.protobuf.DurationR\x14retryMaximumInterval\x124\n" + + "\x16retry_maximum_attempts\x18\x17 \x01(\x05R\x14retryMaximumAttempts\x12N\n" + + "\x15retry_expiration_time\x18\x18 \x01(\v2\x1a.google.protobuf.TimestampR\x13retryExpirationTime\x12:\n" + + "\x19retry_backoff_coefficient\x18\x19 \x01(\x01R\x17retryBackoffCoefficient\x12D\n" + + "\x1fretry_non_retryable_error_types\x18\x1a \x03(\tR\x1bretryNonRetryableErrorTypes\x12N\n" + + "\x12retry_last_failure\x18\x1b \x01(\v2 .temporal.api.failure.v1.FailureR\x10retryLastFailure\x12;\n" + + "\x1aretry_last_worker_identity\x18\x1c \x01(\tR\x17retryLastWorkerIdentity\x12,\n" + + "\x12scheduled_event_id\x18\x1e \x01(\x03R\x10scheduledEventId\x12V\n" + + "\x16last_heartbeat_details\x18\x1f \x01(\v2 .temporal.api.common.v1.PayloadsR\x14lastHeartbeatDetails\x12W\n" + + "\x1alast_heartbeat_update_time\x18 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastHeartbeatUpdateTime\x124\n" + + 
"\x16use_compatible_version\x18! \x01(\bR\x14useCompatibleVersion\x12I\n" + + "\ractivity_type\x18\" \x01(\v2$.temporal.api.common.v1.ActivityTypeR\factivityType\x12\x85\x01\n" + + "\x1ause_workflow_build_id_info\x18# \x01(\v2G.temporal.server.api.persistence.v1.ActivityInfo.UseWorkflowBuildIdInfoH\x00R\x16useWorkflowBuildIdInfo\x12P\n" + + "$last_independently_assigned_build_id\x18$ \x01(\tH\x00R lastIndependentlyAssignedBuildId\x12e\n" + + "\x19last_worker_version_stamp\x18% \x01(\v2*.temporal.api.common.v1.WorkerVersionStampR\x16lastWorkerVersionStamp\x12\x80\x01\n" + + " last_update_versioned_transition\x18& \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\x12L\n" + + "\x14first_scheduled_time\x18' \x01(\v2\x1a.google.protobuf.TimestampR\x12firstScheduledTime\x12W\n" + + "\x1alast_attempt_complete_time\x18( \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12\x14\n" + + "\x05stamp\x18) \x01(\x05R\x05stamp\x12\x16\n" + + "\x06paused\x18* \x01(\bR\x06paused\x12^\n" + + "\x17last_started_deployment\x18+ \x01(\v2&.temporal.api.deployment.v1.DeploymentR\x15lastStartedDeployment\x12C\n" + + "\x1elast_worker_deployment_version\x18, \x01(\tR\x1blastWorkerDeploymentVersion\x12k\n" + + "\x17last_deployment_version\x181 \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\x15lastDeploymentVersion\x12<\n" + + "\bpriority\x18- \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12Y\n" + + "\n" + + "pause_info\x18. 
\x01(\v2:.temporal.server.api.persistence.v1.ActivityInfo.PauseInfoR\tpauseInfo\x12%\n" + + "\x0eactivity_reset\x18/ \x01(\bR\ractivityReset\x12)\n" + + "\x10reset_heartbeats\x180 \x01(\bR\x0fresetHeartbeats\x12#\n" + + "\rstart_version\x182 \x01(\x03R\fstartVersion\x129\n" + + "\x19worker_control_task_queue\x183 \x01(\tR\x16workerControlTaskQueue\x12N\n" + + "\rstarted_clock\x184 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\fstartedClock\x1ay\n" + + "\x16UseWorkflowBuildIdInfo\x12+\n" + + "\x12last_used_build_id\x18\x01 \x01(\tR\x0flastUsedBuildId\x122\n" + + "\x15last_redirect_counter\x18\x02 \x01(\x03R\x13lastRedirectCounter\x1a\x89\x02\n" + + "\tPauseInfo\x129\n" + + "\n" + + "pause_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\tpauseTime\x12[\n" + + "\x06manual\x18\x02 \x01(\v2A.temporal.server.api.persistence.v1.ActivityInfo.PauseInfo.ManualH\x00R\x06manual\x12\x19\n" + + "\arule_id\x18\x03 \x01(\tH\x00R\x06ruleId\x1a<\n" + + "\x06Manual\x12\x1a\n" + + "\bidentity\x18\x01 \x01(\tR\bidentity\x12\x16\n" + + "\x06reason\x18\x02 \x01(\tR\x06reasonB\v\n" + + "\tpaused_byB\x0f\n" + + "\rbuild_id_infoJ\x04\b\x03\x10\x04J\x04\b\x06\x10\aJ\x04\b\x1d\x10\x1e\"\xcb\x02\n" + + "\tTimerInfo\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x12(\n" + + "\x10started_event_id\x18\x02 \x01(\x03R\x0estartedEventId\x12;\n" + + "\vexpiry_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "expiryTime\x12\x1f\n" + + "\vtask_status\x18\x04 \x01(\x03R\n" + + "taskStatus\x12\x19\n" + + "\btimer_id\x18\x05 \x01(\tR\atimerId\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x06 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\"\x98\x06\n" + + "\x12ChildExecutionInfo\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x127\n" + + "\x18initiated_event_batch_id\x18\x02 \x01(\x03R\x15initiatedEventBatchId\x12(\n" + + "\x10started_event_id\x18\x03 \x01(\x03R\x0estartedEventId\x12.\n" + + 
"\x13started_workflow_id\x18\x05 \x01(\tR\x11startedWorkflowId\x12$\n" + + "\x0estarted_run_id\x18\x06 \x01(\tR\fstartedRunId\x12*\n" + + "\x11create_request_id\x18\b \x01(\tR\x0fcreateRequestId\x12\x1c\n" + + "\tnamespace\x18\t \x01(\tR\tnamespace\x12,\n" + + "\x12workflow_type_name\x18\n" + + " \x01(\tR\x10workflowTypeName\x12X\n" + + "\x13parent_close_policy\x18\v \x01(\x0e2(.temporal.api.enums.v1.ParentClosePolicyR\x11parentClosePolicy\x12,\n" + + "\x12initiated_event_id\x18\f \x01(\x03R\x10initiatedEventId\x12?\n" + + "\x05clock\x18\r \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12!\n" + + "\fnamespace_id\x18\x0e \x01(\tR\vnamespaceId\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x0f \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\x12<\n" + + "\bpriority\x18\x10 \x01(\v2 .temporal.api.common.v1.PriorityR\bpriorityJ\x04\b\x04\x10\x05J\x04\b\a\x10\b\"\xc3\x02\n" + + "\x11RequestCancelInfo\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x127\n" + + "\x18initiated_event_batch_id\x18\x02 \x01(\x03R\x15initiatedEventBatchId\x12*\n" + + "\x11cancel_request_id\x18\x03 \x01(\tR\x0fcancelRequestId\x12,\n" + + "\x12initiated_event_id\x18\x04 \x01(\x03R\x10initiatedEventId\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x05 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\"\xc7\x02\n" + + "\n" + + "SignalInfo\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x127\n" + + "\x18initiated_event_batch_id\x18\x02 \x01(\x03R\x15initiatedEventBatchId\x12\x1d\n" + + "\n" + + "request_id\x18\x03 \x01(\tR\trequestId\x12,\n" + + "\x12initiated_event_id\x18\a \x01(\x03R\x10initiatedEventId\x12\x80\x01\n" + + " last_update_versioned_transition\x18\t \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransitionJ\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\t\"\x80\x01\n" + + 
"\bChecksum\x12\x18\n" + + "\aversion\x18\x01 \x01(\x05R\aversion\x12D\n" + + "\x06flavor\x18\x02 \x01(\x0e2,.temporal.server.api.enums.v1.ChecksumFlavorR\x06flavor\x12\x14\n" + + "\x05value\x18\x03 \x01(\fR\x05value\"\xd2\x04\n" + + "\bCallback\x12J\n" + + "\x05nexus\x18\x02 \x01(\v22.temporal.server.api.persistence.v1.Callback.NexusH\x00R\x05nexus\x12D\n" + + "\x03hsm\x18\x03 \x01(\v20.temporal.server.api.persistence.v1.Callback.HSMH\x00R\x03hsm\x122\n" + + "\x05links\x18d \x03(\v2\x1c.temporal.api.common.v1.LinkR\x05links\x1a\xac\x01\n" + + "\x05Nexus\x12\x10\n" + + "\x03url\x18\x01 \x01(\tR\x03url\x12V\n" + + "\x06header\x18\x02 \x03(\v2>.temporal.server.api.persistence.v1.Callback.Nexus.HeaderEntryR\x06header\x1a9\n" + + "\vHeaderEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a\xbf\x01\n" + + "\x03HSM\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12E\n" + + "\x03ref\x18\x04 \x01(\v23.temporal.server.api.persistence.v1.StateMachineRefR\x03ref\x12\x16\n" + + "\x06method\x18\x05 \x01(\tR\x06methodB\t\n" + + "\avariantJ\x04\b\x01\x10\x02\"\xbb\x01\n" + + "\x18HSMCompletionCallbackArg\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12D\n" + + "\n" + + "last_event\x18\x04 \x01(\v2%.temporal.api.history.v1.HistoryEventR\tlastEvent\"\x8c\x06\n" + + "\fCallbackInfo\x12H\n" + + "\bcallback\x18\x01 \x01(\v2,.temporal.server.api.persistence.v1.CallbackR\bcallback\x12R\n" + + "\atrigger\x18\x02 \x01(\v28.temporal.server.api.persistence.v1.CallbackInfo.TriggerR\atrigger\x12G\n" + + "\x11registration_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x10registrationTime\x12A\n" + + "\x05state\x18\x04 
\x01(\x0e2+.temporal.server.api.enums.v1.CallbackStateR\x05state\x12\x18\n" + + "\aattempt\x18\x05 \x01(\x05R\aattempt\x12W\n" + + "\x1alast_attempt_complete_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12R\n" + + "\x14last_attempt_failure\x18\a \x01(\v2 .temporal.api.failure.v1.FailureR\x12lastAttemptFailure\x12W\n" + + "\x1anext_attempt_schedule_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x17nextAttemptScheduleTime\x12\x1d\n" + + "\n" + + "request_id\x18\t \x01(\tR\trequestId\x1a\x10\n" + + "\x0eWorkflowClosed\x1a\x80\x01\n" + + "\aTrigger\x12j\n" + + "\x0fworkflow_closed\x18\x01 \x01(\v2?.temporal.server.api.persistence.v1.CallbackInfo.WorkflowClosedH\x00R\x0eworkflowClosedB\t\n" + + "\avariant\"\xf2\a\n" + + "\x12NexusOperationInfo\x12\x1a\n" + + "\bendpoint\x18\x01 \x01(\tR\bendpoint\x12\x18\n" + + "\aservice\x18\x02 \x01(\tR\aservice\x12\x1c\n" + + "\toperation\x18\x03 \x01(\tR\toperation\x122\n" + + "\x15scheduled_event_token\x18\x05 \x01(\fR\x13scheduledEventToken\x12'\n" + + "\x0foperation_token\x18\x06 \x01(\tR\x0eoperationToken\x12T\n" + + "\x19schedule_to_close_timeout\x18\a \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToCloseTimeout\x12A\n" + + "\x0escheduled_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12\x1d\n" + + "\n" + + "request_id\x18\t \x01(\tR\trequestId\x12G\n" + + "\x05state\x18\n" + + " \x01(\x0e21.temporal.server.api.enums.v1.NexusOperationStateR\x05state\x12\x18\n" + + "\aattempt\x18\v \x01(\x05R\aattempt\x12W\n" + + "\x1alast_attempt_complete_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12R\n" + + "\x14last_attempt_failure\x18\r \x01(\v2 .temporal.api.failure.v1.FailureR\x12lastAttemptFailure\x12W\n" + + "\x1anext_attempt_schedule_time\x18\x0e \x01(\v2\x1a.google.protobuf.TimestampR\x17nextAttemptScheduleTime\x12\x1f\n" + + "\vendpoint_id\x18\x0f \x01(\tR\n" + + "endpointId\x12T\n" + + "\x19schedule_to_start_timeout\x18\x10 
\x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12N\n" + + "\x16start_to_close_timeout\x18\x11 \x01(\v2\x19.google.protobuf.DurationR\x13startToCloseTimeout\x12=\n" + + "\fstarted_time\x18\x12 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTimeJ\x04\b\x04\x10\x05\"\xff\x03\n" + + "\x1eNexusOperationCancellationInfo\x12A\n" + + "\x0erequested_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\rrequestedTime\x12L\n" + + "\x05state\x18\x02 \x01(\x0e26.temporal.api.enums.v1.NexusOperationCancellationStateR\x05state\x12\x18\n" + + "\aattempt\x18\x03 \x01(\x05R\aattempt\x12W\n" + + "\x1alast_attempt_complete_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12R\n" + + "\x14last_attempt_failure\x18\x05 \x01(\v2 .temporal.api.failure.v1.FailureR\x12lastAttemptFailure\x12W\n" + + "\x1anext_attempt_schedule_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x17nextAttemptScheduleTime\x12,\n" + + "\x12requested_event_id\x18\a \x01(\x03R\x10requestedEventId\"M\n" + + "\x0eResetChildInfo\x12;\n" + + "\x1ashould_terminate_and_start\x18\x01 \x01(\bR\x17shouldTerminateAndStart\"\xa1\x01\n" + + "\x11WorkflowPauseInfo\x129\n" + + "\n" + + "pause_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\tpauseTime\x12\x1a\n" + + "\bidentity\x18\x02 \x01(\tR\bidentity\x12\x16\n" + + "\x06reason\x18\x03 \x01(\tR\x06reason\x12\x1d\n" + + "\n" + + "request_id\x18\x04 \x01(\tR\trequestIdB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + var ( file_temporal_server_api_persistence_v1_executions_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_executions_proto_rawDescData = file_temporal_server_api_persistence_v1_executions_proto_rawDesc + file_temporal_server_api_persistence_v1_executions_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_executions_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_executions_proto_rawDescOnce.Do(func() { - 
file_temporal_server_api_persistence_v1_executions_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_executions_proto_rawDescData) + file_temporal_server_api_persistence_v1_executions_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_executions_proto_rawDesc), len(file_temporal_server_api_persistence_v1_executions_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_executions_proto_rawDescData } -var file_temporal_server_api_persistence_v1_executions_proto_msgTypes = make([]protoimpl.MessageInfo, 24) -var file_temporal_server_api_persistence_v1_executions_proto_goTypes = []interface{}{ - (*ShardInfo)(nil), // 0: temporal.server.api.persistence.v1.ShardInfo - (*WorkflowExecutionInfo)(nil), // 1: temporal.server.api.persistence.v1.WorkflowExecutionInfo - (*ExecutionStats)(nil), // 2: temporal.server.api.persistence.v1.ExecutionStats - (*WorkflowExecutionState)(nil), // 3: temporal.server.api.persistence.v1.WorkflowExecutionState - (*TransferTaskInfo)(nil), // 4: temporal.server.api.persistence.v1.TransferTaskInfo - (*ReplicationTaskInfo)(nil), // 5: temporal.server.api.persistence.v1.ReplicationTaskInfo - (*VisibilityTaskInfo)(nil), // 6: temporal.server.api.persistence.v1.VisibilityTaskInfo - (*TimerTaskInfo)(nil), // 7: temporal.server.api.persistence.v1.TimerTaskInfo - (*ArchivalTaskInfo)(nil), // 8: temporal.server.api.persistence.v1.ArchivalTaskInfo - (*OutboundTaskInfo)(nil), // 9: temporal.server.api.persistence.v1.OutboundTaskInfo - (*ActivityInfo)(nil), // 10: temporal.server.api.persistence.v1.ActivityInfo - (*TimerInfo)(nil), // 11: temporal.server.api.persistence.v1.TimerInfo - (*ChildExecutionInfo)(nil), // 12: temporal.server.api.persistence.v1.ChildExecutionInfo - (*RequestCancelInfo)(nil), // 13: temporal.server.api.persistence.v1.RequestCancelInfo - (*SignalInfo)(nil), // 14: temporal.server.api.persistence.v1.SignalInfo - 
(*Checksum)(nil), // 15: temporal.server.api.persistence.v1.Checksum - (*CallbackInfo)(nil), // 16: temporal.server.api.persistence.v1.CallbackInfo - nil, // 17: temporal.server.api.persistence.v1.ShardInfo.ReplicationDlqAckLevelEntry - nil, // 18: temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry - nil, // 19: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntry - nil, // 20: temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntry - nil, // 21: temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntry - nil, // 22: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntry - (*TransferTaskInfo_CloseExecutionTaskDetails)(nil), // 23: temporal.server.api.persistence.v1.TransferTaskInfo.CloseExecutionTaskDetails - (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 25: google.protobuf.Duration - (v1.WorkflowTaskType)(0), // 26: temporal.server.api.enums.v1.WorkflowTaskType - (*v11.ResetPoints)(nil), // 27: temporal.api.workflow.v1.ResetPoints - (*v13.VersionHistories)(nil), // 28: temporal.server.api.history.v1.VersionHistories - (*v14.VectorClock)(nil), // 29: temporal.server.api.clock.v1.VectorClock - (*v15.BaseExecutionInfo)(nil), // 30: temporal.server.api.workflow.v1.BaseExecutionInfo - (*v12.WorkerVersionStamp)(nil), // 31: temporal.api.common.v1.WorkerVersionStamp - (*VersionedTransition)(nil), // 32: temporal.server.api.persistence.v1.VersionedTransition - (v1.WorkflowExecutionState)(0), // 33: temporal.server.api.enums.v1.WorkflowExecutionState - (v17.WorkflowExecutionStatus)(0), // 34: temporal.api.enums.v1.WorkflowExecutionStatus - (v1.TaskType)(0), // 35: temporal.server.api.enums.v1.TaskType - (v17.TimeoutType)(0), // 36: temporal.api.enums.v1.TimeoutType - (v1.WorkflowBackoffType)(0), // 37: temporal.server.api.enums.v1.WorkflowBackoffType - (*StateMachineTaskInfo)(nil), // 38: 
temporal.server.api.persistence.v1.StateMachineTaskInfo - (*v18.Failure)(nil), // 39: temporal.api.failure.v1.Failure - (*v12.Payloads)(nil), // 40: temporal.api.common.v1.Payloads - (*v12.ActivityType)(nil), // 41: temporal.api.common.v1.ActivityType - (v17.ParentClosePolicy)(0), // 42: temporal.api.enums.v1.ParentClosePolicy - (v1.ChecksumFlavor)(0), // 43: temporal.server.api.enums.v1.ChecksumFlavor - (*v11.CallbackInfo)(nil), // 44: temporal.api.workflow.v1.CallbackInfo - (*QueueState)(nil), // 45: temporal.server.api.persistence.v1.QueueState - (*v12.Payload)(nil), // 46: temporal.api.common.v1.Payload - (*v16.UpdateInfo)(nil), // 47: temporal.server.api.update.v1.UpdateInfo - (*StateMachineMap)(nil), // 48: temporal.server.api.persistence.v1.StateMachineMap +var file_temporal_server_api_persistence_v1_executions_proto_msgTypes = make([]protoimpl.MessageInfo, 47) +var file_temporal_server_api_persistence_v1_executions_proto_goTypes = []any{ + (*ShardInfo)(nil), // 0: temporal.server.api.persistence.v1.ShardInfo + (*WorkflowExecutionInfo)(nil), // 1: temporal.server.api.persistence.v1.WorkflowExecutionInfo + (*TimeSkippingInfo)(nil), // 2: temporal.server.api.persistence.v1.TimeSkippingInfo + (*TimeSkippingBoundInfo)(nil), // 3: temporal.server.api.persistence.v1.TimeSkippingBoundInfo + (*LastNotifiedTargetVersion)(nil), // 4: temporal.server.api.persistence.v1.LastNotifiedTargetVersion + (*ExecutionStats)(nil), // 5: temporal.server.api.persistence.v1.ExecutionStats + (*WorkflowExecutionState)(nil), // 6: temporal.server.api.persistence.v1.WorkflowExecutionState + (*RequestIDInfo)(nil), // 7: temporal.server.api.persistence.v1.RequestIDInfo + (*TransferTaskInfo)(nil), // 8: temporal.server.api.persistence.v1.TransferTaskInfo + (*ReplicationTaskInfo)(nil), // 9: temporal.server.api.persistence.v1.ReplicationTaskInfo + (*VisibilityTaskInfo)(nil), // 10: temporal.server.api.persistence.v1.VisibilityTaskInfo + (*TimerTaskInfo)(nil), // 11: 
temporal.server.api.persistence.v1.TimerTaskInfo + (*ArchivalTaskInfo)(nil), // 12: temporal.server.api.persistence.v1.ArchivalTaskInfo + (*OutboundTaskInfo)(nil), // 13: temporal.server.api.persistence.v1.OutboundTaskInfo + (*WorkerCommandsTask)(nil), // 14: temporal.server.api.persistence.v1.WorkerCommandsTask + (*NexusInvocationTaskInfo)(nil), // 15: temporal.server.api.persistence.v1.NexusInvocationTaskInfo + (*NexusCancelationTaskInfo)(nil), // 16: temporal.server.api.persistence.v1.NexusCancelationTaskInfo + (*ActivityInfo)(nil), // 17: temporal.server.api.persistence.v1.ActivityInfo + (*TimerInfo)(nil), // 18: temporal.server.api.persistence.v1.TimerInfo + (*ChildExecutionInfo)(nil), // 19: temporal.server.api.persistence.v1.ChildExecutionInfo + (*RequestCancelInfo)(nil), // 20: temporal.server.api.persistence.v1.RequestCancelInfo + (*SignalInfo)(nil), // 21: temporal.server.api.persistence.v1.SignalInfo + (*Checksum)(nil), // 22: temporal.server.api.persistence.v1.Checksum + (*Callback)(nil), // 23: temporal.server.api.persistence.v1.Callback + (*HSMCompletionCallbackArg)(nil), // 24: temporal.server.api.persistence.v1.HSMCompletionCallbackArg + (*CallbackInfo)(nil), // 25: temporal.server.api.persistence.v1.CallbackInfo + (*NexusOperationInfo)(nil), // 26: temporal.server.api.persistence.v1.NexusOperationInfo + (*NexusOperationCancellationInfo)(nil), // 27: temporal.server.api.persistence.v1.NexusOperationCancellationInfo + (*ResetChildInfo)(nil), // 28: temporal.server.api.persistence.v1.ResetChildInfo + (*WorkflowPauseInfo)(nil), // 29: temporal.server.api.persistence.v1.WorkflowPauseInfo + nil, // 30: temporal.server.api.persistence.v1.ShardInfo.ReplicationDlqAckLevelEntry + nil, // 31: temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry + nil, // 32: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntry + nil, // 33: temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntry + nil, // 34: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntry + nil, // 35: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntry + nil, // 36: temporal.server.api.persistence.v1.WorkflowExecutionInfo.ChildrenInitializedPostResetPointEntry + nil, // 37: temporal.server.api.persistence.v1.WorkflowExecutionState.RequestIdsEntry + (*TransferTaskInfo_CloseExecutionTaskDetails)(nil), // 38: temporal.server.api.persistence.v1.TransferTaskInfo.CloseExecutionTaskDetails + (*ActivityInfo_UseWorkflowBuildIdInfo)(nil), // 39: temporal.server.api.persistence.v1.ActivityInfo.UseWorkflowBuildIdInfo + (*ActivityInfo_PauseInfo)(nil), // 40: temporal.server.api.persistence.v1.ActivityInfo.PauseInfo + (*ActivityInfo_PauseInfo_Manual)(nil), // 41: temporal.server.api.persistence.v1.ActivityInfo.PauseInfo.Manual + (*Callback_Nexus)(nil), // 42: temporal.server.api.persistence.v1.Callback.Nexus + (*Callback_HSM)(nil), // 43: temporal.server.api.persistence.v1.Callback.HSM + nil, // 44: temporal.server.api.persistence.v1.Callback.Nexus.HeaderEntry + (*CallbackInfo_WorkflowClosed)(nil), // 45: temporal.server.api.persistence.v1.CallbackInfo.WorkflowClosed + (*CallbackInfo_Trigger)(nil), // 46: temporal.server.api.persistence.v1.CallbackInfo.Trigger + (*timestamppb.Timestamp)(nil), // 47: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 48: google.protobuf.Duration + (v1.WorkflowTaskType)(0), // 49: temporal.server.api.enums.v1.WorkflowTaskType + (v11.SuggestContinueAsNewReason)(0), // 50: temporal.api.enums.v1.SuggestContinueAsNewReason + (*v12.ResetPoints)(nil), // 51: temporal.api.workflow.v1.ResetPoints + (*v14.VersionHistories)(nil), // 52: temporal.server.api.history.v1.VersionHistories + (*v15.VectorClock)(nil), // 53: temporal.server.api.clock.v1.VectorClock + (*v16.BaseExecutionInfo)(nil), // 54: temporal.server.api.workflow.v1.BaseExecutionInfo + (*v13.WorkerVersionStamp)(nil), // 55: temporal.api.common.v1.WorkerVersionStamp 
+ (*VersionedTransition)(nil), // 56: temporal.server.api.persistence.v1.VersionedTransition + (*StateMachineTimerGroup)(nil), // 57: temporal.server.api.persistence.v1.StateMachineTimerGroup + (*StateMachineTombstoneBatch)(nil), // 58: temporal.server.api.persistence.v1.StateMachineTombstoneBatch + (*v12.WorkflowExecutionVersioningInfo)(nil), // 59: temporal.api.workflow.v1.WorkflowExecutionVersioningInfo + (*v13.Priority)(nil), // 60: temporal.api.common.v1.Priority + (v11.WorkflowTaskFailedCause)(0), // 61: temporal.api.enums.v1.WorkflowTaskFailedCause + (v11.TimeoutType)(0), // 62: temporal.api.enums.v1.TimeoutType + (*v17.DeclinedTargetVersionUpgrade)(nil), // 63: temporal.api.history.v1.DeclinedTargetVersionUpgrade + (*v12.TimeSkippingConfig)(nil), // 64: temporal.api.workflow.v1.TimeSkippingConfig + (*v18.WorkerDeploymentVersion)(nil), // 65: temporal.api.deployment.v1.WorkerDeploymentVersion + (v1.WorkflowExecutionState)(0), // 66: temporal.server.api.enums.v1.WorkflowExecutionState + (v11.WorkflowExecutionStatus)(0), // 67: temporal.api.enums.v1.WorkflowExecutionStatus + (v11.EventType)(0), // 68: temporal.api.enums.v1.EventType + (v1.TaskType)(0), // 69: temporal.server.api.enums.v1.TaskType + (*ChasmTaskInfo)(nil), // 70: temporal.server.api.persistence.v1.ChasmTaskInfo + (v1.TaskPriority)(0), // 71: temporal.server.api.enums.v1.TaskPriority + (*v14.VersionHistoryItem)(nil), // 72: temporal.server.api.history.v1.VersionHistoryItem + (v1.WorkflowBackoffType)(0), // 73: temporal.server.api.enums.v1.WorkflowBackoffType + (*StateMachineTaskInfo)(nil), // 74: temporal.server.api.persistence.v1.StateMachineTaskInfo + (*v19.WorkerCommand)(nil), // 75: temporal.api.worker.v1.WorkerCommand + (*v110.Failure)(nil), // 76: temporal.api.failure.v1.Failure + (*v13.Payloads)(nil), // 77: temporal.api.common.v1.Payloads + (*v13.ActivityType)(nil), // 78: temporal.api.common.v1.ActivityType + (*v18.Deployment)(nil), // 79: temporal.api.deployment.v1.Deployment + 
(v11.ParentClosePolicy)(0), // 80: temporal.api.enums.v1.ParentClosePolicy + (v1.ChecksumFlavor)(0), // 81: temporal.server.api.enums.v1.ChecksumFlavor + (*v13.Link)(nil), // 82: temporal.api.common.v1.Link + (*v17.HistoryEvent)(nil), // 83: temporal.api.history.v1.HistoryEvent + (v1.CallbackState)(0), // 84: temporal.server.api.enums.v1.CallbackState + (v1.NexusOperationState)(0), // 85: temporal.server.api.enums.v1.NexusOperationState + (v11.NexusOperationCancellationState)(0), // 86: temporal.api.enums.v1.NexusOperationCancellationState + (*QueueState)(nil), // 87: temporal.server.api.persistence.v1.QueueState + (*v13.Payload)(nil), // 88: temporal.api.common.v1.Payload + (*UpdateInfo)(nil), // 89: temporal.server.api.persistence.v1.UpdateInfo + (*StateMachineMap)(nil), // 90: temporal.server.api.persistence.v1.StateMachineMap + (*StateMachineRef)(nil), // 91: temporal.server.api.persistence.v1.StateMachineRef } var file_temporal_server_api_persistence_v1_executions_proto_depIdxs = []int32{ - 24, // 0: temporal.server.api.persistence.v1.ShardInfo.update_time:type_name -> google.protobuf.Timestamp - 17, // 1: temporal.server.api.persistence.v1.ShardInfo.replication_dlq_ack_level:type_name -> temporal.server.api.persistence.v1.ShardInfo.ReplicationDlqAckLevelEntry - 18, // 2: temporal.server.api.persistence.v1.ShardInfo.queue_states:type_name -> temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry - 25, // 3: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_execution_timeout:type_name -> google.protobuf.Duration - 25, // 4: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_run_timeout:type_name -> google.protobuf.Duration - 25, // 5: temporal.server.api.persistence.v1.WorkflowExecutionInfo.default_workflow_task_timeout:type_name -> google.protobuf.Duration - 24, // 6: temporal.server.api.persistence.v1.WorkflowExecutionInfo.start_time:type_name -> google.protobuf.Timestamp - 24, // 7: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.last_update_time:type_name -> google.protobuf.Timestamp - 25, // 8: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_timeout:type_name -> google.protobuf.Duration - 24, // 9: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_started_time:type_name -> google.protobuf.Timestamp - 24, // 10: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_scheduled_time:type_name -> google.protobuf.Timestamp - 24, // 11: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_original_scheduled_time:type_name -> google.protobuf.Timestamp - 26, // 12: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_type:type_name -> temporal.server.api.enums.v1.WorkflowTaskType - 25, // 13: temporal.server.api.persistence.v1.WorkflowExecutionInfo.sticky_schedule_to_start_timeout:type_name -> google.protobuf.Duration - 25, // 14: temporal.server.api.persistence.v1.WorkflowExecutionInfo.retry_initial_interval:type_name -> google.protobuf.Duration - 25, // 15: temporal.server.api.persistence.v1.WorkflowExecutionInfo.retry_maximum_interval:type_name -> google.protobuf.Duration - 24, // 16: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_execution_expiration_time:type_name -> google.protobuf.Timestamp - 27, // 17: temporal.server.api.persistence.v1.WorkflowExecutionInfo.auto_reset_points:type_name -> temporal.api.workflow.v1.ResetPoints - 19, // 18: temporal.server.api.persistence.v1.WorkflowExecutionInfo.search_attributes:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntry - 20, // 19: temporal.server.api.persistence.v1.WorkflowExecutionInfo.memo:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntry - 28, // 20: temporal.server.api.persistence.v1.WorkflowExecutionInfo.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories - 2, // 21: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.execution_stats:type_name -> temporal.server.api.persistence.v1.ExecutionStats - 24, // 22: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_run_expiration_time:type_name -> google.protobuf.Timestamp - 24, // 23: temporal.server.api.persistence.v1.WorkflowExecutionInfo.execution_time:type_name -> google.protobuf.Timestamp - 29, // 24: temporal.server.api.persistence.v1.WorkflowExecutionInfo.parent_clock:type_name -> temporal.server.api.clock.v1.VectorClock - 24, // 25: temporal.server.api.persistence.v1.WorkflowExecutionInfo.close_time:type_name -> google.protobuf.Timestamp - 30, // 26: temporal.server.api.persistence.v1.WorkflowExecutionInfo.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo - 31, // 27: temporal.server.api.persistence.v1.WorkflowExecutionInfo.worker_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp - 21, // 28: temporal.server.api.persistence.v1.WorkflowExecutionInfo.update_infos:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntry - 32, // 29: temporal.server.api.persistence.v1.WorkflowExecutionInfo.transition_history:type_name -> temporal.server.api.persistence.v1.VersionedTransition - 22, // 30: temporal.server.api.persistence.v1.WorkflowExecutionInfo.sub_state_machines_by_type:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntry - 33, // 31: temporal.server.api.persistence.v1.WorkflowExecutionState.state:type_name -> temporal.server.api.enums.v1.WorkflowExecutionState - 34, // 32: temporal.server.api.persistence.v1.WorkflowExecutionState.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus - 35, // 33: temporal.server.api.persistence.v1.TransferTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 24, // 34: temporal.server.api.persistence.v1.TransferTaskInfo.visibility_time:type_name -> 
google.protobuf.Timestamp - 23, // 35: temporal.server.api.persistence.v1.TransferTaskInfo.close_execution_task_details:type_name -> temporal.server.api.persistence.v1.TransferTaskInfo.CloseExecutionTaskDetails - 35, // 36: temporal.server.api.persistence.v1.ReplicationTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 24, // 37: temporal.server.api.persistence.v1.ReplicationTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp - 35, // 38: temporal.server.api.persistence.v1.VisibilityTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 24, // 39: temporal.server.api.persistence.v1.VisibilityTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp - 35, // 40: temporal.server.api.persistence.v1.TimerTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 36, // 41: temporal.server.api.persistence.v1.TimerTaskInfo.timeout_type:type_name -> temporal.api.enums.v1.TimeoutType - 37, // 42: temporal.server.api.persistence.v1.TimerTaskInfo.workflow_backoff_type:type_name -> temporal.server.api.enums.v1.WorkflowBackoffType - 24, // 43: temporal.server.api.persistence.v1.TimerTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp - 38, // 44: temporal.server.api.persistence.v1.TimerTaskInfo.state_machine_info:type_name -> temporal.server.api.persistence.v1.StateMachineTaskInfo - 35, // 45: temporal.server.api.persistence.v1.ArchivalTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 24, // 46: temporal.server.api.persistence.v1.ArchivalTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp - 35, // 47: temporal.server.api.persistence.v1.OutboundTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 24, // 48: temporal.server.api.persistence.v1.OutboundTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp - 38, // 49: temporal.server.api.persistence.v1.OutboundTaskInfo.state_machine_info:type_name -> 
temporal.server.api.persistence.v1.StateMachineTaskInfo - 24, // 50: temporal.server.api.persistence.v1.ActivityInfo.scheduled_time:type_name -> google.protobuf.Timestamp - 24, // 51: temporal.server.api.persistence.v1.ActivityInfo.started_time:type_name -> google.protobuf.Timestamp - 25, // 52: temporal.server.api.persistence.v1.ActivityInfo.schedule_to_start_timeout:type_name -> google.protobuf.Duration - 25, // 53: temporal.server.api.persistence.v1.ActivityInfo.schedule_to_close_timeout:type_name -> google.protobuf.Duration - 25, // 54: temporal.server.api.persistence.v1.ActivityInfo.start_to_close_timeout:type_name -> google.protobuf.Duration - 25, // 55: temporal.server.api.persistence.v1.ActivityInfo.heartbeat_timeout:type_name -> google.protobuf.Duration - 25, // 56: temporal.server.api.persistence.v1.ActivityInfo.retry_initial_interval:type_name -> google.protobuf.Duration - 25, // 57: temporal.server.api.persistence.v1.ActivityInfo.retry_maximum_interval:type_name -> google.protobuf.Duration - 24, // 58: temporal.server.api.persistence.v1.ActivityInfo.retry_expiration_time:type_name -> google.protobuf.Timestamp - 39, // 59: temporal.server.api.persistence.v1.ActivityInfo.retry_last_failure:type_name -> temporal.api.failure.v1.Failure - 40, // 60: temporal.server.api.persistence.v1.ActivityInfo.last_heartbeat_details:type_name -> temporal.api.common.v1.Payloads - 24, // 61: temporal.server.api.persistence.v1.ActivityInfo.last_heartbeat_update_time:type_name -> google.protobuf.Timestamp - 41, // 62: temporal.server.api.persistence.v1.ActivityInfo.activity_type:type_name -> temporal.api.common.v1.ActivityType - 24, // 63: temporal.server.api.persistence.v1.TimerInfo.expiry_time:type_name -> google.protobuf.Timestamp - 42, // 64: temporal.server.api.persistence.v1.ChildExecutionInfo.parent_close_policy:type_name -> temporal.api.enums.v1.ParentClosePolicy - 29, // 65: temporal.server.api.persistence.v1.ChildExecutionInfo.clock:type_name -> 
temporal.server.api.clock.v1.VectorClock - 43, // 66: temporal.server.api.persistence.v1.Checksum.flavor:type_name -> temporal.server.api.enums.v1.ChecksumFlavor - 44, // 67: temporal.server.api.persistence.v1.CallbackInfo.public_info:type_name -> temporal.api.workflow.v1.CallbackInfo - 45, // 68: temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry.value:type_name -> temporal.server.api.persistence.v1.QueueState - 46, // 69: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntry.value:type_name -> temporal.api.common.v1.Payload - 46, // 70: temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntry.value:type_name -> temporal.api.common.v1.Payload - 47, // 71: temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntry.value:type_name -> temporal.server.api.update.v1.UpdateInfo - 48, // 72: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntry.value:type_name -> temporal.server.api.persistence.v1.StateMachineMap - 73, // [73:73] is the sub-list for method output_type - 73, // [73:73] is the sub-list for method input_type - 73, // [73:73] is the sub-list for extension type_name - 73, // [73:73] is the sub-list for extension extendee - 0, // [0:73] is the sub-list for field type_name + 47, // 0: temporal.server.api.persistence.v1.ShardInfo.update_time:type_name -> google.protobuf.Timestamp + 30, // 1: temporal.server.api.persistence.v1.ShardInfo.replication_dlq_ack_level:type_name -> temporal.server.api.persistence.v1.ShardInfo.ReplicationDlqAckLevelEntry + 31, // 2: temporal.server.api.persistence.v1.ShardInfo.queue_states:type_name -> temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry + 48, // 3: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_execution_timeout:type_name -> google.protobuf.Duration + 48, // 4: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_run_timeout:type_name -> google.protobuf.Duration + 48, // 5: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.default_workflow_task_timeout:type_name -> google.protobuf.Duration + 47, // 6: temporal.server.api.persistence.v1.WorkflowExecutionInfo.start_time:type_name -> google.protobuf.Timestamp + 47, // 7: temporal.server.api.persistence.v1.WorkflowExecutionInfo.last_update_time:type_name -> google.protobuf.Timestamp + 48, // 8: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_timeout:type_name -> google.protobuf.Duration + 47, // 9: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_started_time:type_name -> google.protobuf.Timestamp + 47, // 10: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_scheduled_time:type_name -> google.protobuf.Timestamp + 47, // 11: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_original_scheduled_time:type_name -> google.protobuf.Timestamp + 49, // 12: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_type:type_name -> temporal.server.api.enums.v1.WorkflowTaskType + 50, // 13: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_suggest_continue_as_new_reasons:type_name -> temporal.api.enums.v1.SuggestContinueAsNewReason + 48, // 14: temporal.server.api.persistence.v1.WorkflowExecutionInfo.sticky_schedule_to_start_timeout:type_name -> google.protobuf.Duration + 48, // 15: temporal.server.api.persistence.v1.WorkflowExecutionInfo.retry_initial_interval:type_name -> google.protobuf.Duration + 48, // 16: temporal.server.api.persistence.v1.WorkflowExecutionInfo.retry_maximum_interval:type_name -> google.protobuf.Duration + 47, // 17: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_execution_expiration_time:type_name -> google.protobuf.Timestamp + 51, // 18: temporal.server.api.persistence.v1.WorkflowExecutionInfo.auto_reset_points:type_name -> temporal.api.workflow.v1.ResetPoints + 32, // 19: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.search_attributes:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntry + 33, // 20: temporal.server.api.persistence.v1.WorkflowExecutionInfo.memo:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntry + 52, // 21: temporal.server.api.persistence.v1.WorkflowExecutionInfo.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 5, // 22: temporal.server.api.persistence.v1.WorkflowExecutionInfo.execution_stats:type_name -> temporal.server.api.persistence.v1.ExecutionStats + 47, // 23: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_run_expiration_time:type_name -> google.protobuf.Timestamp + 47, // 24: temporal.server.api.persistence.v1.WorkflowExecutionInfo.execution_time:type_name -> google.protobuf.Timestamp + 53, // 25: temporal.server.api.persistence.v1.WorkflowExecutionInfo.parent_clock:type_name -> temporal.server.api.clock.v1.VectorClock + 47, // 26: temporal.server.api.persistence.v1.WorkflowExecutionInfo.close_time:type_name -> google.protobuf.Timestamp + 54, // 27: temporal.server.api.persistence.v1.WorkflowExecutionInfo.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo + 55, // 28: temporal.server.api.persistence.v1.WorkflowExecutionInfo.most_recent_worker_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp + 34, // 29: temporal.server.api.persistence.v1.WorkflowExecutionInfo.update_infos:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntry + 56, // 30: temporal.server.api.persistence.v1.WorkflowExecutionInfo.transition_history:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 35, // 31: temporal.server.api.persistence.v1.WorkflowExecutionInfo.sub_state_machines_by_type:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntry + 57, // 32: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.state_machine_timers:type_name -> temporal.server.api.persistence.v1.StateMachineTimerGroup + 56, // 33: temporal.server.api.persistence.v1.WorkflowExecutionInfo.workflow_task_last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 56, // 34: temporal.server.api.persistence.v1.WorkflowExecutionInfo.visibility_last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 56, // 35: temporal.server.api.persistence.v1.WorkflowExecutionInfo.signal_request_ids_last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 58, // 36: temporal.server.api.persistence.v1.WorkflowExecutionInfo.sub_state_machine_tombstone_batches:type_name -> temporal.server.api.persistence.v1.StateMachineTombstoneBatch + 59, // 37: temporal.server.api.persistence.v1.WorkflowExecutionInfo.versioning_info:type_name -> temporal.api.workflow.v1.WorkflowExecutionVersioningInfo + 56, // 38: temporal.server.api.persistence.v1.WorkflowExecutionInfo.previous_transition_history:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 56, // 39: temporal.server.api.persistence.v1.WorkflowExecutionInfo.last_transition_history_break_point:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 36, // 40: temporal.server.api.persistence.v1.WorkflowExecutionInfo.children_initialized_post_reset_point:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo.ChildrenInitializedPostResetPointEntry + 60, // 41: temporal.server.api.persistence.v1.WorkflowExecutionInfo.priority:type_name -> temporal.api.common.v1.Priority + 29, // 42: temporal.server.api.persistence.v1.WorkflowExecutionInfo.pause_info:type_name -> temporal.server.api.persistence.v1.WorkflowPauseInfo + 61, // 43: temporal.server.api.persistence.v1.WorkflowExecutionInfo.last_workflow_task_failure_cause:type_name -> 
temporal.api.enums.v1.WorkflowTaskFailedCause + 62, // 44: temporal.server.api.persistence.v1.WorkflowExecutionInfo.last_workflow_task_timed_out_type:type_name -> temporal.api.enums.v1.TimeoutType + 4, // 45: temporal.server.api.persistence.v1.WorkflowExecutionInfo.last_notified_target_version:type_name -> temporal.server.api.persistence.v1.LastNotifiedTargetVersion + 63, // 46: temporal.server.api.persistence.v1.WorkflowExecutionInfo.declined_target_version_upgrade:type_name -> temporal.api.history.v1.DeclinedTargetVersionUpgrade + 2, // 47: temporal.server.api.persistence.v1.WorkflowExecutionInfo.time_skipping_info:type_name -> temporal.server.api.persistence.v1.TimeSkippingInfo + 64, // 48: temporal.server.api.persistence.v1.TimeSkippingInfo.config:type_name -> temporal.api.workflow.v1.TimeSkippingConfig + 48, // 49: temporal.server.api.persistence.v1.TimeSkippingInfo.accumulated_skipped_duration:type_name -> google.protobuf.Duration + 3, // 50: temporal.server.api.persistence.v1.TimeSkippingInfo.current_elapsed_duration_bound:type_name -> temporal.server.api.persistence.v1.TimeSkippingBoundInfo + 47, // 51: temporal.server.api.persistence.v1.TimeSkippingBoundInfo.target_time:type_name -> google.protobuf.Timestamp + 65, // 52: temporal.server.api.persistence.v1.LastNotifiedTargetVersion.deployment_version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 66, // 53: temporal.server.api.persistence.v1.WorkflowExecutionState.state:type_name -> temporal.server.api.enums.v1.WorkflowExecutionState + 67, // 54: temporal.server.api.persistence.v1.WorkflowExecutionState.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus + 56, // 55: temporal.server.api.persistence.v1.WorkflowExecutionState.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 47, // 56: temporal.server.api.persistence.v1.WorkflowExecutionState.start_time:type_name -> google.protobuf.Timestamp + 37, // 57: 
temporal.server.api.persistence.v1.WorkflowExecutionState.request_ids:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionState.RequestIdsEntry + 68, // 58: temporal.server.api.persistence.v1.RequestIDInfo.event_type:type_name -> temporal.api.enums.v1.EventType + 69, // 59: temporal.server.api.persistence.v1.TransferTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 47, // 60: temporal.server.api.persistence.v1.TransferTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp + 38, // 61: temporal.server.api.persistence.v1.TransferTaskInfo.close_execution_task_details:type_name -> temporal.server.api.persistence.v1.TransferTaskInfo.CloseExecutionTaskDetails + 70, // 62: temporal.server.api.persistence.v1.TransferTaskInfo.chasm_task_info:type_name -> temporal.server.api.persistence.v1.ChasmTaskInfo + 69, // 63: temporal.server.api.persistence.v1.ReplicationTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 47, // 64: temporal.server.api.persistence.v1.ReplicationTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp + 71, // 65: temporal.server.api.persistence.v1.ReplicationTaskInfo.priority:type_name -> temporal.server.api.enums.v1.TaskPriority + 56, // 66: temporal.server.api.persistence.v1.ReplicationTaskInfo.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 9, // 67: temporal.server.api.persistence.v1.ReplicationTaskInfo.task_equivalents:type_name -> temporal.server.api.persistence.v1.ReplicationTaskInfo + 72, // 68: temporal.server.api.persistence.v1.ReplicationTaskInfo.last_version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 69, // 69: temporal.server.api.persistence.v1.VisibilityTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 47, // 70: temporal.server.api.persistence.v1.VisibilityTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp + 47, // 71: 
temporal.server.api.persistence.v1.VisibilityTaskInfo.close_time:type_name -> google.protobuf.Timestamp + 70, // 72: temporal.server.api.persistence.v1.VisibilityTaskInfo.chasm_task_info:type_name -> temporal.server.api.persistence.v1.ChasmTaskInfo + 69, // 73: temporal.server.api.persistence.v1.TimerTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 62, // 74: temporal.server.api.persistence.v1.TimerTaskInfo.timeout_type:type_name -> temporal.api.enums.v1.TimeoutType + 73, // 75: temporal.server.api.persistence.v1.TimerTaskInfo.workflow_backoff_type:type_name -> temporal.server.api.enums.v1.WorkflowBackoffType + 47, // 76: temporal.server.api.persistence.v1.TimerTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp + 70, // 77: temporal.server.api.persistence.v1.TimerTaskInfo.chasm_task_info:type_name -> temporal.server.api.persistence.v1.ChasmTaskInfo + 69, // 78: temporal.server.api.persistence.v1.ArchivalTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 47, // 79: temporal.server.api.persistence.v1.ArchivalTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp + 69, // 80: temporal.server.api.persistence.v1.OutboundTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 47, // 81: temporal.server.api.persistence.v1.OutboundTaskInfo.visibility_time:type_name -> google.protobuf.Timestamp + 74, // 82: temporal.server.api.persistence.v1.OutboundTaskInfo.state_machine_info:type_name -> temporal.server.api.persistence.v1.StateMachineTaskInfo + 70, // 83: temporal.server.api.persistence.v1.OutboundTaskInfo.chasm_task_info:type_name -> temporal.server.api.persistence.v1.ChasmTaskInfo + 14, // 84: temporal.server.api.persistence.v1.OutboundTaskInfo.worker_commands_task:type_name -> temporal.server.api.persistence.v1.WorkerCommandsTask + 75, // 85: temporal.server.api.persistence.v1.WorkerCommandsTask.commands:type_name -> temporal.api.worker.v1.WorkerCommand + 47, // 86: 
temporal.server.api.persistence.v1.ActivityInfo.scheduled_time:type_name -> google.protobuf.Timestamp + 47, // 87: temporal.server.api.persistence.v1.ActivityInfo.started_time:type_name -> google.protobuf.Timestamp + 48, // 88: temporal.server.api.persistence.v1.ActivityInfo.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 48, // 89: temporal.server.api.persistence.v1.ActivityInfo.schedule_to_close_timeout:type_name -> google.protobuf.Duration + 48, // 90: temporal.server.api.persistence.v1.ActivityInfo.start_to_close_timeout:type_name -> google.protobuf.Duration + 48, // 91: temporal.server.api.persistence.v1.ActivityInfo.heartbeat_timeout:type_name -> google.protobuf.Duration + 48, // 92: temporal.server.api.persistence.v1.ActivityInfo.retry_initial_interval:type_name -> google.protobuf.Duration + 48, // 93: temporal.server.api.persistence.v1.ActivityInfo.retry_maximum_interval:type_name -> google.protobuf.Duration + 47, // 94: temporal.server.api.persistence.v1.ActivityInfo.retry_expiration_time:type_name -> google.protobuf.Timestamp + 76, // 95: temporal.server.api.persistence.v1.ActivityInfo.retry_last_failure:type_name -> temporal.api.failure.v1.Failure + 77, // 96: temporal.server.api.persistence.v1.ActivityInfo.last_heartbeat_details:type_name -> temporal.api.common.v1.Payloads + 47, // 97: temporal.server.api.persistence.v1.ActivityInfo.last_heartbeat_update_time:type_name -> google.protobuf.Timestamp + 78, // 98: temporal.server.api.persistence.v1.ActivityInfo.activity_type:type_name -> temporal.api.common.v1.ActivityType + 39, // 99: temporal.server.api.persistence.v1.ActivityInfo.use_workflow_build_id_info:type_name -> temporal.server.api.persistence.v1.ActivityInfo.UseWorkflowBuildIdInfo + 55, // 100: temporal.server.api.persistence.v1.ActivityInfo.last_worker_version_stamp:type_name -> temporal.api.common.v1.WorkerVersionStamp + 56, // 101: temporal.server.api.persistence.v1.ActivityInfo.last_update_versioned_transition:type_name -> 
temporal.server.api.persistence.v1.VersionedTransition + 47, // 102: temporal.server.api.persistence.v1.ActivityInfo.first_scheduled_time:type_name -> google.protobuf.Timestamp + 47, // 103: temporal.server.api.persistence.v1.ActivityInfo.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 79, // 104: temporal.server.api.persistence.v1.ActivityInfo.last_started_deployment:type_name -> temporal.api.deployment.v1.Deployment + 65, // 105: temporal.server.api.persistence.v1.ActivityInfo.last_deployment_version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 60, // 106: temporal.server.api.persistence.v1.ActivityInfo.priority:type_name -> temporal.api.common.v1.Priority + 40, // 107: temporal.server.api.persistence.v1.ActivityInfo.pause_info:type_name -> temporal.server.api.persistence.v1.ActivityInfo.PauseInfo + 53, // 108: temporal.server.api.persistence.v1.ActivityInfo.started_clock:type_name -> temporal.server.api.clock.v1.VectorClock + 47, // 109: temporal.server.api.persistence.v1.TimerInfo.expiry_time:type_name -> google.protobuf.Timestamp + 56, // 110: temporal.server.api.persistence.v1.TimerInfo.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 80, // 111: temporal.server.api.persistence.v1.ChildExecutionInfo.parent_close_policy:type_name -> temporal.api.enums.v1.ParentClosePolicy + 53, // 112: temporal.server.api.persistence.v1.ChildExecutionInfo.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 56, // 113: temporal.server.api.persistence.v1.ChildExecutionInfo.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 60, // 114: temporal.server.api.persistence.v1.ChildExecutionInfo.priority:type_name -> temporal.api.common.v1.Priority + 56, // 115: temporal.server.api.persistence.v1.RequestCancelInfo.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 56, // 116: 
temporal.server.api.persistence.v1.SignalInfo.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 81, // 117: temporal.server.api.persistence.v1.Checksum.flavor:type_name -> temporal.server.api.enums.v1.ChecksumFlavor + 42, // 118: temporal.server.api.persistence.v1.Callback.nexus:type_name -> temporal.server.api.persistence.v1.Callback.Nexus + 43, // 119: temporal.server.api.persistence.v1.Callback.hsm:type_name -> temporal.server.api.persistence.v1.Callback.HSM + 82, // 120: temporal.server.api.persistence.v1.Callback.links:type_name -> temporal.api.common.v1.Link + 83, // 121: temporal.server.api.persistence.v1.HSMCompletionCallbackArg.last_event:type_name -> temporal.api.history.v1.HistoryEvent + 23, // 122: temporal.server.api.persistence.v1.CallbackInfo.callback:type_name -> temporal.server.api.persistence.v1.Callback + 46, // 123: temporal.server.api.persistence.v1.CallbackInfo.trigger:type_name -> temporal.server.api.persistence.v1.CallbackInfo.Trigger + 47, // 124: temporal.server.api.persistence.v1.CallbackInfo.registration_time:type_name -> google.protobuf.Timestamp + 84, // 125: temporal.server.api.persistence.v1.CallbackInfo.state:type_name -> temporal.server.api.enums.v1.CallbackState + 47, // 126: temporal.server.api.persistence.v1.CallbackInfo.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 76, // 127: temporal.server.api.persistence.v1.CallbackInfo.last_attempt_failure:type_name -> temporal.api.failure.v1.Failure + 47, // 128: temporal.server.api.persistence.v1.CallbackInfo.next_attempt_schedule_time:type_name -> google.protobuf.Timestamp + 48, // 129: temporal.server.api.persistence.v1.NexusOperationInfo.schedule_to_close_timeout:type_name -> google.protobuf.Duration + 47, // 130: temporal.server.api.persistence.v1.NexusOperationInfo.scheduled_time:type_name -> google.protobuf.Timestamp + 85, // 131: temporal.server.api.persistence.v1.NexusOperationInfo.state:type_name -> 
temporal.server.api.enums.v1.NexusOperationState + 47, // 132: temporal.server.api.persistence.v1.NexusOperationInfo.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 76, // 133: temporal.server.api.persistence.v1.NexusOperationInfo.last_attempt_failure:type_name -> temporal.api.failure.v1.Failure + 47, // 134: temporal.server.api.persistence.v1.NexusOperationInfo.next_attempt_schedule_time:type_name -> google.protobuf.Timestamp + 48, // 135: temporal.server.api.persistence.v1.NexusOperationInfo.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 48, // 136: temporal.server.api.persistence.v1.NexusOperationInfo.start_to_close_timeout:type_name -> google.protobuf.Duration + 47, // 137: temporal.server.api.persistence.v1.NexusOperationInfo.started_time:type_name -> google.protobuf.Timestamp + 47, // 138: temporal.server.api.persistence.v1.NexusOperationCancellationInfo.requested_time:type_name -> google.protobuf.Timestamp + 86, // 139: temporal.server.api.persistence.v1.NexusOperationCancellationInfo.state:type_name -> temporal.api.enums.v1.NexusOperationCancellationState + 47, // 140: temporal.server.api.persistence.v1.NexusOperationCancellationInfo.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 76, // 141: temporal.server.api.persistence.v1.NexusOperationCancellationInfo.last_attempt_failure:type_name -> temporal.api.failure.v1.Failure + 47, // 142: temporal.server.api.persistence.v1.NexusOperationCancellationInfo.next_attempt_schedule_time:type_name -> google.protobuf.Timestamp + 47, // 143: temporal.server.api.persistence.v1.WorkflowPauseInfo.pause_time:type_name -> google.protobuf.Timestamp + 87, // 144: temporal.server.api.persistence.v1.ShardInfo.QueueStatesEntry.value:type_name -> temporal.server.api.persistence.v1.QueueState + 88, // 145: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SearchAttributesEntry.value:type_name -> temporal.api.common.v1.Payload + 88, // 146: 
temporal.server.api.persistence.v1.WorkflowExecutionInfo.MemoEntry.value:type_name -> temporal.api.common.v1.Payload + 89, // 147: temporal.server.api.persistence.v1.WorkflowExecutionInfo.UpdateInfosEntry.value:type_name -> temporal.server.api.persistence.v1.UpdateInfo + 90, // 148: temporal.server.api.persistence.v1.WorkflowExecutionInfo.SubStateMachinesByTypeEntry.value:type_name -> temporal.server.api.persistence.v1.StateMachineMap + 28, // 149: temporal.server.api.persistence.v1.WorkflowExecutionInfo.ChildrenInitializedPostResetPointEntry.value:type_name -> temporal.server.api.persistence.v1.ResetChildInfo + 7, // 150: temporal.server.api.persistence.v1.WorkflowExecutionState.RequestIdsEntry.value:type_name -> temporal.server.api.persistence.v1.RequestIDInfo + 47, // 151: temporal.server.api.persistence.v1.ActivityInfo.PauseInfo.pause_time:type_name -> google.protobuf.Timestamp + 41, // 152: temporal.server.api.persistence.v1.ActivityInfo.PauseInfo.manual:type_name -> temporal.server.api.persistence.v1.ActivityInfo.PauseInfo.Manual + 44, // 153: temporal.server.api.persistence.v1.Callback.Nexus.header:type_name -> temporal.server.api.persistence.v1.Callback.Nexus.HeaderEntry + 91, // 154: temporal.server.api.persistence.v1.Callback.HSM.ref:type_name -> temporal.server.api.persistence.v1.StateMachineRef + 45, // 155: temporal.server.api.persistence.v1.CallbackInfo.Trigger.workflow_closed:type_name -> temporal.server.api.persistence.v1.CallbackInfo.WorkflowClosed + 156, // [156:156] is the sub-list for method output_type + 156, // [156:156] is the sub-list for method input_type + 156, // [156:156] is the sub-list for extension type_name + 156, // [156:156] is the sub-list for extension extendee + 0, // [0:156] is the sub-list for field type_name } func init() { file_temporal_server_api_persistence_v1_executions_proto_init() } @@ -3506,236 +5651,51 @@ func file_temporal_server_api_persistence_v1_executions_proto_init() { if 
File_temporal_server_api_persistence_v1_executions_proto != nil { return } - file_temporal_server_api_persistence_v1_queues_proto_init() + file_temporal_server_api_persistence_v1_chasm_proto_init() file_temporal_server_api_persistence_v1_hsm_proto_init() - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShardInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowExecutionInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExecutionStats); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowExecutionState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransferTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VisibilityTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimerTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ArchivalTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutboundTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ActivityInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TimerInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChildExecutionInfo); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RequestCancelInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SignalInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Checksum); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CallbackInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TransferTaskInfo_CloseExecutionTaskDetails); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } + file_temporal_server_api_persistence_v1_queues_proto_init() + file_temporal_server_api_persistence_v1_update_proto_init() + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[1].OneofWrappers = []any{ + (*WorkflowExecutionInfo_LastWorkflowTaskFailureCause)(nil), + (*WorkflowExecutionInfo_LastWorkflowTaskTimedOutType)(nil), } - file_temporal_server_api_persistence_v1_executions_proto_msgTypes[4].OneofWrappers = 
[]interface{}{ + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[8].OneofWrappers = []any{ (*TransferTaskInfo_CloseExecutionTaskDetails_)(nil), + (*TransferTaskInfo_ChasmTaskInfo)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[10].OneofWrappers = []any{ + (*VisibilityTaskInfo_ChasmTaskInfo)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[11].OneofWrappers = []any{ + (*TimerTaskInfo_ChasmTaskInfo)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[13].OneofWrappers = []any{ + (*OutboundTaskInfo_StateMachineInfo)(nil), + (*OutboundTaskInfo_ChasmTaskInfo)(nil), + (*OutboundTaskInfo_WorkerCommandsTask)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[17].OneofWrappers = []any{ + (*ActivityInfo_UseWorkflowBuildIdInfo_)(nil), + (*ActivityInfo_LastIndependentlyAssignedBuildId)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[23].OneofWrappers = []any{ + (*Callback_Nexus_)(nil), + (*Callback_Hsm)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[40].OneofWrappers = []any{ + (*ActivityInfo_PauseInfo_Manual_)(nil), + (*ActivityInfo_PauseInfo_RuleId)(nil), + } + file_temporal_server_api_persistence_v1_executions_proto_msgTypes[46].OneofWrappers = []any{ + (*CallbackInfo_Trigger_WorkflowClosed)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_executions_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_executions_proto_rawDesc), len(file_temporal_server_api_persistence_v1_executions_proto_rawDesc)), NumEnums: 0, - NumMessages: 24, + NumMessages: 47, NumExtensions: 0, NumServices: 0, }, @@ -3744,7 +5704,6 @@ func file_temporal_server_api_persistence_v1_executions_proto_init() { MessageInfos: 
file_temporal_server_api_persistence_v1_executions_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_executions_proto = out.File - file_temporal_server_api_persistence_v1_executions_proto_rawDesc = nil file_temporal_server_api_persistence_v1_executions_proto_goTypes = nil file_temporal_server_api_persistence_v1_executions_proto_depIdxs = nil } diff --git a/api/persistence/v1/history_tree.go-helpers.pb.go b/api/persistence/v1/history_tree.go-helpers.pb.go index 069db71cbf5..e87fc369d71 100644 --- a/api/persistence/v1/history_tree.go-helpers.pb.go +++ b/api/persistence/v1/history_tree.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package persistence diff --git a/api/persistence/v1/history_tree.pb.go b/api/persistence/v1/history_tree.pb.go index 6ad192ea385..69bf7eda958 100644 --- a/api/persistence/v1/history_tree.pb.go +++ b/api/persistence/v1/history_tree.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -46,11 +25,8 @@ const ( // branch column type HistoryTreeInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BranchInfo *HistoryBranch `protobuf:"bytes,1,opt,name=branch_info,json=branchInfo,proto3" json:"branch_info,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + BranchInfo *HistoryBranch `protobuf:"bytes,1,opt,name=branch_info,json=branchInfo,proto3" json:"branch_info,omitempty"` // For fork operation to prevent race condition of leaking event data when forking branches fail. Also can be used for clean up leaked data. ForkTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=fork_time,json=forkTime,proto3" json:"fork_time,omitempty"` // For lookup back to workflow during debugging, also background cleanup when fork operation cannot finish self cleanup due to crash. @@ -58,16 +34,16 @@ type HistoryTreeInfo struct { // Deprecating branch token in favor of branch info. // // Deprecated: Marked as deprecated in temporal/server/api/persistence/v1/history_tree.proto. 
- BranchToken []byte `protobuf:"bytes,4,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + BranchToken []byte `protobuf:"bytes,4,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryTreeInfo) Reset() { *x = HistoryTreeInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryTreeInfo) String() string { @@ -78,7 +54,7 @@ func (*HistoryTreeInfo) ProtoMessage() {} func (x *HistoryTreeInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -124,22 +100,19 @@ func (x *HistoryTreeInfo) GetBranchToken() []byte { // For history persistence to serialize/deserialize branch details. 
type HistoryBranch struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TreeId string `protobuf:"bytes,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` + BranchId string `protobuf:"bytes,2,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` + Ancestors []*HistoryBranchRange `protobuf:"bytes,3,rep,name=ancestors,proto3" json:"ancestors,omitempty"` unknownFields protoimpl.UnknownFields - - TreeId string `protobuf:"bytes,1,opt,name=tree_id,json=treeId,proto3" json:"tree_id,omitempty"` - BranchId string `protobuf:"bytes,2,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` - Ancestors []*HistoryBranchRange `protobuf:"bytes,3,rep,name=ancestors,proto3" json:"ancestors,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HistoryBranch) Reset() { *x = HistoryBranch{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryBranch) String() string { @@ -150,7 +123,7 @@ func (*HistoryBranch) ProtoMessage() {} func (x *HistoryBranch) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -188,25 +161,22 @@ func (x *HistoryBranch) GetAncestors() []*HistoryBranchRange { // HistoryBranchRange represents a piece of range for a branch. 
type HistoryBranchRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // BranchId of original branch forked from. BranchId string `protobuf:"bytes,1,opt,name=branch_id,json=branchId,proto3" json:"branch_id,omitempty"` // Beginning node for the range, inclusive. BeginNodeId int64 `protobuf:"varint,2,opt,name=begin_node_id,json=beginNodeId,proto3" json:"begin_node_id,omitempty"` // Ending node for the range, exclusive. - EndNodeId int64 `protobuf:"varint,3,opt,name=end_node_id,json=endNodeId,proto3" json:"end_node_id,omitempty"` + EndNodeId int64 `protobuf:"varint,3,opt,name=end_node_id,json=endNodeId,proto3" json:"end_node_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryBranchRange) Reset() { *x = HistoryBranchRange{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryBranchRange) String() string { @@ -217,7 +187,7 @@ func (*HistoryBranchRange) ProtoMessage() {} func (x *HistoryBranchRange) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -255,66 +225,38 @@ func (x *HistoryBranchRange) GetEndNodeId() int64 { var File_temporal_server_api_persistence_v1_history_tree_proto protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 
0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x01, 0x0a, 0x0f, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, - 0x54, 0x72, 0x65, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x56, 0x0a, 0x0b, 0x62, 0x72, 0x61, 0x6e, 0x63, - 0x68, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x52, 0x0a, 0x62, 0x72, - 0x61, 0x6e, 0x63, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3b, 0x0a, 0x09, 0x66, - 0x6f, 0x72, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x66, 0x6f, 0x72, 0x6b, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x16, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, 0x0c, 0x62, - 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x0c, - 0x42, 0x04, 0x18, 0x01, 0x68, 0x00, 0x52, 0x0b, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x22, 0xa7, 0x01, 0x0a, 0x0d, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x72, 0x61, - 0x6e, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x72, 0x65, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x72, 0x65, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1f, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x58, - 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x72, 0x61, 0x6e, 0x63, 0x68, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x81, 0x01, 0x0a, 0x12, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x72, - 0x61, 0x6e, 0x63, 0x68, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x09, 0x62, 0x72, 0x61, 0x6e, - 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x72, 0x61, 0x6e, - 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, 0x0d, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x62, 0x65, 0x67, 0x69, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x22, - 0x0a, 0x0b, 0x65, 0x6e, 0x64, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x42, 0x36, 
0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, - 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc = "" + + "\n" + + "5temporal/server/api/persistence/v1/history_tree.proto\x12\"temporal.server.api.persistence.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xd9\x01\n" + + "\x0fHistoryTreeInfo\x12R\n" + + "\vbranch_info\x18\x01 \x01(\v21.temporal.server.api.persistence.v1.HistoryBranchR\n" + + "branchInfo\x127\n" + + "\tfork_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\bforkTime\x12\x12\n" + + "\x04info\x18\x03 \x01(\tR\x04info\x12%\n" + + "\fbranch_token\x18\x04 \x01(\fB\x02\x18\x01R\vbranchToken\"\x9b\x01\n" + + "\rHistoryBranch\x12\x17\n" + + "\atree_id\x18\x01 \x01(\tR\x06treeId\x12\x1b\n" + + "\tbranch_id\x18\x02 \x01(\tR\bbranchId\x12T\n" + + "\tancestors\x18\x03 \x03(\v26.temporal.server.api.persistence.v1.HistoryBranchRangeR\tancestors\"u\n" + + "\x12HistoryBranchRange\x12\x1b\n" + + "\tbranch_id\x18\x01 \x01(\tR\bbranchId\x12\"\n" + + "\rbegin_node_id\x18\x02 \x01(\x03R\vbeginNodeId\x12\x1e\n" + + "\vend_node_id\x18\x03 \x01(\x03R\tendNodeIdB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_history_tree_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_history_tree_proto_rawDescData = file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc + file_temporal_server_api_persistence_v1_history_tree_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_history_tree_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_history_tree_proto_rawDescOnce.Do(func() { - 
file_temporal_server_api_persistence_v1_history_tree_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_history_tree_proto_rawDescData) + file_temporal_server_api_persistence_v1_history_tree_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc), len(file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_history_tree_proto_rawDescData } var file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_temporal_server_api_persistence_v1_history_tree_proto_goTypes = []interface{}{ +var file_temporal_server_api_persistence_v1_history_tree_proto_goTypes = []any{ (*HistoryTreeInfo)(nil), // 0: temporal.server.api.persistence.v1.HistoryTreeInfo (*HistoryBranch)(nil), // 1: temporal.server.api.persistence.v1.HistoryBranch (*HistoryBranchRange)(nil), // 2: temporal.server.api.persistence.v1.HistoryBranchRange @@ -336,49 +278,11 @@ func file_temporal_server_api_persistence_v1_history_tree_proto_init() { if File_temporal_server_api_persistence_v1_history_tree_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryTreeInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryBranch); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*HistoryBranchRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc), len(file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc)), NumEnums: 0, NumMessages: 3, NumExtensions: 0, @@ -389,7 +293,6 @@ func file_temporal_server_api_persistence_v1_history_tree_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_history_tree_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_history_tree_proto = out.File - file_temporal_server_api_persistence_v1_history_tree_proto_rawDesc = nil file_temporal_server_api_persistence_v1_history_tree_proto_goTypes = nil file_temporal_server_api_persistence_v1_history_tree_proto_depIdxs = nil } diff --git a/api/persistence/v1/hsm.go-helpers.pb.go b/api/persistence/v1/hsm.go-helpers.pb.go index 2ca5c5d15f8..032d83c79a4 100644 --- a/api/persistence/v1/hsm.go-helpers.pb.go +++ b/api/persistence/v1/hsm.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence @@ -214,6 +190,43 @@ func (this *StateMachineTaskInfo) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type StateMachineTimerGroup to the protobuf v3 wire format +func (val *StateMachineTimerGroup) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StateMachineTimerGroup from the protobuf v3 wire format +func (val *StateMachineTimerGroup) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StateMachineTimerGroup) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StateMachineTimerGroup values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StateMachineTimerGroup) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StateMachineTimerGroup + switch t := that.(type) { + case *StateMachineTimerGroup: + that1 = t + case StateMachineTimerGroup: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type VersionedTransition to the protobuf v3 wire format func (val *VersionedTransition) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -250,3 +263,114 @@ func (this *VersionedTransition) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type StateMachineTombstoneBatch to the protobuf v3 wire format +func (val *StateMachineTombstoneBatch) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StateMachineTombstoneBatch from the protobuf v3 wire format +func (val *StateMachineTombstoneBatch) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StateMachineTombstoneBatch) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StateMachineTombstoneBatch values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StateMachineTombstoneBatch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StateMachineTombstoneBatch + switch t := that.(type) { + case *StateMachineTombstoneBatch: + that1 = t + case StateMachineTombstoneBatch: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StateMachineTombstone to the protobuf v3 wire format +func (val *StateMachineTombstone) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StateMachineTombstone from the protobuf v3 wire format +func (val *StateMachineTombstone) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StateMachineTombstone) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StateMachineTombstone values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StateMachineTombstone) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StateMachineTombstone + switch t := that.(type) { + case *StateMachineTombstone: + that1 = t + case StateMachineTombstone: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StateMachinePath to the protobuf v3 wire format +func (val *StateMachinePath) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StateMachinePath from the protobuf v3 wire format +func (val *StateMachinePath) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StateMachinePath) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StateMachinePath values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StateMachinePath) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StateMachinePath + switch t := that.(type) { + case *StateMachinePath: + that1 = t + case StateMachinePath: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/hsm.pb.go b/api/persistence/v1/hsm.pb.go index fb01d0ffa7f..741d43af834 100644 --- a/api/persistence/v1/hsm.pb.go +++ b/api/persistence/v1/hsm.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2024 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -31,9 +9,11 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -45,25 +25,43 @@ const ( // A node in a hierarchical state machine tree. type StateMachineNode struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` + // Serialized data of the underlying state machine. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Map of state machine type to a map of machines by ID. 
+ Children map[string]*StateMachineMap `protobuf:"bytes,2,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Versioned transition when the node was instantiated. + // This field, plus node path uniquely identifies a state machine node in a mutable state instance. + // This field will always be set even when transition history is disabled. + // NOTE: If transition history is disabled, the transition_count field will be 0 and + // cannot be used to uniquely identify a node. + // NOTE: Node deletion is not yet implemented at the time of writing so we can still uniquely identify a node just + // with the initial namespace failover version. + InitialVersionedTransition *VersionedTransition `protobuf:"bytes,3,opt,name=initial_versioned_transition,json=initialVersionedTransition,proto3" json:"initial_versioned_transition,omitempty"` + // Versioned transition when the node was last updated. + // This field will always be set even when transition history is disabled. + // NOTE: If transition history is disabled, the transition_count field will be 0 and + // cannot be used for non-concurrent task staleness check or to determine whether this node should be synced + // during state replication. + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,4,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` // Number of transitions on this state machine object. // Used to verify that a task is not stale if the state machine does not allow concurrent task execution. - TransitionCount int64 `protobuf:"varint,1,opt,name=transition_count,json=transitionCount,proto3" json:"transition_count,omitempty"` - // Seralized data of the underlying state machine. 
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Children map[int32]*StateMachineMap `protobuf:"bytes,3,rep,name=children,proto3" json:"children,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The transition count monotonically increases with each state transition and only resets when the entire + // mutable state was rebuilt. This case is handled by the task_generation_shard_clock_timestamp field in + // WorkflowExecutionInfo. + // NOTE: This field is cluster specific and cannot be replicated. + // NOTE: This field will be made obsolete when transition history is enabled in favor of + // last_update_versioned_transition. + TransitionCount int64 `protobuf:"varint,100,opt,name=transition_count,json=transitionCount,proto3" json:"transition_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StateMachineNode) Reset() { *x = StateMachineNode{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StateMachineNode) String() string { @@ -74,7 +72,7 @@ func (*StateMachineNode) ProtoMessage() {} func (x *StateMachineNode) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -89,46 +87,57 @@ func (*StateMachineNode) Descriptor() ([]byte, []int) { return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{0} } -func (x *StateMachineNode) GetTransitionCount() int64 { +func (x *StateMachineNode) GetData() 
[]byte { if x != nil { - return x.TransitionCount + return x.Data } - return 0 + return nil } -func (x *StateMachineNode) GetData() []byte { +func (x *StateMachineNode) GetChildren() map[string]*StateMachineMap { if x != nil { - return x.Data + return x.Children } return nil } -func (x *StateMachineNode) GetChildren() map[int32]*StateMachineMap { +func (x *StateMachineNode) GetInitialVersionedTransition() *VersionedTransition { if x != nil { - return x.Children + return x.InitialVersionedTransition } return nil } +func (x *StateMachineNode) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +func (x *StateMachineNode) GetTransitionCount() int64 { + if x != nil { + return x.TransitionCount + } + return 0 +} + // Map of state machine ID to StateMachineNode. type StateMachineMap struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // (-- api-linter: core::0140::prepositions=disabled // // aip.dev/not-precedent: "by" is used to clarify the keys and values. 
--) - MachinesById map[string]*StateMachineNode `protobuf:"bytes,1,rep,name=machines_by_id,json=machinesById,proto3" json:"machines_by_id,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + MachinesById map[string]*StateMachineNode `protobuf:"bytes,1,rep,name=machines_by_id,json=machinesById,proto3" json:"machines_by_id,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StateMachineMap) Reset() { *x = StateMachineMap{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StateMachineMap) String() string { @@ -139,7 +148,7 @@ func (*StateMachineMap) ProtoMessage() {} func (x *StateMachineMap) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -162,23 +171,20 @@ func (x *StateMachineMap) GetMachinesById() map[string]*StateMachineNode { } type StateMachineKey struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Addressable type of the corresponding state machine in a single tree level. - Type int32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Addressable ID of the corresponding state machine in a single tree level. 
- Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StateMachineKey) Reset() { *x = StateMachineKey{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StateMachineKey) String() string { @@ -189,7 +195,7 @@ func (*StateMachineKey) ProtoMessage() {} func (x *StateMachineKey) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -204,11 +210,11 @@ func (*StateMachineKey) Descriptor() ([]byte, []int) { return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{2} } -func (x *StateMachineKey) GetType() int32 { +func (x *StateMachineKey) GetType() string { if x != nil { return x.Type } - return 0 + return "" } func (x *StateMachineKey) GetId() string { @@ -220,31 +226,43 @@ func (x *StateMachineKey) GetId() string { // A reference to a state machine at a point in time. type StateMachineRef struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Nested path to a state machine. Path []*StateMachineKey `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` - // Namespace failover version on the corresponding mutable state object, used for staleness detection when global - // namespaces are enabled. 
- MutableStateNamespaceFailoverVersion int64 `protobuf:"varint,2,opt,name=mutable_state_namespace_failover_version,json=mutableStateNamespaceFailoverVersion,proto3" json:"mutable_state_namespace_failover_version,omitempty"` - // Number of transitions on the corresponding mutable state object. Used to verify that a task is not referencing a - // stale state or, in some situations, that the task itself is not stale. - MutableStateTransitionCount int64 `protobuf:"varint,3,opt,name=mutable_state_transition_count,json=mutableStateTransitionCount,proto3" json:"mutable_state_transition_count,omitempty"` + // Versioned transition of the ref was instantiated. + // Used to verify that the ref is not referencing a stale state or, in some situations, + // that the ref itself is not stale. + // NOTE: If transition history is disabled, the field will not be specified and + // cannot be used for staleness check. + MutableStateVersionedTransition *VersionedTransition `protobuf:"bytes,2,opt,name=mutable_state_versioned_transition,json=mutableStateVersionedTransition,proto3" json:"mutable_state_versioned_transition,omitempty"` + // Versioned transition when the state machine node was instantiated. + // This field, plus node path uniquely identifies a state machine node in a mutable state instance. + // This field will always be set even when transition history is disabled. + // NOTE: If transition history is disabled, the transition_count field will be 0 and + // cannot be used to uniquely identify a node. + // NOTE: Node deletion is not yet implemented at the time of writing so we can still uniquely identify a node just + // with the initial namespace failover version. + MachineInitialVersionedTransition *VersionedTransition `protobuf:"bytes,3,opt,name=machine_initial_versioned_transition,json=machineInitialVersionedTransition,proto3" json:"machine_initial_versioned_transition,omitempty"` + // Versioned transition when the state machine node was last updated. 
+ // If not specified, this reference is considered non-concurrent, + // and should match the last_update_versioned_transition on the corresponding state machine node. + // NOTE: If transition history is disabled, the transition_count field will be 0 and + // cannot be used for non-concurrent task staleness check. + MachineLastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,4,opt,name=machine_last_update_versioned_transition,json=machineLastUpdateVersionedTransition,proto3" json:"machine_last_update_versioned_transition,omitempty"` // Number of transitions executed on the referenced state machine node at the time this Ref is instantiated. - // If non-zero, the state machine is assumed to support only non-concurrent tasks, and this number should match the - // number of state transitions on the corresponding state machine object. - MachineTransitionCount int64 `protobuf:"varint,4,opt,name=machine_transition_count,json=machineTransitionCount,proto3" json:"machine_transition_count,omitempty"` + // If non-zero, this reference is considered non-concurrent and this number should match the number of state + // transitions on the corresponding state machine node. + // This field will be obsolete once mutable state transition history is productionized. 
+ MachineTransitionCount int64 `protobuf:"varint,100,opt,name=machine_transition_count,json=machineTransitionCount,proto3" json:"machine_transition_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StateMachineRef) Reset() { *x = StateMachineRef{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StateMachineRef) String() string { @@ -255,7 +273,7 @@ func (*StateMachineRef) ProtoMessage() {} func (x *StateMachineRef) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -277,18 +295,25 @@ func (x *StateMachineRef) GetPath() []*StateMachineKey { return nil } -func (x *StateMachineRef) GetMutableStateNamespaceFailoverVersion() int64 { +func (x *StateMachineRef) GetMutableStateVersionedTransition() *VersionedTransition { if x != nil { - return x.MutableStateNamespaceFailoverVersion + return x.MutableStateVersionedTransition } - return 0 + return nil } -func (x *StateMachineRef) GetMutableStateTransitionCount() int64 { +func (x *StateMachineRef) GetMachineInitialVersionedTransition() *VersionedTransition { if x != nil { - return x.MutableStateTransitionCount + return x.MachineInitialVersionedTransition } - return 0 + return nil +} + +func (x *StateMachineRef) GetMachineLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.MachineLastUpdateVersionedTransition + } + return nil } func (x *StateMachineRef) GetMachineTransitionCount() int64 { @@ -299,25 +324,22 @@ func (x 
*StateMachineRef) GetMachineTransitionCount() int64 { } type StateMachineTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Reference to a state machine. Ref *StateMachineRef `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // Task type. Not to be confused with the state machine's type in the `ref` field. - Type int32 `protobuf:"varint,2,opt,name=type,proto3" json:"type,omitempty"` + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` // Opaque data attached to this task. May be nil. Deserialized by a registered TaskSerializer for this type. - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StateMachineTaskInfo) Reset() { *x = StateMachineTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StateMachineTaskInfo) String() string { @@ -328,7 +350,7 @@ func (*StateMachineTaskInfo) ProtoMessage() {} func (x *StateMachineTaskInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -350,11 +372,11 @@ func (x *StateMachineTaskInfo) GetRef() *StateMachineRef { return nil } -func (x *StateMachineTaskInfo) GetType() int32 { +func (x *StateMachineTaskInfo) GetType() string { if x != nil { return 
x.Type } - return 0 + return "" } func (x *StateMachineTaskInfo) GetData() []byte { @@ -364,34 +386,89 @@ func (x *StateMachineTaskInfo) GetData() []byte { return nil } -// Keeps track of the ranges of transition counts per namespace failover version. -// Each task generated by the HSM framework is imprinted with the current transaction’s `NamespaceFailoverVersion` and -// `MaxTransitionCount` at the end of the transaction. -// When a task is being processed, the `StateTransitionHistory` is compared with the imprinted task information to -// verify that a task is not referencing a stale state or that the task itself is not stale. -// For example, if the state has a history of `[{v: 1, t: 3}, {v: 2, t: 5}]`, task A `{v: 2, t: 4}` **is not** -// referencing stale state because for version `2` transitions `4-5` are valid, while task B `{v: 2, t: 6}` **is** -// referencing stale state because the transition count is out of range for version `2`. -// Furthermore, task C `{v: 1, t: 4}` itself is stale because it is referencing an impossible state, likely due to post -// split-brain reconciliation. -type VersionedTransition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// A group of state machine timer tasks for a given deadline, used for collapsing state machine timer tasks. +type StateMachineTimerGroup struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Task information. + Infos []*StateMachineTaskInfo `protobuf:"bytes,1,rep,name=infos,proto3" json:"infos,omitempty"` + // When this timer should be fired. + // (-- api-linter: core::0142::time-field-names=disabled + // + // aip.dev/not-precedent: Ignoring lint rules. --) + Deadline *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=deadline,proto3" json:"deadline,omitempty"` + // Whether or not a task was put in the queue for this group's deadline. 
+ Scheduled bool `protobuf:"varint,3,opt,name=scheduled,proto3" json:"scheduled,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StateMachineTimerGroup) Reset() { + *x = StateMachineTimerGroup{} + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StateMachineTimerGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateMachineTimerGroup) ProtoMessage() {} + +func (x *StateMachineTimerGroup) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateMachineTimerGroup.ProtoReflect.Descriptor instead. +func (*StateMachineTimerGroup) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{5} +} + +func (x *StateMachineTimerGroup) GetInfos() []*StateMachineTaskInfo { + if x != nil { + return x.Infos + } + return nil +} +func (x *StateMachineTimerGroup) GetDeadline() *timestamppb.Timestamp { + if x != nil { + return x.Deadline + } + return nil +} + +func (x *StateMachineTimerGroup) GetScheduled() bool { + if x != nil { + return x.Scheduled + } + return false +} + +// VersionedTransition is a unique identifier for a specific mutable state transition. +type VersionedTransition struct { + state protoimpl.MessageState `protogen:"open.v1"` // The namespace failover version at transition time. NamespaceFailoverVersion int64 `protobuf:"varint,1,opt,name=namespace_failover_version,json=namespaceFailoverVersion,proto3" json:"namespace_failover_version,omitempty"` - // Maximum state transition count perceived during the specified namespace_failover_version. 
- MaxTransitionCount int64 `protobuf:"varint,2,opt,name=max_transition_count,json=maxTransitionCount,proto3" json:"max_transition_count,omitempty"` + // State transition count perceived during the specified namespace_failover_version. + TransitionCount int64 `protobuf:"varint,2,opt,name=transition_count,json=transitionCount,proto3" json:"transition_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *VersionedTransition) Reset() { *x = VersionedTransition{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VersionedTransition) String() string { @@ -401,8 +478,8 @@ func (x *VersionedTransition) String() string { func (*VersionedTransition) ProtoMessage() {} func (x *VersionedTransition) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -414,7 +491,7 @@ func (x *VersionedTransition) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionedTransition.ProtoReflect.Descriptor instead. 
func (*VersionedTransition) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{5} + return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{6} } func (x *VersionedTransition) GetNamespaceFailoverVersion() int64 { @@ -424,136 +501,397 @@ func (x *VersionedTransition) GetNamespaceFailoverVersion() int64 { return 0 } -func (x *VersionedTransition) GetMaxTransitionCount() int64 { +func (x *VersionedTransition) GetTransitionCount() int64 { if x != nil { - return x.MaxTransitionCount + return x.TransitionCount } return 0 } -var File_temporal_server_api_persistence_v1_hsm_proto protoreflect.FileDescriptor +type StateMachineTombstoneBatch struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The versioned transition in which the tombstones were created. + VersionedTransition *VersionedTransition `protobuf:"bytes,1,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + StateMachineTombstones []*StateMachineTombstone `protobuf:"bytes,2,rep,name=state_machine_tombstones,json=stateMachineTombstones,proto3" json:"state_machine_tombstones,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StateMachineTombstoneBatch) Reset() { + *x = StateMachineTombstoneBatch{} + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StateMachineTombstoneBatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateMachineTombstoneBatch) ProtoMessage() {} + +func (x *StateMachineTombstoneBatch) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) 
+} + +// Deprecated: Use StateMachineTombstoneBatch.ProtoReflect.Descriptor instead. +func (*StateMachineTombstoneBatch) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{7} +} -var file_temporal_server_api_persistence_v1_hsm_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x68, 0x73, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, 0xb7, 0x02, - 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4e, 0x6f, 0x64, - 0x65, 0x12, 0x2d, 0x0a, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x16, 0x0a, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x62, 0x0a, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, - 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, - 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x1a, 
0x78, 0x0a, 0x0d, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, - 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4d, 0x61, 0x70, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x81, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, - 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4d, 0x61, 0x70, 0x12, 0x6f, 0x0a, 0x0e, 0x6d, 0x61, 0x63, - 0x68, 0x69, 0x6e, 0x65, 0x73, 0x5f, 0x62, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x45, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4d, - 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x42, 0x79, 0x49, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x42, 0x79, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x1a, 0x7d, 0x0a, 0x11, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x73, 0x42, - 0x79, 0x49, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 
0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3d, 0x0a, 0x0f, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x12, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc1, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x66, 0x12, 0x4b, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x4b, 0x65, 0x79, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x5a, 0x0a, 0x28, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x66, 0x61, - 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x24, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x1e, 0x6d, 0x75, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x72, 0x61, 
0x6e, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x1b, 0x6d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x72, 0x61, - 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x3c, 0x0a, 0x18, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x16, 0x6d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x22, 0x91, 0x01, 0x0a, 0x14, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x49, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x66, - 0x52, 0x03, 0x72, 0x65, 0x66, 0x42, 0x02, 0x68, 0x00, 0x12, 0x16, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x16, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, - 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x22, 0x8d, 0x01, 0x0a, 0x13, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x64, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x1a, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, - 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, - 0x6e, 0x61, 
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, - 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x6d, - 0x61, 0x78, 0x5f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x54, 0x72, 0x61, 0x6e, - 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x02, 0x68, 0x00, 0x42, 0x36, - 0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *StateMachineTombstoneBatch) GetVersionedTransition() *VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} + +func (x *StateMachineTombstoneBatch) GetStateMachineTombstones() []*StateMachineTombstone { + if x != nil { + return x.StateMachineTombstones + } + return nil +} + +type StateMachineTombstone struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to StateMachineKey: + // + // *StateMachineTombstone_ActivityScheduledEventId + // *StateMachineTombstone_TimerId + // *StateMachineTombstone_ChildExecutionInitiatedEventId + // *StateMachineTombstone_RequestCancelInitiatedEventId + // *StateMachineTombstone_SignalExternalInitiatedEventId + // *StateMachineTombstone_UpdateId + // *StateMachineTombstone_StateMachinePath + // *StateMachineTombstone_ChasmNodePath + StateMachineKey isStateMachineTombstone_StateMachineKey `protobuf_oneof:"state_machine_key"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StateMachineTombstone) Reset() { + *x = StateMachineTombstone{} + mi := 
&file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StateMachineTombstone) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateMachineTombstone) ProtoMessage() {} + +func (x *StateMachineTombstone) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateMachineTombstone.ProtoReflect.Descriptor instead. +func (*StateMachineTombstone) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{8} +} + +func (x *StateMachineTombstone) GetStateMachineKey() isStateMachineTombstone_StateMachineKey { + if x != nil { + return x.StateMachineKey + } + return nil +} + +func (x *StateMachineTombstone) GetActivityScheduledEventId() int64 { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_ActivityScheduledEventId); ok { + return x.ActivityScheduledEventId + } + } + return 0 +} + +func (x *StateMachineTombstone) GetTimerId() string { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_TimerId); ok { + return x.TimerId + } + } + return "" +} + +func (x *StateMachineTombstone) GetChildExecutionInitiatedEventId() int64 { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_ChildExecutionInitiatedEventId); ok { + return x.ChildExecutionInitiatedEventId + } + } + return 0 +} + +func (x *StateMachineTombstone) GetRequestCancelInitiatedEventId() int64 { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_RequestCancelInitiatedEventId); ok { + return x.RequestCancelInitiatedEventId + } + } + return 0 +} + +func (x *StateMachineTombstone) 
GetSignalExternalInitiatedEventId() int64 { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_SignalExternalInitiatedEventId); ok { + return x.SignalExternalInitiatedEventId + } + } + return 0 +} + +func (x *StateMachineTombstone) GetUpdateId() string { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_UpdateId); ok { + return x.UpdateId + } + } + return "" +} + +func (x *StateMachineTombstone) GetStateMachinePath() *StateMachinePath { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_StateMachinePath); ok { + return x.StateMachinePath + } + } + return nil } +func (x *StateMachineTombstone) GetChasmNodePath() string { + if x != nil { + if x, ok := x.StateMachineKey.(*StateMachineTombstone_ChasmNodePath); ok { + return x.ChasmNodePath + } + } + return "" +} + +type isStateMachineTombstone_StateMachineKey interface { + isStateMachineTombstone_StateMachineKey() +} + +type StateMachineTombstone_ActivityScheduledEventId struct { + ActivityScheduledEventId int64 `protobuf:"varint,1,opt,name=activity_scheduled_event_id,json=activityScheduledEventId,proto3,oneof"` +} + +type StateMachineTombstone_TimerId struct { + TimerId string `protobuf:"bytes,2,opt,name=timer_id,json=timerId,proto3,oneof"` +} + +type StateMachineTombstone_ChildExecutionInitiatedEventId struct { + ChildExecutionInitiatedEventId int64 `protobuf:"varint,3,opt,name=child_execution_initiated_event_id,json=childExecutionInitiatedEventId,proto3,oneof"` +} + +type StateMachineTombstone_RequestCancelInitiatedEventId struct { + RequestCancelInitiatedEventId int64 `protobuf:"varint,4,opt,name=request_cancel_initiated_event_id,json=requestCancelInitiatedEventId,proto3,oneof"` +} + +type StateMachineTombstone_SignalExternalInitiatedEventId struct { + SignalExternalInitiatedEventId int64 `protobuf:"varint,5,opt,name=signal_external_initiated_event_id,json=signalExternalInitiatedEventId,proto3,oneof"` +} + +type StateMachineTombstone_UpdateId struct { 
+ UpdateId string `protobuf:"bytes,6,opt,name=update_id,json=updateId,proto3,oneof"` +} + +type StateMachineTombstone_StateMachinePath struct { + StateMachinePath *StateMachinePath `protobuf:"bytes,7,opt,name=state_machine_path,json=stateMachinePath,proto3,oneof"` +} + +type StateMachineTombstone_ChasmNodePath struct { + ChasmNodePath string `protobuf:"bytes,8,opt,name=chasm_node_path,json=chasmNodePath,proto3,oneof"` +} + +func (*StateMachineTombstone_ActivityScheduledEventId) isStateMachineTombstone_StateMachineKey() {} + +func (*StateMachineTombstone_TimerId) isStateMachineTombstone_StateMachineKey() {} + +func (*StateMachineTombstone_ChildExecutionInitiatedEventId) isStateMachineTombstone_StateMachineKey() { +} + +func (*StateMachineTombstone_RequestCancelInitiatedEventId) isStateMachineTombstone_StateMachineKey() { +} + +func (*StateMachineTombstone_SignalExternalInitiatedEventId) isStateMachineTombstone_StateMachineKey() { +} + +func (*StateMachineTombstone_UpdateId) isStateMachineTombstone_StateMachineKey() {} + +func (*StateMachineTombstone_StateMachinePath) isStateMachineTombstone_StateMachineKey() {} + +func (*StateMachineTombstone_ChasmNodePath) isStateMachineTombstone_StateMachineKey() {} + +type StateMachinePath struct { + state protoimpl.MessageState `protogen:"open.v1"` + Path []*StateMachineKey `protobuf:"bytes,1,rep,name=path,proto3" json:"path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StateMachinePath) Reset() { + *x = StateMachinePath{} + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StateMachinePath) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateMachinePath) ProtoMessage() {} + +func (x *StateMachinePath) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[9] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateMachinePath.ProtoReflect.Descriptor instead. +func (*StateMachinePath) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP(), []int{9} +} + +func (x *StateMachinePath) GetPath() []*StateMachineKey { + if x != nil { + return x.Path + } + return nil +} + +var File_temporal_server_api_persistence_v1_hsm_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_hsm_proto_rawDesc = "" + + "\n" + + ",temporal/server/api/persistence/v1/hsm.proto\x12\"temporal.server.api.persistence.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa1\x04\n" + + "\x10StateMachineNode\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\x12^\n" + + "\bchildren\x18\x02 \x03(\v2B.temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntryR\bchildren\x12y\n" + + "\x1cinitial_versioned_transition\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1ainitialVersionedTransition\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x04 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\x12)\n" + + "\x10transition_count\x18d \x01(\x03R\x0ftransitionCount\x1ap\n" + + "\rChildrenEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12I\n" + + "\x05value\x18\x02 \x01(\v23.temporal.server.api.persistence.v1.StateMachineMapR\x05value:\x028\x01\"\xf5\x01\n" + + "\x0fStateMachineMap\x12k\n" + + "\x0emachines_by_id\x18\x01 \x03(\v2E.temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntryR\fmachinesById\x1au\n" + + "\x11MachinesByIdEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12J\n" + + "\x05value\x18\x02 \x01(\v24.temporal.server.api.persistence.v1.StateMachineNodeR\x05value:\x028\x01\"5\n" + + "\x0fStateMachineKey\x12\x12\n" + + "\x04type\x18\x01 
\x01(\tR\x04type\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\"\xb8\x04\n" + + "\x0fStateMachineRef\x12G\n" + + "\x04path\x18\x01 \x03(\v23.temporal.server.api.persistence.v1.StateMachineKeyR\x04path\x12\x84\x01\n" + + "\"mutable_state_versioned_transition\x18\x02 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1fmutableStateVersionedTransition\x12\x88\x01\n" + + "$machine_initial_versioned_transition\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR!machineInitialVersionedTransition\x12\x8f\x01\n" + + "(machine_last_update_versioned_transition\x18\x04 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR$machineLastUpdateVersionedTransition\x128\n" + + "\x18machine_transition_count\x18d \x01(\x03R\x16machineTransitionCount\"\x85\x01\n" + + "\x14StateMachineTaskInfo\x12E\n" + + "\x03ref\x18\x01 \x01(\v23.temporal.server.api.persistence.v1.StateMachineRefR\x03ref\x12\x12\n" + + "\x04type\x18\x02 \x01(\tR\x04type\x12\x12\n" + + "\x04data\x18\x03 \x01(\fR\x04data\"\xbe\x01\n" + + "\x16StateMachineTimerGroup\x12N\n" + + "\x05infos\x18\x01 \x03(\v28.temporal.server.api.persistence.v1.StateMachineTaskInfoR\x05infos\x126\n" + + "\bdeadline\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\bdeadline\x12\x1c\n" + + "\tscheduled\x18\x03 \x01(\bR\tscheduled\"~\n" + + "\x13VersionedTransition\x12<\n" + + "\x1anamespace_failover_version\x18\x01 \x01(\x03R\x18namespaceFailoverVersion\x12)\n" + + "\x10transition_count\x18\x02 \x01(\x03R\x0ftransitionCount\"\xfd\x01\n" + + "\x1aStateMachineTombstoneBatch\x12j\n" + + "\x14versioned_transition\x18\x01 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12s\n" + + "\x18state_machine_tombstones\x18\x02 \x03(\v29.temporal.server.api.persistence.v1.StateMachineTombstoneR\x16stateMachineTombstones\"\xa1\x04\n" + + "\x15StateMachineTombstone\x12?\n" + + "\x1bactivity_scheduled_event_id\x18\x01 \x01(\x03H\x00R\x18activityScheduledEventId\x12\x1b\n" 
+ + "\btimer_id\x18\x02 \x01(\tH\x00R\atimerId\x12L\n" + + "\"child_execution_initiated_event_id\x18\x03 \x01(\x03H\x00R\x1echildExecutionInitiatedEventId\x12J\n" + + "!request_cancel_initiated_event_id\x18\x04 \x01(\x03H\x00R\x1drequestCancelInitiatedEventId\x12L\n" + + "\"signal_external_initiated_event_id\x18\x05 \x01(\x03H\x00R\x1esignalExternalInitiatedEventId\x12\x1d\n" + + "\tupdate_id\x18\x06 \x01(\tH\x00R\bupdateId\x12d\n" + + "\x12state_machine_path\x18\a \x01(\v24.temporal.server.api.persistence.v1.StateMachinePathH\x00R\x10stateMachinePath\x12(\n" + + "\x0fchasm_node_path\x18\b \x01(\tH\x00R\rchasmNodePathB\x13\n" + + "\x11state_machine_key\"[\n" + + "\x10StateMachinePath\x12G\n" + + "\x04path\x18\x01 \x03(\v23.temporal.server.api.persistence.v1.StateMachineKeyR\x04pathB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + var ( file_temporal_server_api_persistence_v1_hsm_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_hsm_proto_rawDescData = file_temporal_server_api_persistence_v1_hsm_proto_rawDesc + file_temporal_server_api_persistence_v1_hsm_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_hsm_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_hsm_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_hsm_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_hsm_proto_rawDescData) + file_temporal_server_api_persistence_v1_hsm_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_hsm_proto_rawDesc), len(file_temporal_server_api_persistence_v1_hsm_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_hsm_proto_rawDescData } -var file_temporal_server_api_persistence_v1_hsm_proto_msgTypes = make([]protoimpl.MessageInfo, 8) -var file_temporal_server_api_persistence_v1_hsm_proto_goTypes = []interface{}{ - (*StateMachineNode)(nil), // 0: 
temporal.server.api.persistence.v1.StateMachineNode - (*StateMachineMap)(nil), // 1: temporal.server.api.persistence.v1.StateMachineMap - (*StateMachineKey)(nil), // 2: temporal.server.api.persistence.v1.StateMachineKey - (*StateMachineRef)(nil), // 3: temporal.server.api.persistence.v1.StateMachineRef - (*StateMachineTaskInfo)(nil), // 4: temporal.server.api.persistence.v1.StateMachineTaskInfo - (*VersionedTransition)(nil), // 5: temporal.server.api.persistence.v1.VersionedTransition - nil, // 6: temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntry - nil, // 7: temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntry +var file_temporal_server_api_persistence_v1_hsm_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_temporal_server_api_persistence_v1_hsm_proto_goTypes = []any{ + (*StateMachineNode)(nil), // 0: temporal.server.api.persistence.v1.StateMachineNode + (*StateMachineMap)(nil), // 1: temporal.server.api.persistence.v1.StateMachineMap + (*StateMachineKey)(nil), // 2: temporal.server.api.persistence.v1.StateMachineKey + (*StateMachineRef)(nil), // 3: temporal.server.api.persistence.v1.StateMachineRef + (*StateMachineTaskInfo)(nil), // 4: temporal.server.api.persistence.v1.StateMachineTaskInfo + (*StateMachineTimerGroup)(nil), // 5: temporal.server.api.persistence.v1.StateMachineTimerGroup + (*VersionedTransition)(nil), // 6: temporal.server.api.persistence.v1.VersionedTransition + (*StateMachineTombstoneBatch)(nil), // 7: temporal.server.api.persistence.v1.StateMachineTombstoneBatch + (*StateMachineTombstone)(nil), // 8: temporal.server.api.persistence.v1.StateMachineTombstone + (*StateMachinePath)(nil), // 9: temporal.server.api.persistence.v1.StateMachinePath + nil, // 10: temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntry + nil, // 11: temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntry + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp } var 
file_temporal_server_api_persistence_v1_hsm_proto_depIdxs = []int32{ - 6, // 0: temporal.server.api.persistence.v1.StateMachineNode.children:type_name -> temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntry - 7, // 1: temporal.server.api.persistence.v1.StateMachineMap.machines_by_id:type_name -> temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntry - 2, // 2: temporal.server.api.persistence.v1.StateMachineRef.path:type_name -> temporal.server.api.persistence.v1.StateMachineKey - 3, // 3: temporal.server.api.persistence.v1.StateMachineTaskInfo.ref:type_name -> temporal.server.api.persistence.v1.StateMachineRef - 1, // 4: temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntry.value:type_name -> temporal.server.api.persistence.v1.StateMachineMap - 0, // 5: temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntry.value:type_name -> temporal.server.api.persistence.v1.StateMachineNode - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 10, // 0: temporal.server.api.persistence.v1.StateMachineNode.children:type_name -> temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntry + 6, // 1: temporal.server.api.persistence.v1.StateMachineNode.initial_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 6, // 2: temporal.server.api.persistence.v1.StateMachineNode.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 11, // 3: temporal.server.api.persistence.v1.StateMachineMap.machines_by_id:type_name -> temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntry + 2, // 4: temporal.server.api.persistence.v1.StateMachineRef.path:type_name -> temporal.server.api.persistence.v1.StateMachineKey + 6, // 5: 
temporal.server.api.persistence.v1.StateMachineRef.mutable_state_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 6, // 6: temporal.server.api.persistence.v1.StateMachineRef.machine_initial_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 6, // 7: temporal.server.api.persistence.v1.StateMachineRef.machine_last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 3, // 8: temporal.server.api.persistence.v1.StateMachineTaskInfo.ref:type_name -> temporal.server.api.persistence.v1.StateMachineRef + 4, // 9: temporal.server.api.persistence.v1.StateMachineTimerGroup.infos:type_name -> temporal.server.api.persistence.v1.StateMachineTaskInfo + 12, // 10: temporal.server.api.persistence.v1.StateMachineTimerGroup.deadline:type_name -> google.protobuf.Timestamp + 6, // 11: temporal.server.api.persistence.v1.StateMachineTombstoneBatch.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 8, // 12: temporal.server.api.persistence.v1.StateMachineTombstoneBatch.state_machine_tombstones:type_name -> temporal.server.api.persistence.v1.StateMachineTombstone + 9, // 13: temporal.server.api.persistence.v1.StateMachineTombstone.state_machine_path:type_name -> temporal.server.api.persistence.v1.StateMachinePath + 2, // 14: temporal.server.api.persistence.v1.StateMachinePath.path:type_name -> temporal.server.api.persistence.v1.StateMachineKey + 1, // 15: temporal.server.api.persistence.v1.StateMachineNode.ChildrenEntry.value:type_name -> temporal.server.api.persistence.v1.StateMachineMap + 0, // 16: temporal.server.api.persistence.v1.StateMachineMap.MachinesByIdEntry.value:type_name -> temporal.server.api.persistence.v1.StateMachineNode + 17, // [17:17] is the sub-list for method output_type + 17, // [17:17] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the 
sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name } func init() { file_temporal_server_api_persistence_v1_hsm_proto_init() } @@ -561,87 +899,23 @@ func file_temporal_server_api_persistence_v1_hsm_proto_init() { if File_temporal_server_api_persistence_v1_hsm_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateMachineNode); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateMachineMap); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateMachineKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateMachineRef); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateMachineTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionedTransition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 
2: - return &v.unknownFields - default: - return nil - } - } + file_temporal_server_api_persistence_v1_hsm_proto_msgTypes[8].OneofWrappers = []any{ + (*StateMachineTombstone_ActivityScheduledEventId)(nil), + (*StateMachineTombstone_TimerId)(nil), + (*StateMachineTombstone_ChildExecutionInitiatedEventId)(nil), + (*StateMachineTombstone_RequestCancelInitiatedEventId)(nil), + (*StateMachineTombstone_SignalExternalInitiatedEventId)(nil), + (*StateMachineTombstone_UpdateId)(nil), + (*StateMachineTombstone_StateMachinePath)(nil), + (*StateMachineTombstone_ChasmNodePath)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_hsm_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_hsm_proto_rawDesc), len(file_temporal_server_api_persistence_v1_hsm_proto_rawDesc)), NumEnums: 0, - NumMessages: 8, + NumMessages: 12, NumExtensions: 0, NumServices: 0, }, @@ -650,7 +924,6 @@ func file_temporal_server_api_persistence_v1_hsm_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_hsm_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_hsm_proto = out.File - file_temporal_server_api_persistence_v1_hsm_proto_rawDesc = nil file_temporal_server_api_persistence_v1_hsm_proto_goTypes = nil file_temporal_server_api_persistence_v1_hsm_proto_depIdxs = nil } diff --git a/api/persistence/v1/namespaces.go-helpers.pb.go b/api/persistence/v1/namespaces.go-helpers.pb.go index 146072818c5..ec3414b7059 100644 --- a/api/persistence/v1/namespaces.go-helpers.pb.go +++ b/api/persistence/v1/namespaces.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence diff --git a/api/persistence/v1/namespaces.pb.go b/api/persistence/v1/namespaces.pb.go index 5431161945f..8ff5784f199 100644 --- a/api/persistence/v1/namespaces.pb.go +++ b/api/persistence/v1/namespaces.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,9 +9,11 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/enums/v1" v11 "go.temporal.io/api/namespace/v1" + v12 "go.temporal.io/api/rules/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -49,10 +29,7 @@ const ( // detail column type NamespaceDetail struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Info *NamespaceInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` Config *NamespaceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` ReplicationConfig *NamespaceReplicationConfig `protobuf:"bytes,3,opt,name=replication_config,json=replicationConfig,proto3" json:"replication_config,omitempty"` @@ -60,18 +37,15 @@ type NamespaceDetail struct { FailoverNotificationVersion int64 `protobuf:"varint,5,opt,name=failover_notification_version,json=failoverNotificationVersion,proto3" json:"failover_notification_version,omitempty"` FailoverVersion int64 `protobuf:"varint,6,opt,name=failover_version,json=failoverVersion,proto3" json:"failover_version,omitempty"` FailoverEndTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=failover_end_time,json=failoverEndTime,proto3" json:"failover_end_time,omitempty"` - // The list of registered Nexus outgoing services that are used by this namespace. This must remain sorted in - // ascending order by service name. 
- OutgoingServices []*NexusOutgoingService `protobuf:"bytes,8,rep,name=outgoing_services,json=outgoingServices,proto3" json:"outgoing_services,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NamespaceDetail) Reset() { *x = NamespaceDetail{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceDetail) String() string { @@ -82,7 +56,7 @@ func (*NamespaceDetail) ProtoMessage() {} func (x *NamespaceDetail) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -146,33 +120,23 @@ func (x *NamespaceDetail) GetFailoverEndTime() *timestamppb.Timestamp { return nil } -func (x *NamespaceDetail) GetOutgoingServices() []*NexusOutgoingService { - if x != nil { - return x.OutgoingServices - } - return nil -} - type NamespaceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + State v1.NamespaceState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.api.enums.v1.NamespaceState" json:"state,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Owner string `protobuf:"bytes,5,opt,name=owner,proto3" json:"owner,omitempty"` + Data map[string]string `protobuf:"bytes,6,rep,name=data,proto3" 
json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - State v1.NamespaceState `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.api.enums.v1.NamespaceState" json:"state,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - Owner string `protobuf:"bytes,5,opt,name=owner,proto3" json:"owner,omitempty"` - Data map[string]string `protobuf:"bytes,6,rep,name=data,proto3" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *NamespaceInfo) Reset() { *x = NamespaceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceInfo) String() string { @@ -183,7 +147,7 @@ func (*NamespaceInfo) ProtoMessage() {} func (x *NamespaceInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -241,27 +205,25 @@ func (x *NamespaceInfo) GetData() map[string]string { } type NamespaceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Retention *durationpb.Duration `protobuf:"bytes,1,opt,name=retention,proto3" json:"retention,omitempty"` - ArchivalBucket string 
`protobuf:"bytes,2,opt,name=archival_bucket,json=archivalBucket,proto3" json:"archival_bucket,omitempty"` - BadBinaries *v11.BadBinaries `protobuf:"bytes,3,opt,name=bad_binaries,json=badBinaries,proto3" json:"bad_binaries,omitempty"` - HistoryArchivalState v1.ArchivalState `protobuf:"varint,4,opt,name=history_archival_state,json=historyArchivalState,proto3,enum=temporal.api.enums.v1.ArchivalState" json:"history_archival_state,omitempty"` - HistoryArchivalUri string `protobuf:"bytes,5,opt,name=history_archival_uri,json=historyArchivalUri,proto3" json:"history_archival_uri,omitempty"` - VisibilityArchivalState v1.ArchivalState `protobuf:"varint,6,opt,name=visibility_archival_state,json=visibilityArchivalState,proto3,enum=temporal.api.enums.v1.ArchivalState" json:"visibility_archival_state,omitempty"` - VisibilityArchivalUri string `protobuf:"bytes,7,opt,name=visibility_archival_uri,json=visibilityArchivalUri,proto3" json:"visibility_archival_uri,omitempty"` - CustomSearchAttributeAliases map[string]string `protobuf:"bytes,8,rep,name=custom_search_attribute_aliases,json=customSearchAttributeAliases,proto3" json:"custom_search_attribute_aliases,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + Retention *durationpb.Duration `protobuf:"bytes,1,opt,name=retention,proto3" json:"retention,omitempty"` + ArchivalBucket string `protobuf:"bytes,2,opt,name=archival_bucket,json=archivalBucket,proto3" json:"archival_bucket,omitempty"` + BadBinaries *v11.BadBinaries `protobuf:"bytes,3,opt,name=bad_binaries,json=badBinaries,proto3" json:"bad_binaries,omitempty"` + HistoryArchivalState v1.ArchivalState `protobuf:"varint,4,opt,name=history_archival_state,json=historyArchivalState,proto3,enum=temporal.api.enums.v1.ArchivalState" json:"history_archival_state,omitempty"` + HistoryArchivalUri string `protobuf:"bytes,5,opt,name=history_archival_uri,json=historyArchivalUri,proto3" 
json:"history_archival_uri,omitempty"` + VisibilityArchivalState v1.ArchivalState `protobuf:"varint,6,opt,name=visibility_archival_state,json=visibilityArchivalState,proto3,enum=temporal.api.enums.v1.ArchivalState" json:"visibility_archival_state,omitempty"` + VisibilityArchivalUri string `protobuf:"bytes,7,opt,name=visibility_archival_uri,json=visibilityArchivalUri,proto3" json:"visibility_archival_uri,omitempty"` + CustomSearchAttributeAliases map[string]string `protobuf:"bytes,8,rep,name=custom_search_attribute_aliases,json=customSearchAttributeAliases,proto3" json:"custom_search_attribute_aliases,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + WorkflowRules map[string]*v12.WorkflowRule `protobuf:"bytes,9,rep,name=workflow_rules,json=workflowRules,proto3" json:"workflow_rules,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NamespaceConfig) Reset() { *x = NamespaceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceConfig) String() string { @@ -272,7 +234,7 @@ func (*NamespaceConfig) ProtoMessage() {} func (x *NamespaceConfig) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -343,24 +305,28 @@ func (x *NamespaceConfig) GetCustomSearchAttributeAliases() map[string]string { return nil } -type NamespaceReplicationConfig struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *NamespaceConfig) GetWorkflowRules() map[string]*v12.WorkflowRule { + if x != nil { + return x.WorkflowRules + } + return nil +} - ActiveClusterName string `protobuf:"bytes,1,opt,name=active_cluster_name,json=activeClusterName,proto3" json:"active_cluster_name,omitempty"` - Clusters []string `protobuf:"bytes,2,rep,name=clusters,proto3" json:"clusters,omitempty"` - State v1.ReplicationState `protobuf:"varint,3,opt,name=state,proto3,enum=temporal.api.enums.v1.ReplicationState" json:"state,omitempty"` - FailoverHistory []*FailoverStatus `protobuf:"bytes,8,rep,name=failover_history,json=failoverHistory,proto3" json:"failover_history,omitempty"` +type NamespaceReplicationConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + ActiveClusterName string `protobuf:"bytes,1,opt,name=active_cluster_name,json=activeClusterName,proto3" json:"active_cluster_name,omitempty"` + Clusters []string `protobuf:"bytes,2,rep,name=clusters,proto3" json:"clusters,omitempty"` + State v1.ReplicationState `protobuf:"varint,3,opt,name=state,proto3,enum=temporal.api.enums.v1.ReplicationState" json:"state,omitempty"` + FailoverHistory []*FailoverStatus `protobuf:"bytes,8,rep,name=failover_history,json=failoverHistory,proto3" json:"failover_history,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NamespaceReplicationConfig) Reset() { *x = NamespaceReplicationConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceReplicationConfig) String() string { @@ -371,7 +337,7 @@ func (*NamespaceReplicationConfig) 
ProtoMessage() {} func (x *NamespaceReplicationConfig) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -416,21 +382,18 @@ func (x *NamespaceReplicationConfig) GetFailoverHistory() []*FailoverStatus { // Represents a historical replication status of a Namespace type FailoverStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` FailoverTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=failover_time,json=failoverTime,proto3" json:"failover_time,omitempty"` FailoverVersion int64 `protobuf:"varint,2,opt,name=failover_version,json=failoverVersion,proto3" json:"failover_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FailoverStatus) Reset() { *x = FailoverStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FailoverStatus) String() string { @@ -441,7 +404,7 @@ func (*FailoverStatus) ProtoMessage() {} func (x *FailoverStatus) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -472,172 +435,66 @@ func (x *FailoverStatus) GetFailoverVersion() int64 { var File_temporal_server_api_persistence_v1_namespaces_proto 
protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x2f, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf9, 0x04, 0x0a, 0x0f, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x49, - 0x0a, 0x04, 0x69, 0x6e, 0x66, 
0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, - 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x71, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x29, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x46, 0x0a, 0x1d, 0x66, 0x61, 0x69, 0x6c, 0x6f, - 0x76, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, 0x66, - 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, - 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x11, 0x66, 0x61, 0x69, 0x6c, - 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x66, - 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x45, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x69, 0x0a, 0x11, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, - 0x78, 0x75, 0x73, 0x4f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xd2, 0x02, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x05, 0x73, 0x74, - 
0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x24, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x02, 0x68, 0x00, 0x12, 0x53, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x3f, 0x0a, 0x09, - 0x44, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfe, 0x05, 0x0a, 0x0f, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, - 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 
0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, - 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2b, 0x0a, 0x0f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, - 0x61, 0x6c, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, - 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x4d, 0x0a, 0x0c, 0x62, 0x61, 0x64, 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x69, 0x65, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x61, 0x64, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x69, 0x65, 0x73, 0x52, 0x0b, 0x62, 0x61, - 0x64, 0x42, 0x69, 0x6e, 0x61, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5e, 0x0a, 0x16, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x14, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x68, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x41, 0x72, 0x63, 0x68, - 0x69, 0x76, 0x61, 0x6c, 0x55, 0x72, 0x69, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 
0x0a, 0x19, 0x76, 0x69, - 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x17, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x41, 0x72, 0x63, 0x68, 0x69, - 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3a, 0x0a, 0x17, 0x76, - 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, - 0x6c, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x76, 0x69, 0x73, 0x69, - 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x41, 0x72, 0x63, 0x68, 0x69, 0x76, 0x61, 0x6c, 0x55, 0x72, 0x69, - 0x42, 0x02, 0x68, 0x00, 0x12, 0xa0, 0x01, 0x0a, 0x1f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x73, - 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, - 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x55, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1c, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x1a, - 0x57, 0x0a, 
0x21, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x96, 0x02, 0x0a, 0x1a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x12, 0x32, 0x0a, 0x13, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x1e, 0x0a, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, - 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x61, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 
0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, - 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x6f, - 0x76, 0x65, 0x72, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0x84, - 0x01, 0x0a, 0x0e, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x43, 0x0a, 0x0d, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0c, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, - 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x42, 0x36, - 0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc = "" + + "\n" + + "3temporal/server/api/persistence/v1/namespaces.proto\x12\"temporal.server.api.persistence.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a%temporal/api/enums/v1/namespace.proto\x1a'temporal/api/namespace/v1/message.proto\x1a#temporal/api/rules/v1/message.proto\"\xf2\x03\n" + + "\x0fNamespaceDetail\x12E\n" + + "\x04info\x18\x01 \x01(\v21.temporal.server.api.persistence.v1.NamespaceInfoR\x04info\x12K\n" + + 
"\x06config\x18\x02 \x01(\v23.temporal.server.api.persistence.v1.NamespaceConfigR\x06config\x12m\n" + + "\x12replication_config\x18\x03 \x01(\v2>.temporal.server.api.persistence.v1.NamespaceReplicationConfigR\x11replicationConfig\x12%\n" + + "\x0econfig_version\x18\x04 \x01(\x03R\rconfigVersion\x12B\n" + + "\x1dfailover_notification_version\x18\x05 \x01(\x03R\x1bfailoverNotificationVersion\x12)\n" + + "\x10failover_version\x18\x06 \x01(\x03R\x0ffailoverVersion\x12F\n" + + "\x11failover_end_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x0ffailoverEndTime\"\xb2\x02\n" + + "\rNamespaceInfo\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12;\n" + + "\x05state\x18\x02 \x01(\x0e2%.temporal.api.enums.v1.NamespaceStateR\x05state\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12 \n" + + "\vdescription\x18\x04 \x01(\tR\vdescription\x12\x14\n" + + "\x05owner\x18\x05 \x01(\tR\x05owner\x12O\n" + + "\x04data\x18\x06 \x03(\v2;.temporal.server.api.persistence.v1.NamespaceInfo.DataEntryR\x04data\x1a7\n" + + "\tDataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xac\a\n" + + "\x0fNamespaceConfig\x127\n" + + "\tretention\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\tretention\x12'\n" + + "\x0farchival_bucket\x18\x02 \x01(\tR\x0earchivalBucket\x12I\n" + + "\fbad_binaries\x18\x03 \x01(\v2&.temporal.api.namespace.v1.BadBinariesR\vbadBinaries\x12Z\n" + + "\x16history_archival_state\x18\x04 \x01(\x0e2$.temporal.api.enums.v1.ArchivalStateR\x14historyArchivalState\x120\n" + + "\x14history_archival_uri\x18\x05 \x01(\tR\x12historyArchivalUri\x12`\n" + + "\x19visibility_archival_state\x18\x06 \x01(\x0e2$.temporal.api.enums.v1.ArchivalStateR\x17visibilityArchivalState\x126\n" + + "\x17visibility_archival_uri\x18\a \x01(\tR\x15visibilityArchivalUri\x12\x9c\x01\n" + + "\x1fcustom_search_attribute_aliases\x18\b 
\x03(\v2U.temporal.server.api.persistence.v1.NamespaceConfig.CustomSearchAttributeAliasesEntryR\x1ccustomSearchAttributeAliases\x12m\n" + + "\x0eworkflow_rules\x18\t \x03(\v2F.temporal.server.api.persistence.v1.NamespaceConfig.WorkflowRulesEntryR\rworkflowRules\x1aO\n" + + "!CustomSearchAttributeAliasesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1ae\n" + + "\x12WorkflowRulesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x129\n" + + "\x05value\x18\x02 \x01(\v2#.temporal.api.rules.v1.WorkflowRuleR\x05value:\x028\x01\"\x86\x02\n" + + "\x1aNamespaceReplicationConfig\x12.\n" + + "\x13active_cluster_name\x18\x01 \x01(\tR\x11activeClusterName\x12\x1a\n" + + "\bclusters\x18\x02 \x03(\tR\bclusters\x12=\n" + + "\x05state\x18\x03 \x01(\x0e2'.temporal.api.enums.v1.ReplicationStateR\x05state\x12]\n" + + "\x10failover_history\x18\b \x03(\v22.temporal.server.api.persistence.v1.FailoverStatusR\x0ffailoverHistory\"|\n" + + "\x0eFailoverStatus\x12?\n" + + "\rfailover_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ffailoverTime\x12)\n" + + "\x10failover_version\x18\x02 \x01(\x03R\x0ffailoverVersionB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_namespaces_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_namespaces_proto_rawDescData = file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc + file_temporal_server_api_persistence_v1_namespaces_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_namespaces_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_namespaces_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_namespaces_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_namespaces_proto_rawDescData) + file_temporal_server_api_persistence_v1_namespaces_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc), len(file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_namespaces_proto_rawDescData } -var file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes = make([]protoimpl.MessageInfo, 7) -var file_temporal_server_api_persistence_v1_namespaces_proto_goTypes = []interface{}{ +var file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_temporal_server_api_persistence_v1_namespaces_proto_goTypes = []any{ (*NamespaceDetail)(nil), // 0: temporal.server.api.persistence.v1.NamespaceDetail (*NamespaceInfo)(nil), // 1: temporal.server.api.persistence.v1.NamespaceInfo (*NamespaceConfig)(nil), // 2: temporal.server.api.persistence.v1.NamespaceConfig @@ -645,35 +502,37 @@ var file_temporal_server_api_persistence_v1_namespaces_proto_goTypes = []interfa (*FailoverStatus)(nil), // 4: temporal.server.api.persistence.v1.FailoverStatus nil, // 5: temporal.server.api.persistence.v1.NamespaceInfo.DataEntry nil, // 6: temporal.server.api.persistence.v1.NamespaceConfig.CustomSearchAttributeAliasesEntry - (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp - (*NexusOutgoingService)(nil), // 8: temporal.server.api.persistence.v1.NexusOutgoingService + nil, // 7: temporal.server.api.persistence.v1.NamespaceConfig.WorkflowRulesEntry + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp (v1.NamespaceState)(0), // 9: temporal.api.enums.v1.NamespaceState (*durationpb.Duration)(nil), // 10: google.protobuf.Duration (*v11.BadBinaries)(nil), // 11: temporal.api.namespace.v1.BadBinaries (v1.ArchivalState)(0), // 12: temporal.api.enums.v1.ArchivalState (v1.ReplicationState)(0), // 13: temporal.api.enums.v1.ReplicationState + (*v12.WorkflowRule)(nil), // 14: temporal.api.rules.v1.WorkflowRule } var 
file_temporal_server_api_persistence_v1_namespaces_proto_depIdxs = []int32{ 1, // 0: temporal.server.api.persistence.v1.NamespaceDetail.info:type_name -> temporal.server.api.persistence.v1.NamespaceInfo 2, // 1: temporal.server.api.persistence.v1.NamespaceDetail.config:type_name -> temporal.server.api.persistence.v1.NamespaceConfig 3, // 2: temporal.server.api.persistence.v1.NamespaceDetail.replication_config:type_name -> temporal.server.api.persistence.v1.NamespaceReplicationConfig - 7, // 3: temporal.server.api.persistence.v1.NamespaceDetail.failover_end_time:type_name -> google.protobuf.Timestamp - 8, // 4: temporal.server.api.persistence.v1.NamespaceDetail.outgoing_services:type_name -> temporal.server.api.persistence.v1.NexusOutgoingService - 9, // 5: temporal.server.api.persistence.v1.NamespaceInfo.state:type_name -> temporal.api.enums.v1.NamespaceState - 5, // 6: temporal.server.api.persistence.v1.NamespaceInfo.data:type_name -> temporal.server.api.persistence.v1.NamespaceInfo.DataEntry - 10, // 7: temporal.server.api.persistence.v1.NamespaceConfig.retention:type_name -> google.protobuf.Duration - 11, // 8: temporal.server.api.persistence.v1.NamespaceConfig.bad_binaries:type_name -> temporal.api.namespace.v1.BadBinaries - 12, // 9: temporal.server.api.persistence.v1.NamespaceConfig.history_archival_state:type_name -> temporal.api.enums.v1.ArchivalState - 12, // 10: temporal.server.api.persistence.v1.NamespaceConfig.visibility_archival_state:type_name -> temporal.api.enums.v1.ArchivalState - 6, // 11: temporal.server.api.persistence.v1.NamespaceConfig.custom_search_attribute_aliases:type_name -> temporal.server.api.persistence.v1.NamespaceConfig.CustomSearchAttributeAliasesEntry + 8, // 3: temporal.server.api.persistence.v1.NamespaceDetail.failover_end_time:type_name -> google.protobuf.Timestamp + 9, // 4: temporal.server.api.persistence.v1.NamespaceInfo.state:type_name -> temporal.api.enums.v1.NamespaceState + 5, // 5: 
temporal.server.api.persistence.v1.NamespaceInfo.data:type_name -> temporal.server.api.persistence.v1.NamespaceInfo.DataEntry + 10, // 6: temporal.server.api.persistence.v1.NamespaceConfig.retention:type_name -> google.protobuf.Duration + 11, // 7: temporal.server.api.persistence.v1.NamespaceConfig.bad_binaries:type_name -> temporal.api.namespace.v1.BadBinaries + 12, // 8: temporal.server.api.persistence.v1.NamespaceConfig.history_archival_state:type_name -> temporal.api.enums.v1.ArchivalState + 12, // 9: temporal.server.api.persistence.v1.NamespaceConfig.visibility_archival_state:type_name -> temporal.api.enums.v1.ArchivalState + 6, // 10: temporal.server.api.persistence.v1.NamespaceConfig.custom_search_attribute_aliases:type_name -> temporal.server.api.persistence.v1.NamespaceConfig.CustomSearchAttributeAliasesEntry + 7, // 11: temporal.server.api.persistence.v1.NamespaceConfig.workflow_rules:type_name -> temporal.server.api.persistence.v1.NamespaceConfig.WorkflowRulesEntry 13, // 12: temporal.server.api.persistence.v1.NamespaceReplicationConfig.state:type_name -> temporal.api.enums.v1.ReplicationState 4, // 13: temporal.server.api.persistence.v1.NamespaceReplicationConfig.failover_history:type_name -> temporal.server.api.persistence.v1.FailoverStatus - 7, // 14: temporal.server.api.persistence.v1.FailoverStatus.failover_time:type_name -> google.protobuf.Timestamp - 15, // [15:15] is the sub-list for method output_type - 15, // [15:15] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 8, // 14: temporal.server.api.persistence.v1.FailoverStatus.failover_time:type_name -> google.protobuf.Timestamp + 14, // 15: temporal.server.api.persistence.v1.NamespaceConfig.WorkflowRulesEntry.value:type_name -> temporal.api.rules.v1.WorkflowRule + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the 
sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_temporal_server_api_persistence_v1_namespaces_proto_init() } @@ -681,76 +540,13 @@ func file_temporal_server_api_persistence_v1_namespaces_proto_init() { if File_temporal_server_api_persistence_v1_namespaces_proto != nil { return } - file_temporal_server_api_persistence_v1_nexus_proto_init() - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceDetail); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceReplicationConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FailoverStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } 
- } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc), len(file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc)), NumEnums: 0, - NumMessages: 7, + NumMessages: 8, NumExtensions: 0, NumServices: 0, }, @@ -759,7 +555,6 @@ func file_temporal_server_api_persistence_v1_namespaces_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_namespaces_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_namespaces_proto = out.File - file_temporal_server_api_persistence_v1_namespaces_proto_rawDesc = nil file_temporal_server_api_persistence_v1_namespaces_proto_goTypes = nil file_temporal_server_api_persistence_v1_namespaces_proto_depIdxs = nil } diff --git a/api/persistence/v1/nexus.go-helpers.pb.go b/api/persistence/v1/nexus.go-helpers.pb.go index dd6df00954e..b6be69e789a 100644 --- a/api/persistence/v1/nexus.go-helpers.pb.go +++ b/api/persistence/v1/nexus.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence @@ -29,35 +5,72 @@ import ( "google.golang.org/protobuf/proto" ) -// Marshal an object of type NexusIncomingService to the protobuf v3 wire format -func (val *NexusIncomingService) Marshal() ([]byte, error) { +// Marshal an object of type NexusEndpointSpec to the protobuf v3 wire format +func (val *NexusEndpointSpec) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusEndpointSpec from the protobuf v3 wire format +func (val *NexusEndpointSpec) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusEndpointSpec) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusEndpointSpec values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusEndpointSpec) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusEndpointSpec + switch t := that.(type) { + case *NexusEndpointSpec: + that1 = t + case NexusEndpointSpec: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusEndpointTarget to the protobuf v3 wire format +func (val *NexusEndpointTarget) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type NexusIncomingService from the protobuf v3 wire format -func (val *NexusIncomingService) Unmarshal(buf []byte) error { +// Unmarshal an object of type NexusEndpointTarget from the protobuf v3 wire format +func (val *NexusEndpointTarget) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *NexusIncomingService) Size() int { +func (val *NexusEndpointTarget) Size() int { return proto.Size(val) } -// Equal returns whether two NexusIncomingService values are equivalent by recursively +// Equal returns whether two NexusEndpointTarget values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *NexusIncomingService) Equal(that interface{}) bool { +func (this *NexusEndpointTarget) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *NexusIncomingService + var that1 *NexusEndpointTarget switch t := that.(type) { - case *NexusIncomingService: + case *NexusEndpointTarget: that1 = t - case NexusIncomingService: + case NexusEndpointTarget: that1 = &t default: return false @@ -66,35 +79,35 @@ func (this *NexusIncomingService) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type NexusIncomingServiceEntry to the protobuf v3 wire format -func (val *NexusIncomingServiceEntry) Marshal() ([]byte, error) { +// Marshal an object of type NexusEndpoint to the protobuf v3 wire format +func (val *NexusEndpoint) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type NexusIncomingServiceEntry from the protobuf v3 wire format -func (val *NexusIncomingServiceEntry) Unmarshal(buf []byte) error { +// Unmarshal an object of type NexusEndpoint from the protobuf v3 wire format +func (val *NexusEndpoint) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *NexusIncomingServiceEntry) Size() int { +func (val *NexusEndpoint) Size() int { return proto.Size(val) } -// Equal returns whether two NexusIncomingServiceEntry values are equivalent by recursively +// Equal returns whether two NexusEndpoint values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *NexusIncomingServiceEntry) Equal(that interface{}) bool { +func (this *NexusEndpoint) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *NexusIncomingServiceEntry + var that1 *NexusEndpoint switch t := that.(type) { - case *NexusIncomingServiceEntry: + case *NexusEndpoint: that1 = t - case NexusIncomingServiceEntry: + case NexusEndpoint: that1 = &t default: return false @@ -103,35 +116,35 @@ func (this *NexusIncomingServiceEntry) Equal(that interface{}) bool { return proto.Equal(this, that1) } -// Marshal an object of type NexusOutgoingService to the protobuf v3 wire format -func (val *NexusOutgoingService) Marshal() ([]byte, error) { +// Marshal an object of type NexusEndpointEntry to the protobuf v3 wire format +func (val *NexusEndpointEntry) Marshal() ([]byte, error) { return proto.Marshal(val) } -// Unmarshal an object of type NexusOutgoingService from the protobuf v3 wire format -func (val *NexusOutgoingService) Unmarshal(buf []byte) error { +// Unmarshal an object of type NexusEndpointEntry from the protobuf v3 wire format +func (val *NexusEndpointEntry) Unmarshal(buf []byte) error { return proto.Unmarshal(buf, val) } // Size returns the size of the object, in bytes, once serialized -func (val *NexusOutgoingService) Size() int { +func (val *NexusEndpointEntry) Size() int { return proto.Size(val) } -// Equal returns whether two NexusOutgoingService values are equivalent by recursively +// Equal returns whether two NexusEndpointEntry values are equivalent by recursively // comparing the message's fields. 
// For more information see the documentation for // https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *NexusOutgoingService) Equal(that interface{}) bool { +func (this *NexusEndpointEntry) Equal(that interface{}) bool { if that == nil { return this == nil } - var that1 *NexusOutgoingService + var that1 *NexusEndpointEntry switch t := that.(type) { - case *NexusOutgoingService: + case *NexusEndpointEntry: that1 = t - case NexusOutgoingService: + case NexusEndpointEntry: that1 = &t default: return false diff --git a/api/persistence/v1/nexus.pb.go b/api/persistence/v1/nexus.pb.go index f6be0c8df7c..2b36726b772 100644 --- a/api/persistence/v1/nexus.pb.go +++ b/api/persistence/v1/nexus.pb.go @@ -1,47 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -// Copyright (c) 2019 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -53,9 +9,10 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" - v11 "go.temporal.io/api/nexus/v1" - v1 "go.temporal.io/server/api/clock/v1" + v1 "go.temporal.io/api/common/v1" + v11 "go.temporal.io/server/api/clock/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" @@ -68,43 +25,191 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type NexusIncomingService struct { - state protoimpl.MessageState +// Contains mutable fields for an Endpoint. Duplicated from the public API's temporal.api.nexus.v1.EndpointSpec where +// the worker target has a namespace name. 
+// We store an ID in persistence to prevent namespace renames from breaking references. +type NexusEndpointSpec struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Endpoint name, unique for this cluster. Must match `[a-zA-Z_][a-zA-Z0-9_]*`. + // Renaming an endpoint breaks all workflow callers that reference this endpoint, causing operations to fail. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description *v1.Payload `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // Target to route requests to. + Target *NexusEndpointTarget `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +} + +func (x *NexusEndpointSpec) Reset() { + *x = NexusEndpointSpec{} + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusEndpointSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusEndpointSpec) ProtoMessage() {} + +func (x *NexusEndpointSpec) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusEndpointSpec.ProtoReflect.Descriptor instead. 
+func (*NexusEndpointSpec) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{0} +} + +func (x *NexusEndpointSpec) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NexusEndpointSpec) GetDescription() *v1.Payload { + if x != nil { + return x.Description + } + return nil +} + +func (x *NexusEndpointSpec) GetTarget() *NexusEndpointTarget { + if x != nil { + return x.Target + } + return nil +} + +// Target to route requests to. +// Duplicated from the public API's temporal.api.nexus.v1.EndpointTarget where the worker target has a namespace name. +// We store an ID in persistence to prevent namespace renames from breaking references. +type NexusEndpointTarget struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *NexusEndpointTarget_Worker_ + // *NexusEndpointTarget_External_ + Variant isNexusEndpointTarget_Variant `protobuf_oneof:"variant"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusEndpointTarget) Reset() { + *x = NexusEndpointTarget{} + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusEndpointTarget) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusEndpointTarget) ProtoMessage() {} + +func (x *NexusEndpointTarget) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusEndpointTarget.ProtoReflect.Descriptor instead. 
+func (*NexusEndpointTarget) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{1} +} + +func (x *NexusEndpointTarget) GetVariant() isNexusEndpointTarget_Variant { + if x != nil { + return x.Variant + } + return nil +} + +func (x *NexusEndpointTarget) GetWorker() *NexusEndpointTarget_Worker { + if x != nil { + if x, ok := x.Variant.(*NexusEndpointTarget_Worker_); ok { + return x.Worker + } + } + return nil +} - // The last recorded cluster-local Hybrid Logical Clock timestamp for _this_ service. - // Updated whenever the service is directly updated due to a user action but not when applying replication events. +func (x *NexusEndpointTarget) GetExternal() *NexusEndpointTarget_External { + if x != nil { + if x, ok := x.Variant.(*NexusEndpointTarget_External_); ok { + return x.External + } + } + return nil +} + +type isNexusEndpointTarget_Variant interface { + isNexusEndpointTarget_Variant() +} + +type NexusEndpointTarget_Worker_ struct { + Worker *NexusEndpointTarget_Worker `protobuf:"bytes,1,opt,name=worker,proto3,oneof"` +} + +type NexusEndpointTarget_External_ struct { + External *NexusEndpointTarget_External `protobuf:"bytes,2,opt,name=external,proto3,oneof"` +} + +func (*NexusEndpointTarget_Worker_) isNexusEndpointTarget_Variant() {} + +func (*NexusEndpointTarget_External_) isNexusEndpointTarget_Variant() {} + +type NexusEndpoint struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The last recorded cluster-local Hybrid Logical Clock timestamp for _this_ endpoint. + // Updated whenever the endpoint is directly updated due to a user action but not when applying replication events. // The clock is referenced when new timestamps are generated to ensure it produces monotonically increasing // timestamps. - Clock *v1.HybridLogicalClock `protobuf:"bytes,1,opt,name=clock,proto3" json:"clock,omitempty"` - // Service specification. This is a mirror of the public API and is intended to be mutable. 
- Spec *v11.IncomingServiceSpec `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"` - // The date and time when the service was created. + Clock *v11.HybridLogicalClock `protobuf:"bytes,1,opt,name=clock,proto3" json:"clock,omitempty"` + // Endpoint specification. This is a mirror of the public API and is intended to be mutable. + Spec *NexusEndpointSpec `protobuf:"bytes,2,opt,name=spec,proto3" json:"spec,omitempty"` + // The date and time when the endpoint was created. // (-- api-linter: core::0142::time-field-names=disabled // // aip.dev/not-precedent: Not following linter rules. --) - CreatedTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + CreatedTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_time,json=createdTime,proto3" json:"created_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *NexusIncomingService) Reset() { - *x = NexusIncomingService{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *NexusEndpoint) Reset() { + *x = NexusEndpoint{} + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *NexusIncomingService) String() string { +func (x *NexusEndpoint) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NexusIncomingService) ProtoMessage() {} +func (*NexusEndpoint) ProtoMessage() {} -func (x *NexusIncomingService) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { +func (x *NexusEndpoint) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[2] + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -114,61 +219,58 @@ func (x *NexusIncomingService) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use NexusIncomingService.ProtoReflect.Descriptor instead. -func (*NexusIncomingService) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{0} +// Deprecated: Use NexusEndpoint.ProtoReflect.Descriptor instead. +func (*NexusEndpoint) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{2} } -func (x *NexusIncomingService) GetClock() *v1.HybridLogicalClock { +func (x *NexusEndpoint) GetClock() *v11.HybridLogicalClock { if x != nil { return x.Clock } return nil } -func (x *NexusIncomingService) GetSpec() *v11.IncomingServiceSpec { +func (x *NexusEndpoint) GetSpec() *NexusEndpointSpec { if x != nil { return x.Spec } return nil } -func (x *NexusIncomingService) GetCreatedTime() *timestamppb.Timestamp { +func (x *NexusEndpoint) GetCreatedTime() *timestamppb.Timestamp { if x != nil { return x.CreatedTime } return nil } -// Container for a version, a UUID, and a NexusIncomingService. -type NexusIncomingServiceEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// Container for a version, a UUID, and a NexusEndpoint. 
+type NexusEndpointEntry struct { + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Endpoint *NexusEndpoint `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` unknownFields protoimpl.UnknownFields - - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Service *NexusIncomingService `protobuf:"bytes,3,opt,name=service,proto3" json:"service,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *NexusIncomingServiceEntry) Reset() { - *x = NexusIncomingServiceEntry{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *NexusEndpointEntry) Reset() { + *x = NexusEndpointEntry{} + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *NexusIncomingServiceEntry) String() string { +func (x *NexusEndpointEntry) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NexusIncomingServiceEntry) ProtoMessage() {} +func (*NexusEndpointEntry) ProtoMessage() {} -func (x *NexusIncomingServiceEntry) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { +func (x *NexusEndpointEntry) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -178,64 +280,59 @@ func (x *NexusIncomingServiceEntry) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } 
-// Deprecated: Use NexusIncomingServiceEntry.ProtoReflect.Descriptor instead. -func (*NexusIncomingServiceEntry) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{1} +// Deprecated: Use NexusEndpointEntry.ProtoReflect.Descriptor instead. +func (*NexusEndpointEntry) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{3} } -func (x *NexusIncomingServiceEntry) GetVersion() int64 { +func (x *NexusEndpointEntry) GetVersion() int64 { if x != nil { return x.Version } return 0 } -func (x *NexusIncomingServiceEntry) GetId() string { +func (x *NexusEndpointEntry) GetId() string { if x != nil { return x.Id } return "" } -func (x *NexusIncomingServiceEntry) GetService() *NexusIncomingService { +func (x *NexusEndpointEntry) GetEndpoint() *NexusEndpoint { if x != nil { - return x.Service + return x.Endpoint } return nil } -// Nexus outgoing service info, scoped to a namespace. -type NexusOutgoingService struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +// Target a worker polling on a Nexus task queue in a specific namespace. +type NexusEndpointTarget_Worker struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace ID to route requests to. + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // Nexus task queue to route requests to. + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` unknownFields protoimpl.UnknownFields - - // Current record version of this service. Incremented with each update. - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - // Service name. Must be unique for this namespace and match [go.temporal.io/server/common/nexus.ServiceNameRegex]. 
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Service specification. This is a mirror of the public API and is intended to be mutable. - Spec *v11.OutgoingServiceSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` + sizeCache protoimpl.SizeCache } -func (x *NexusOutgoingService) Reset() { - *x = NexusOutgoingService{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } +func (x *NexusEndpointTarget_Worker) Reset() { + *x = NexusEndpointTarget_Worker{} + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *NexusOutgoingService) String() string { +func (x *NexusEndpointTarget_Worker) String() string { return protoimpl.X.MessageStringOf(x) } -func (*NexusOutgoingService) ProtoMessage() {} +func (*NexusEndpointTarget_Worker) ProtoMessage() {} -func (x *NexusOutgoingService) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { +func (x *NexusEndpointTarget_Worker) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -245,119 +342,142 @@ func (x *NexusOutgoingService) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use NexusOutgoingService.ProtoReflect.Descriptor instead. -func (*NexusOutgoingService) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{2} +// Deprecated: Use NexusEndpointTarget_Worker.ProtoReflect.Descriptor instead. 
+func (*NexusEndpointTarget_Worker) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{1, 0} } -func (x *NexusOutgoingService) GetVersion() int64 { +func (x *NexusEndpointTarget_Worker) GetNamespaceId() string { if x != nil { - return x.Version + return x.NamespaceId } - return 0 + return "" } -func (x *NexusOutgoingService) GetName() string { +func (x *NexusEndpointTarget_Worker) GetTaskQueue() string { if x != nil { - return x.Name + return x.TaskQueue } return "" } -func (x *NexusOutgoingService) GetSpec() *v11.OutgoingServiceSpec { +// Target an external server by URL. +// At a later point, this will support providing credentials, in the meantime, an http.RoundTripper can be injected +// into the server to modify the request. +type NexusEndpointTarget_External struct { + state protoimpl.MessageState `protogen:"open.v1"` + // URL to call. + // (-- api-linter: core::0140::uri=disabled + // + // aip.dev/not-precedent: Not following linter rules. 
--) + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusEndpointTarget_External) Reset() { + *x = NexusEndpointTarget_External{} + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusEndpointTarget_External) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusEndpointTarget_External) ProtoMessage() {} + +func (x *NexusEndpointTarget_External) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[5] if x != nil { - return x.Spec + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -var File_temporal_server_api_persistence_v1_nexus_proto protoreflect.FileDescriptor +// Deprecated: Use NexusEndpointTarget_External.ProtoReflect.Descriptor instead. 
+func (*NexusEndpointTarget_External) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP(), []int{1, 1} +} -var file_temporal_server_api_persistence_v1_nexus_proto_rawDesc = []byte{ - 0x0a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x23, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x65, - 0x78, 0x75, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe9, 0x01, - 0x0a, 0x14, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4a, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, - 0x31, 0x2e, 0x48, 0x79, 0x62, 0x72, 0x69, 0x64, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, - 0x6f, 0x63, 0x6b, 
0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x42, 0x0a, - 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x53, 0x70, 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, - 0x0a, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa5, - 0x01, 0x0a, 0x19, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x12, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x56, 0x0a, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, - 0x65, 0x78, 0x75, 0x73, 0x49, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, - 0x90, 0x01, 0x0a, 0x14, 0x4e, 0x65, 0x78, 0x75, 0x73, 0x4f, 0x75, 0x74, 
0x67, 0x6f, 0x69, 0x6e, 0x67, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x16, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x42, 0x0a, 0x04, 0x73, 0x70, - 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, 0x65, 0x78, 0x75, 0x73, 0x2e, 0x76, 0x31, 0x2e, - 0x4f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x70, - 0x65, 0x63, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x42, 0x02, 0x68, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, - 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *NexusEndpointTarget_External) GetUrl() string { + if x != nil { + return x.Url + } + return "" } +var File_temporal_server_api_persistence_v1_nexus_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_nexus_proto_rawDesc = "" + + "\n" + + ".temporal/server/api/persistence/v1/nexus.proto\x12\"temporal.server.api.persistence.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a*temporal/server/api/clock/v1/message.proto\"\xbb\x01\n" + + "\x11NexusEndpointSpec\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12A\n" + + "\vdescription\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\vdescription\x12O\n" + + "\x06target\x18\x03 
\x01(\v27.temporal.server.api.persistence.v1.NexusEndpointTargetR\x06target\"\xc4\x02\n" + + "\x13NexusEndpointTarget\x12X\n" + + "\x06worker\x18\x01 \x01(\v2>.temporal.server.api.persistence.v1.NexusEndpointTarget.WorkerH\x00R\x06worker\x12^\n" + + "\bexternal\x18\x02 \x01(\v2@.temporal.server.api.persistence.v1.NexusEndpointTarget.ExternalH\x00R\bexternal\x1aJ\n" + + "\x06Worker\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x1a\x1c\n" + + "\bExternal\x12\x10\n" + + "\x03url\x18\x01 \x01(\tR\x03urlB\t\n" + + "\avariant\"\xe1\x01\n" + + "\rNexusEndpoint\x12F\n" + + "\x05clock\x18\x01 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x05clock\x12I\n" + + "\x04spec\x18\x02 \x01(\v25.temporal.server.api.persistence.v1.NexusEndpointSpecR\x04spec\x12=\n" + + "\fcreated_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\vcreatedTime\"\x8d\x01\n" + + "\x12NexusEndpointEntry\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x12M\n" + + "\bendpoint\x18\x03 \x01(\v21.temporal.server.api.persistence.v1.NexusEndpointR\bendpointB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + var ( file_temporal_server_api_persistence_v1_nexus_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_nexus_proto_rawDescData = file_temporal_server_api_persistence_v1_nexus_proto_rawDesc + file_temporal_server_api_persistence_v1_nexus_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_nexus_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_nexus_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_nexus_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_nexus_proto_rawDescData) + file_temporal_server_api_persistence_v1_nexus_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_nexus_proto_rawDesc), len(file_temporal_server_api_persistence_v1_nexus_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_nexus_proto_rawDescData } -var file_temporal_server_api_persistence_v1_nexus_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_temporal_server_api_persistence_v1_nexus_proto_goTypes = []interface{}{ - (*NexusIncomingService)(nil), // 0: temporal.server.api.persistence.v1.NexusIncomingService - (*NexusIncomingServiceEntry)(nil), // 1: temporal.server.api.persistence.v1.NexusIncomingServiceEntry - (*NexusOutgoingService)(nil), // 2: temporal.server.api.persistence.v1.NexusOutgoingService - (*v1.HybridLogicalClock)(nil), // 3: temporal.server.api.clock.v1.HybridLogicalClock - (*v11.IncomingServiceSpec)(nil), // 4: temporal.api.nexus.v1.IncomingServiceSpec - (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp - (*v11.OutgoingServiceSpec)(nil), // 6: temporal.api.nexus.v1.OutgoingServiceSpec +var file_temporal_server_api_persistence_v1_nexus_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_temporal_server_api_persistence_v1_nexus_proto_goTypes = []any{ + (*NexusEndpointSpec)(nil), // 0: temporal.server.api.persistence.v1.NexusEndpointSpec + (*NexusEndpointTarget)(nil), // 1: temporal.server.api.persistence.v1.NexusEndpointTarget + (*NexusEndpoint)(nil), // 2: temporal.server.api.persistence.v1.NexusEndpoint + (*NexusEndpointEntry)(nil), // 3: temporal.server.api.persistence.v1.NexusEndpointEntry + (*NexusEndpointTarget_Worker)(nil), // 4: temporal.server.api.persistence.v1.NexusEndpointTarget.Worker + (*NexusEndpointTarget_External)(nil), // 5: temporal.server.api.persistence.v1.NexusEndpointTarget.External + (*v1.Payload)(nil), // 6: temporal.api.common.v1.Payload + (*v11.HybridLogicalClock)(nil), // 7: temporal.server.api.clock.v1.HybridLogicalClock + (*timestamppb.Timestamp)(nil), // 8: 
google.protobuf.Timestamp } var file_temporal_server_api_persistence_v1_nexus_proto_depIdxs = []int32{ - 3, // 0: temporal.server.api.persistence.v1.NexusIncomingService.clock:type_name -> temporal.server.api.clock.v1.HybridLogicalClock - 4, // 1: temporal.server.api.persistence.v1.NexusIncomingService.spec:type_name -> temporal.api.nexus.v1.IncomingServiceSpec - 5, // 2: temporal.server.api.persistence.v1.NexusIncomingService.created_time:type_name -> google.protobuf.Timestamp - 0, // 3: temporal.server.api.persistence.v1.NexusIncomingServiceEntry.service:type_name -> temporal.server.api.persistence.v1.NexusIncomingService - 6, // 4: temporal.server.api.persistence.v1.NexusOutgoingService.spec:type_name -> temporal.api.nexus.v1.OutgoingServiceSpec - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 6, // 0: temporal.server.api.persistence.v1.NexusEndpointSpec.description:type_name -> temporal.api.common.v1.Payload + 1, // 1: temporal.server.api.persistence.v1.NexusEndpointSpec.target:type_name -> temporal.server.api.persistence.v1.NexusEndpointTarget + 4, // 2: temporal.server.api.persistence.v1.NexusEndpointTarget.worker:type_name -> temporal.server.api.persistence.v1.NexusEndpointTarget.Worker + 5, // 3: temporal.server.api.persistence.v1.NexusEndpointTarget.external:type_name -> temporal.server.api.persistence.v1.NexusEndpointTarget.External + 7, // 4: temporal.server.api.persistence.v1.NexusEndpoint.clock:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 0, // 5: temporal.server.api.persistence.v1.NexusEndpoint.spec:type_name -> temporal.server.api.persistence.v1.NexusEndpointSpec + 8, // 6: temporal.server.api.persistence.v1.NexusEndpoint.created_time:type_name -> google.protobuf.Timestamp + 2, // 7: 
temporal.server.api.persistence.v1.NexusEndpointEntry.endpoint:type_name -> temporal.server.api.persistence.v1.NexusEndpoint + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_temporal_server_api_persistence_v1_nexus_proto_init() } @@ -365,51 +485,17 @@ func file_temporal_server_api_persistence_v1_nexus_proto_init() { if File_temporal_server_api_persistence_v1_nexus_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NexusIncomingService); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NexusIncomingServiceEntry); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NexusOutgoingService); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } + file_temporal_server_api_persistence_v1_nexus_proto_msgTypes[1].OneofWrappers = []any{ + (*NexusEndpointTarget_Worker_)(nil), + (*NexusEndpointTarget_External_)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_nexus_proto_rawDesc, + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_nexus_proto_rawDesc), len(file_temporal_server_api_persistence_v1_nexus_proto_rawDesc)), NumEnums: 0, - NumMessages: 3, + NumMessages: 6, NumExtensions: 0, NumServices: 0, }, @@ -418,7 +504,6 @@ func file_temporal_server_api_persistence_v1_nexus_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_nexus_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_nexus_proto = out.File - file_temporal_server_api_persistence_v1_nexus_proto_rawDesc = nil file_temporal_server_api_persistence_v1_nexus_proto_goTypes = nil file_temporal_server_api_persistence_v1_nexus_proto_depIdxs = nil } diff --git a/api/persistence/v1/predicates.go-helpers.pb.go b/api/persistence/v1/predicates.go-helpers.pb.go index 6da9bb9bcc2..8776adf9c9f 100644 --- a/api/persistence/v1/predicates.go-helpers.pb.go +++ b/api/persistence/v1/predicates.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence @@ -361,3 +337,77 @@ func (this *DestinationPredicateAttributes) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type OutboundTaskGroupPredicateAttributes to the protobuf v3 wire format +func (val *OutboundTaskGroupPredicateAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type OutboundTaskGroupPredicateAttributes from the protobuf v3 wire format +func (val *OutboundTaskGroupPredicateAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *OutboundTaskGroupPredicateAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two OutboundTaskGroupPredicateAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *OutboundTaskGroupPredicateAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *OutboundTaskGroupPredicateAttributes + switch t := that.(type) { + case *OutboundTaskGroupPredicateAttributes: + that1 = t + case OutboundTaskGroupPredicateAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type OutboundTaskPredicateAttributes to the protobuf v3 wire format +func (val *OutboundTaskPredicateAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type OutboundTaskPredicateAttributes from the protobuf v3 wire format +func (val *OutboundTaskPredicateAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *OutboundTaskPredicateAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two OutboundTaskPredicateAttributes values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *OutboundTaskPredicateAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *OutboundTaskPredicateAttributes + switch t := that.(type) { + case *OutboundTaskPredicateAttributes: + that1 = t + case OutboundTaskPredicateAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/predicates.pb.go b/api/persistence/v1/predicates.pb.go index 311966a97fc..eef8df764a6 100644 --- a/api/persistence/v1/predicates.pb.go +++ b/api/persistence/v1/predicates.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/server/api/enums/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -45,12 +24,9 @@ const ( ) type Predicate struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PredicateType v1.PredicateType `protobuf:"varint,1,opt,name=predicate_type,json=predicateType,proto3,enum=temporal.server.api.enums.v1.PredicateType" json:"predicate_type,omitempty"` - // Types that are assignable to Attributes: + state protoimpl.MessageState `protogen:"open.v1"` + PredicateType v1.PredicateType `protobuf:"varint,1,opt,name=predicate_type,json=predicateType,proto3,enum=temporal.server.api.enums.v1.PredicateType" json:"predicate_type,omitempty"` + // Types that are valid to be assigned to Attributes: // // *Predicate_UniversalPredicateAttributes // *Predicate_EmptyPredicateAttributes @@ -60,16 +36,18 @@ type Predicate struct { // *Predicate_NamespaceIdPredicateAttributes // *Predicate_TaskTypePredicateAttributes // *Predicate_DestinationPredicateAttributes - Attributes isPredicate_Attributes `protobuf_oneof:"attributes"` + // *Predicate_OutboundTaskGroupPredicateAttributes + // *Predicate_OutboundTaskPredicateAttributes + Attributes isPredicate_Attributes `protobuf_oneof:"attributes"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Predicate) Reset() { *x = Predicate{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Predicate) String() string { @@ -80,7 +58,7 @@ func (*Predicate) ProtoMessage() {} func (x *Predicate) 
ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -102,65 +80,99 @@ func (x *Predicate) GetPredicateType() v1.PredicateType { return v1.PredicateType(0) } -func (m *Predicate) GetAttributes() isPredicate_Attributes { - if m != nil { - return m.Attributes +func (x *Predicate) GetAttributes() isPredicate_Attributes { + if x != nil { + return x.Attributes } return nil } func (x *Predicate) GetUniversalPredicateAttributes() *UniversalPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_UniversalPredicateAttributes); ok { - return x.UniversalPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_UniversalPredicateAttributes); ok { + return x.UniversalPredicateAttributes + } } return nil } func (x *Predicate) GetEmptyPredicateAttributes() *EmptyPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_EmptyPredicateAttributes); ok { - return x.EmptyPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_EmptyPredicateAttributes); ok { + return x.EmptyPredicateAttributes + } } return nil } func (x *Predicate) GetAndPredicateAttributes() *AndPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_AndPredicateAttributes); ok { - return x.AndPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_AndPredicateAttributes); ok { + return x.AndPredicateAttributes + } } return nil } func (x *Predicate) GetOrPredicateAttributes() *OrPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_OrPredicateAttributes); ok { - return x.OrPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_OrPredicateAttributes); ok { + return x.OrPredicateAttributes + } } return nil } func (x *Predicate) GetNotPredicateAttributes() 
*NotPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_NotPredicateAttributes); ok { - return x.NotPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_NotPredicateAttributes); ok { + return x.NotPredicateAttributes + } } return nil } func (x *Predicate) GetNamespaceIdPredicateAttributes() *NamespaceIdPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_NamespaceIdPredicateAttributes); ok { - return x.NamespaceIdPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_NamespaceIdPredicateAttributes); ok { + return x.NamespaceIdPredicateAttributes + } } return nil } func (x *Predicate) GetTaskTypePredicateAttributes() *TaskTypePredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_TaskTypePredicateAttributes); ok { - return x.TaskTypePredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_TaskTypePredicateAttributes); ok { + return x.TaskTypePredicateAttributes + } } return nil } func (x *Predicate) GetDestinationPredicateAttributes() *DestinationPredicateAttributes { - if x, ok := x.GetAttributes().(*Predicate_DestinationPredicateAttributes); ok { - return x.DestinationPredicateAttributes + if x != nil { + if x, ok := x.Attributes.(*Predicate_DestinationPredicateAttributes); ok { + return x.DestinationPredicateAttributes + } + } + return nil +} + +func (x *Predicate) GetOutboundTaskGroupPredicateAttributes() *OutboundTaskGroupPredicateAttributes { + if x != nil { + if x, ok := x.Attributes.(*Predicate_OutboundTaskGroupPredicateAttributes); ok { + return x.OutboundTaskGroupPredicateAttributes + } + } + return nil +} + +func (x *Predicate) GetOutboundTaskPredicateAttributes() *OutboundTaskPredicateAttributes { + if x != nil { + if x, ok := x.Attributes.(*Predicate_OutboundTaskPredicateAttributes); ok { + return x.OutboundTaskPredicateAttributes + } } return nil } @@ -201,6 +213,14 @@ type Predicate_DestinationPredicateAttributes struct { DestinationPredicateAttributes 
*DestinationPredicateAttributes `protobuf:"bytes,9,opt,name=destination_predicate_attributes,json=destinationPredicateAttributes,proto3,oneof"` } +type Predicate_OutboundTaskGroupPredicateAttributes struct { + OutboundTaskGroupPredicateAttributes *OutboundTaskGroupPredicateAttributes `protobuf:"bytes,10,opt,name=outbound_task_group_predicate_attributes,json=outboundTaskGroupPredicateAttributes,proto3,oneof"` +} + +type Predicate_OutboundTaskPredicateAttributes struct { + OutboundTaskPredicateAttributes *OutboundTaskPredicateAttributes `protobuf:"bytes,11,opt,name=outbound_task_predicate_attributes,json=outboundTaskPredicateAttributes,proto3,oneof"` +} + func (*Predicate_UniversalPredicateAttributes) isPredicate_Attributes() {} func (*Predicate_EmptyPredicateAttributes) isPredicate_Attributes() {} @@ -217,19 +237,21 @@ func (*Predicate_TaskTypePredicateAttributes) isPredicate_Attributes() {} func (*Predicate_DestinationPredicateAttributes) isPredicate_Attributes() {} +func (*Predicate_OutboundTaskGroupPredicateAttributes) isPredicate_Attributes() {} + +func (*Predicate_OutboundTaskPredicateAttributes) isPredicate_Attributes() {} + type UniversalPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UniversalPredicateAttributes) Reset() { *x = UniversalPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *UniversalPredicateAttributes) String() string { @@ -240,7 +262,7 @@ func (*UniversalPredicateAttributes) ProtoMessage() {} func (x *UniversalPredicateAttributes) 
ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -256,18 +278,16 @@ func (*UniversalPredicateAttributes) Descriptor() ([]byte, []int) { } type EmptyPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EmptyPredicateAttributes) Reset() { *x = EmptyPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EmptyPredicateAttributes) String() string { @@ -278,7 +298,7 @@ func (*EmptyPredicateAttributes) ProtoMessage() {} func (x *EmptyPredicateAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -294,20 +314,17 @@ func (*EmptyPredicateAttributes) Descriptor() ([]byte, []int) { } type AndPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"` unknownFields protoimpl.UnknownFields - - Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"` + sizeCache protoimpl.SizeCache 
} func (x *AndPredicateAttributes) Reset() { *x = AndPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AndPredicateAttributes) String() string { @@ -318,7 +335,7 @@ func (*AndPredicateAttributes) ProtoMessage() {} func (x *AndPredicateAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -341,20 +358,17 @@ func (x *AndPredicateAttributes) GetPredicates() []*Predicate { } type OrPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"` unknownFields protoimpl.UnknownFields - - Predicates []*Predicate `protobuf:"bytes,1,rep,name=predicates,proto3" json:"predicates,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OrPredicateAttributes) Reset() { *x = OrPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *OrPredicateAttributes) String() string { @@ -365,7 +379,7 @@ func (*OrPredicateAttributes) ProtoMessage() {} func (x *OrPredicateAttributes) ProtoReflect() 
protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -388,20 +402,17 @@ func (x *OrPredicateAttributes) GetPredicates() []*Predicate { } type NotPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Predicate *Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` unknownFields protoimpl.UnknownFields - - Predicate *Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"` + sizeCache protoimpl.SizeCache } func (x *NotPredicateAttributes) Reset() { *x = NotPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NotPredicateAttributes) String() string { @@ -412,7 +423,7 @@ func (*NotPredicateAttributes) ProtoMessage() {} func (x *NotPredicateAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -435,20 +446,17 @@ func (x *NotPredicateAttributes) GetPredicate() *Predicate { } type NamespaceIdPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceIds []string `protobuf:"bytes,1,rep,name=namespace_ids,json=namespaceIds,proto3" 
json:"namespace_ids,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceIds []string `protobuf:"bytes,1,rep,name=namespace_ids,json=namespaceIds,proto3" json:"namespace_ids,omitempty"` + sizeCache protoimpl.SizeCache } func (x *NamespaceIdPredicateAttributes) Reset() { *x = NamespaceIdPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceIdPredicateAttributes) String() string { @@ -459,7 +467,7 @@ func (*NamespaceIdPredicateAttributes) ProtoMessage() {} func (x *NamespaceIdPredicateAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -482,20 +490,17 @@ func (x *NamespaceIdPredicateAttributes) GetNamespaceIds() []string { } type TaskTypePredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + TaskTypes []v1.TaskType `protobuf:"varint,1,rep,packed,name=task_types,json=taskTypes,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_types,omitempty"` unknownFields protoimpl.UnknownFields - - TaskTypes []v1.TaskType `protobuf:"varint,1,rep,packed,name=task_types,json=taskTypes,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_types,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TaskTypePredicateAttributes) Reset() { *x = TaskTypePredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[7] - ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskTypePredicateAttributes) String() string { @@ -506,7 +511,7 @@ func (*TaskTypePredicateAttributes) ProtoMessage() {} func (x *TaskTypePredicateAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -529,20 +534,17 @@ func (x *TaskTypePredicateAttributes) GetTaskTypes() []v1.TaskType { } type DestinationPredicateAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Destinations []string `protobuf:"bytes,1,rep,name=destinations,proto3" json:"destinations,omitempty"` unknownFields protoimpl.UnknownFields - - Destinations []string `protobuf:"bytes,1,rep,name=destinations,proto3" json:"destinations,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DestinationPredicateAttributes) Reset() { *x = DestinationPredicateAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DestinationPredicateAttributes) String() string { @@ -553,7 +555,7 @@ func (*DestinationPredicateAttributes) ProtoMessage() {} func (x *DestinationPredicateAttributes) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[8] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -575,166 +577,234 @@ func (x *DestinationPredicateAttributes) GetDestinations() []string { return nil } -var File_temporal_server_api_persistence_v1_predicates_proto protoreflect.FileDescriptor +type OutboundTaskGroupPredicateAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + Groups []string `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OutboundTaskGroupPredicateAttributes) Reset() { + *x = OutboundTaskGroupPredicateAttributes{} + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OutboundTaskGroupPredicateAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutboundTaskGroupPredicateAttributes) ProtoMessage() {} + +func (x *OutboundTaskGroupPredicateAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutboundTaskGroupPredicateAttributes.ProtoReflect.Descriptor instead. 
+func (*OutboundTaskGroupPredicateAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_predicates_proto_rawDescGZIP(), []int{9} +} + +func (x *OutboundTaskGroupPredicateAttributes) GetGroups() []string { + if x != nil { + return x.Groups + } + return nil +} -var file_temporal_server_api_persistence_v1_predicates_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, - 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, - 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac, 0x09, 0x0a, 0x09, 0x50, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0d, 0x70, 0x72, 0x65, - 
0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x8c, 0x01, - 0x0a, 0x1e, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x61, 0x6c, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1c, 0x75, 0x6e, 0x69, 0x76, 0x65, 0x72, 0x73, - 0x61, 0x6c, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x80, 0x01, 0x0a, 0x1a, 0x65, 0x6d, 0x70, - 0x74, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x18, 0x65, 0x6d, 0x70, 0x74, - 0x79, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x7a, 0x0a, 0x18, 0x61, 0x6e, 0x64, 0x5f, 0x70, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x65, 
0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x64, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x16, 0x61, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x77, 0x0a, 0x17, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x39, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x15, 0x6f, 0x72, 0x50, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x7a, 0x0a, 0x18, 0x6e, 0x6f, 0x74, 0x5f, 0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x74, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x48, 0x00, 0x52, 0x16, 0x6e, 0x6f, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 
0x42, 0x02, 0x68, 0x00, 0x12, 0x93, 0x01, - 0x0a, 0x21, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1e, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x8a, 0x01, 0x0a, 0x1e, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, - 0x54, 0x79, 0x70, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1b, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, - 0x70, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x92, 0x01, 0x0a, 0x20, 0x64, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x5f, 0x61, 0x74, 0x74, 0x72, 
0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1e, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x55, 0x6e, 0x69, 0x76, 0x65, 0x72, - 0x73, 0x61, 0x6c, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x22, 0x6b, 0x0a, 0x16, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x51, 0x0a, 0x0a, 0x70, 0x72, 0x65, - 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x6a, 0x0a, 0x15, 0x4f, 0x72, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 
0x12, 0x51, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x69, 0x0a, 0x16, 0x4e, 0x6f, 0x74, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x09, - 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, - 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x49, 0x0a, 0x1e, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0d, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x22, 0x68, 0x0a, 0x1b, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x50, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 
- 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x74, 0x61, - 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0x48, 0x0a, 0x1e, 0x44, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x64, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, +type OutboundTaskPredicateAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + Groups []*OutboundTaskPredicateAttributes_Group `protobuf:"bytes,1,rep,name=groups,proto3" json:"groups,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } +func (x *OutboundTaskPredicateAttributes) Reset() { + *x = OutboundTaskPredicateAttributes{} + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OutboundTaskPredicateAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutboundTaskPredicateAttributes) ProtoMessage() {} + +func (x *OutboundTaskPredicateAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[10] + if 
x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutboundTaskPredicateAttributes.ProtoReflect.Descriptor instead. +func (*OutboundTaskPredicateAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_predicates_proto_rawDescGZIP(), []int{10} +} + +func (x *OutboundTaskPredicateAttributes) GetGroups() []*OutboundTaskPredicateAttributes_Group { + if x != nil { + return x.Groups + } + return nil +} + +type OutboundTaskPredicateAttributes_Group struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskGroup string `protobuf:"bytes,1,opt,name=task_group,json=taskGroup,proto3" json:"task_group,omitempty"` + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Destination string `protobuf:"bytes,3,opt,name=destination,proto3" json:"destination,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OutboundTaskPredicateAttributes_Group) Reset() { + *x = OutboundTaskPredicateAttributes_Group{} + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OutboundTaskPredicateAttributes_Group) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OutboundTaskPredicateAttributes_Group) ProtoMessage() {} + +func (x *OutboundTaskPredicateAttributes_Group) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OutboundTaskPredicateAttributes_Group.ProtoReflect.Descriptor instead. 
+func (*OutboundTaskPredicateAttributes_Group) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_predicates_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *OutboundTaskPredicateAttributes_Group) GetTaskGroup() string { + if x != nil { + return x.TaskGroup + } + return "" +} + +func (x *OutboundTaskPredicateAttributes_Group) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *OutboundTaskPredicateAttributes_Group) GetDestination() string { + if x != nil { + return x.Destination + } + return "" +} + +var File_temporal_server_api_persistence_v1_predicates_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_predicates_proto_rawDesc = "" + + "\n" + + "3temporal/server/api/persistence/v1/predicates.proto\x12\"temporal.server.api.persistence.v1\x1a,temporal/server/api/enums/v1/predicate.proto\x1a'temporal/server/api/enums/v1/task.proto\"\xc1\v\n" + + "\tPredicate\x12R\n" + + "\x0epredicate_type\x18\x01 \x01(\x0e2+.temporal.server.api.enums.v1.PredicateTypeR\rpredicateType\x12\x88\x01\n" + + "\x1euniversal_predicate_attributes\x18\x02 \x01(\v2@.temporal.server.api.persistence.v1.UniversalPredicateAttributesH\x00R\x1cuniversalPredicateAttributes\x12|\n" + + "\x1aempty_predicate_attributes\x18\x03 \x01(\v2<.temporal.server.api.persistence.v1.EmptyPredicateAttributesH\x00R\x18emptyPredicateAttributes\x12v\n" + + "\x18and_predicate_attributes\x18\x04 \x01(\v2:.temporal.server.api.persistence.v1.AndPredicateAttributesH\x00R\x16andPredicateAttributes\x12s\n" + + "\x17or_predicate_attributes\x18\x05 \x01(\v29.temporal.server.api.persistence.v1.OrPredicateAttributesH\x00R\x15orPredicateAttributes\x12v\n" + + "\x18not_predicate_attributes\x18\x06 \x01(\v2:.temporal.server.api.persistence.v1.NotPredicateAttributesH\x00R\x16notPredicateAttributes\x12\x8f\x01\n" + + "!namespace_id_predicate_attributes\x18\a 
\x01(\v2B.temporal.server.api.persistence.v1.NamespaceIdPredicateAttributesH\x00R\x1enamespaceIdPredicateAttributes\x12\x86\x01\n" + + "\x1etask_type_predicate_attributes\x18\b \x01(\v2?.temporal.server.api.persistence.v1.TaskTypePredicateAttributesH\x00R\x1btaskTypePredicateAttributes\x12\x8e\x01\n" + + " destination_predicate_attributes\x18\t \x01(\v2B.temporal.server.api.persistence.v1.DestinationPredicateAttributesH\x00R\x1edestinationPredicateAttributes\x12\xa2\x01\n" + + "(outbound_task_group_predicate_attributes\x18\n" + + " \x01(\v2H.temporal.server.api.persistence.v1.OutboundTaskGroupPredicateAttributesH\x00R$outboundTaskGroupPredicateAttributes\x12\x92\x01\n" + + "\"outbound_task_predicate_attributes\x18\v \x01(\v2C.temporal.server.api.persistence.v1.OutboundTaskPredicateAttributesH\x00R\x1foutboundTaskPredicateAttributesB\f\n" + + "\n" + + "attributes\"\x1e\n" + + "\x1cUniversalPredicateAttributes\"\x1a\n" + + "\x18EmptyPredicateAttributes\"g\n" + + "\x16AndPredicateAttributes\x12M\n" + + "\n" + + "predicates\x18\x01 \x03(\v2-.temporal.server.api.persistence.v1.PredicateR\n" + + "predicates\"f\n" + + "\x15OrPredicateAttributes\x12M\n" + + "\n" + + "predicates\x18\x01 \x03(\v2-.temporal.server.api.persistence.v1.PredicateR\n" + + "predicates\"e\n" + + "\x16NotPredicateAttributes\x12K\n" + + "\tpredicate\x18\x01 \x01(\v2-.temporal.server.api.persistence.v1.PredicateR\tpredicate\"E\n" + + "\x1eNamespaceIdPredicateAttributes\x12#\n" + + "\rnamespace_ids\x18\x01 \x03(\tR\fnamespaceIds\"d\n" + + "\x1bTaskTypePredicateAttributes\x12E\n" + + "\n" + + "task_types\x18\x01 \x03(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\ttaskTypes\"D\n" + + "\x1eDestinationPredicateAttributes\x12\"\n" + + "\fdestinations\x18\x01 \x03(\tR\fdestinations\">\n" + + "$OutboundTaskGroupPredicateAttributes\x12\x16\n" + + "\x06groups\x18\x01 \x03(\tR\x06groups\"\xf1\x01\n" + + "\x1fOutboundTaskPredicateAttributes\x12a\n" + + "\x06groups\x18\x01 
\x03(\v2I.temporal.server.api.persistence.v1.OutboundTaskPredicateAttributes.GroupR\x06groups\x1ak\n" + + "\x05Group\x12\x1d\n" + + "\n" + + "task_group\x18\x01 \x01(\tR\ttaskGroup\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12 \n" + + "\vdestination\x18\x03 \x01(\tR\vdestinationB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + var ( file_temporal_server_api_persistence_v1_predicates_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_predicates_proto_rawDescData = file_temporal_server_api_persistence_v1_predicates_proto_rawDesc + file_temporal_server_api_persistence_v1_predicates_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_predicates_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_predicates_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_predicates_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_predicates_proto_rawDescData) + file_temporal_server_api_persistence_v1_predicates_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_predicates_proto_rawDesc), len(file_temporal_server_api_persistence_v1_predicates_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_predicates_proto_rawDescData } -var file_temporal_server_api_persistence_v1_predicates_proto_msgTypes = make([]protoimpl.MessageInfo, 9) -var file_temporal_server_api_persistence_v1_predicates_proto_goTypes = []interface{}{ - (*Predicate)(nil), // 0: temporal.server.api.persistence.v1.Predicate - (*UniversalPredicateAttributes)(nil), // 1: temporal.server.api.persistence.v1.UniversalPredicateAttributes - (*EmptyPredicateAttributes)(nil), // 2: temporal.server.api.persistence.v1.EmptyPredicateAttributes - (*AndPredicateAttributes)(nil), // 3: temporal.server.api.persistence.v1.AndPredicateAttributes - (*OrPredicateAttributes)(nil), // 4: 
temporal.server.api.persistence.v1.OrPredicateAttributes - (*NotPredicateAttributes)(nil), // 5: temporal.server.api.persistence.v1.NotPredicateAttributes - (*NamespaceIdPredicateAttributes)(nil), // 6: temporal.server.api.persistence.v1.NamespaceIdPredicateAttributes - (*TaskTypePredicateAttributes)(nil), // 7: temporal.server.api.persistence.v1.TaskTypePredicateAttributes - (*DestinationPredicateAttributes)(nil), // 8: temporal.server.api.persistence.v1.DestinationPredicateAttributes - (v1.PredicateType)(0), // 9: temporal.server.api.enums.v1.PredicateType - (v1.TaskType)(0), // 10: temporal.server.api.enums.v1.TaskType +var file_temporal_server_api_persistence_v1_predicates_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_temporal_server_api_persistence_v1_predicates_proto_goTypes = []any{ + (*Predicate)(nil), // 0: temporal.server.api.persistence.v1.Predicate + (*UniversalPredicateAttributes)(nil), // 1: temporal.server.api.persistence.v1.UniversalPredicateAttributes + (*EmptyPredicateAttributes)(nil), // 2: temporal.server.api.persistence.v1.EmptyPredicateAttributes + (*AndPredicateAttributes)(nil), // 3: temporal.server.api.persistence.v1.AndPredicateAttributes + (*OrPredicateAttributes)(nil), // 4: temporal.server.api.persistence.v1.OrPredicateAttributes + (*NotPredicateAttributes)(nil), // 5: temporal.server.api.persistence.v1.NotPredicateAttributes + (*NamespaceIdPredicateAttributes)(nil), // 6: temporal.server.api.persistence.v1.NamespaceIdPredicateAttributes + (*TaskTypePredicateAttributes)(nil), // 7: temporal.server.api.persistence.v1.TaskTypePredicateAttributes + (*DestinationPredicateAttributes)(nil), // 8: temporal.server.api.persistence.v1.DestinationPredicateAttributes + (*OutboundTaskGroupPredicateAttributes)(nil), // 9: temporal.server.api.persistence.v1.OutboundTaskGroupPredicateAttributes + (*OutboundTaskPredicateAttributes)(nil), // 10: temporal.server.api.persistence.v1.OutboundTaskPredicateAttributes + 
(*OutboundTaskPredicateAttributes_Group)(nil), // 11: temporal.server.api.persistence.v1.OutboundTaskPredicateAttributes.Group + (v1.PredicateType)(0), // 12: temporal.server.api.enums.v1.PredicateType + (v1.TaskType)(0), // 13: temporal.server.api.enums.v1.TaskType } var file_temporal_server_api_persistence_v1_predicates_proto_depIdxs = []int32{ - 9, // 0: temporal.server.api.persistence.v1.Predicate.predicate_type:type_name -> temporal.server.api.enums.v1.PredicateType + 12, // 0: temporal.server.api.persistence.v1.Predicate.predicate_type:type_name -> temporal.server.api.enums.v1.PredicateType 1, // 1: temporal.server.api.persistence.v1.Predicate.universal_predicate_attributes:type_name -> temporal.server.api.persistence.v1.UniversalPredicateAttributes 2, // 2: temporal.server.api.persistence.v1.Predicate.empty_predicate_attributes:type_name -> temporal.server.api.persistence.v1.EmptyPredicateAttributes 3, // 3: temporal.server.api.persistence.v1.Predicate.and_predicate_attributes:type_name -> temporal.server.api.persistence.v1.AndPredicateAttributes @@ -743,15 +813,18 @@ var file_temporal_server_api_persistence_v1_predicates_proto_depIdxs = []int32{ 6, // 6: temporal.server.api.persistence.v1.Predicate.namespace_id_predicate_attributes:type_name -> temporal.server.api.persistence.v1.NamespaceIdPredicateAttributes 7, // 7: temporal.server.api.persistence.v1.Predicate.task_type_predicate_attributes:type_name -> temporal.server.api.persistence.v1.TaskTypePredicateAttributes 8, // 8: temporal.server.api.persistence.v1.Predicate.destination_predicate_attributes:type_name -> temporal.server.api.persistence.v1.DestinationPredicateAttributes - 0, // 9: temporal.server.api.persistence.v1.AndPredicateAttributes.predicates:type_name -> temporal.server.api.persistence.v1.Predicate - 0, // 10: temporal.server.api.persistence.v1.OrPredicateAttributes.predicates:type_name -> temporal.server.api.persistence.v1.Predicate - 0, // 11: 
temporal.server.api.persistence.v1.NotPredicateAttributes.predicate:type_name -> temporal.server.api.persistence.v1.Predicate - 10, // 12: temporal.server.api.persistence.v1.TaskTypePredicateAttributes.task_types:type_name -> temporal.server.api.enums.v1.TaskType - 13, // [13:13] is the sub-list for method output_type - 13, // [13:13] is the sub-list for method input_type - 13, // [13:13] is the sub-list for extension type_name - 13, // [13:13] is the sub-list for extension extendee - 0, // [0:13] is the sub-list for field type_name + 9, // 9: temporal.server.api.persistence.v1.Predicate.outbound_task_group_predicate_attributes:type_name -> temporal.server.api.persistence.v1.OutboundTaskGroupPredicateAttributes + 10, // 10: temporal.server.api.persistence.v1.Predicate.outbound_task_predicate_attributes:type_name -> temporal.server.api.persistence.v1.OutboundTaskPredicateAttributes + 0, // 11: temporal.server.api.persistence.v1.AndPredicateAttributes.predicates:type_name -> temporal.server.api.persistence.v1.Predicate + 0, // 12: temporal.server.api.persistence.v1.OrPredicateAttributes.predicates:type_name -> temporal.server.api.persistence.v1.Predicate + 0, // 13: temporal.server.api.persistence.v1.NotPredicateAttributes.predicate:type_name -> temporal.server.api.persistence.v1.Predicate + 13, // 14: temporal.server.api.persistence.v1.TaskTypePredicateAttributes.task_types:type_name -> temporal.server.api.enums.v1.TaskType + 11, // 15: temporal.server.api.persistence.v1.OutboundTaskPredicateAttributes.groups:type_name -> temporal.server.api.persistence.v1.OutboundTaskPredicateAttributes.Group + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_temporal_server_api_persistence_v1_predicates_proto_init() } @@ -759,117 +832,7 @@ 
func file_temporal_server_api_persistence_v1_predicates_proto_init() { if File_temporal_server_api_persistence_v1_predicates_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Predicate); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UniversalPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*EmptyPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AndPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OrPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NotPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceIdPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskTypePredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DestinationPredicateAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[0].OneofWrappers = []interface{}{ + file_temporal_server_api_persistence_v1_predicates_proto_msgTypes[0].OneofWrappers = []any{ (*Predicate_UniversalPredicateAttributes)(nil), (*Predicate_EmptyPredicateAttributes)(nil), (*Predicate_AndPredicateAttributes)(nil), @@ -878,14 +841,16 @@ func file_temporal_server_api_persistence_v1_predicates_proto_init() { (*Predicate_NamespaceIdPredicateAttributes)(nil), (*Predicate_TaskTypePredicateAttributes)(nil), (*Predicate_DestinationPredicateAttributes)(nil), + (*Predicate_OutboundTaskGroupPredicateAttributes)(nil), + (*Predicate_OutboundTaskPredicateAttributes)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_predicates_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_predicates_proto_rawDesc), len(file_temporal_server_api_persistence_v1_predicates_proto_rawDesc)), 
NumEnums: 0, - NumMessages: 9, + NumMessages: 12, NumExtensions: 0, NumServices: 0, }, @@ -894,7 +859,6 @@ func file_temporal_server_api_persistence_v1_predicates_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_predicates_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_predicates_proto = out.File - file_temporal_server_api_persistence_v1_predicates_proto_rawDesc = nil file_temporal_server_api_persistence_v1_predicates_proto_goTypes = nil file_temporal_server_api_persistence_v1_predicates_proto_depIdxs = nil } diff --git a/api/persistence/v1/queue_metadata.go-helpers.pb.go b/api/persistence/v1/queue_metadata.go-helpers.pb.go index 7948ecd7972..5490eba19dd 100644 --- a/api/persistence/v1/queue_metadata.go-helpers.pb.go +++ b/api/persistence/v1/queue_metadata.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence diff --git a/api/persistence/v1/queue_metadata.pb.go b/api/persistence/v1/queue_metadata.pb.go index 6ae9813f82a..ddd9b32b432 100644 --- a/api/persistence/v1/queue_metadata.pb.go +++ b/api/persistence/v1/queue_metadata.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -45,20 +24,17 @@ const ( // data column type QueueMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ClusterAckLevels map[string]int64 `protobuf:"bytes,1,rep,name=cluster_ack_levels,json=clusterAckLevels,proto3" json:"cluster_ack_levels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + ClusterAckLevels map[string]int64 `protobuf:"bytes,1,rep,name=cluster_ack_levels,json=clusterAckLevels,proto3" json:"cluster_ack_levels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *QueueMetadata) Reset() { *x = QueueMetadata{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queue_metadata_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queue_metadata_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueueMetadata) String() string { @@ -69,7 +45,7 @@ func (*QueueMetadata) ProtoMessage() {} func (x *QueueMetadata) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queue_metadata_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -93,46 +69,29 @@ func (x *QueueMetadata) GetClusterAckLevels() map[string]int64 { var File_temporal_server_api_persistence_v1_queue_metadata_proto 
protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc = []byte{ - 0x0a, 0x37, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, 0xd7, 0x01, 0x0a, 0x0d, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x79, 0x0a, 0x12, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, - 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x1a, 0x4b, 0x0a, 0x15, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x41, 0x63, 0x6b, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x18, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 
0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, - 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc = "" + + "\n" + + "7temporal/server/api/persistence/v1/queue_metadata.proto\x12\"temporal.server.api.persistence.v1\"\xcb\x01\n" + + "\rQueueMetadata\x12u\n" + + "\x12cluster_ack_levels\x18\x01 \x03(\v2G.temporal.server.api.persistence.v1.QueueMetadata.ClusterAckLevelsEntryR\x10clusterAckLevels\x1aC\n" + + "\x15ClusterAckLevelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x01B6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescData = file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc + file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescData) + file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc), len(file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc))) }) return 
file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDescData } var file_temporal_server_api_persistence_v1_queue_metadata_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_temporal_server_api_persistence_v1_queue_metadata_proto_goTypes = []interface{}{ +var file_temporal_server_api_persistence_v1_queue_metadata_proto_goTypes = []any{ (*QueueMetadata)(nil), // 0: temporal.server.api.persistence.v1.QueueMetadata nil, // 1: temporal.server.api.persistence.v1.QueueMetadata.ClusterAckLevelsEntry } @@ -150,25 +109,11 @@ func file_temporal_server_api_persistence_v1_queue_metadata_proto_init() { if File_temporal_server_api_persistence_v1_queue_metadata_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_queue_metadata_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueueMetadata); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc), len(file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -179,7 +124,6 @@ func file_temporal_server_api_persistence_v1_queue_metadata_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_queue_metadata_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_queue_metadata_proto = out.File - file_temporal_server_api_persistence_v1_queue_metadata_proto_rawDesc = nil file_temporal_server_api_persistence_v1_queue_metadata_proto_goTypes = nil file_temporal_server_api_persistence_v1_queue_metadata_proto_depIdxs = nil } diff --git 
a/api/persistence/v1/queues.go-helpers.pb.go b/api/persistence/v1/queues.go-helpers.pb.go index aab5a387d33..6c750c66ffc 100644 --- a/api/persistence/v1/queues.go-helpers.pb.go +++ b/api/persistence/v1/queues.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence diff --git a/api/persistence/v1/queues.pb.go b/api/persistence/v1/queues.pb.go index 2d87437601c..8f9a46243e3 100644 --- a/api/persistence/v1/queues.pb.go +++ b/api/persistence/v1/queues.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -45,21 +24,18 @@ const ( ) type QueueState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReaderStates map[int64]*QueueReaderState `protobuf:"bytes,1,rep,name=reader_states,json=readerStates,proto3" json:"reader_states,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + ReaderStates map[int64]*QueueReaderState `protobuf:"bytes,1,rep,name=reader_states,json=readerStates,proto3" json:"reader_states,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` ExclusiveReaderHighWatermark *TaskKey `protobuf:"bytes,2,opt,name=exclusive_reader_high_watermark,json=exclusiveReaderHighWatermark,proto3" json:"exclusive_reader_high_watermark,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *QueueState) Reset() { *x = QueueState{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueueState) String() string { @@ -70,7 +46,7 @@ func (*QueueState) ProtoMessage() {} func (x *QueueState) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -100,20 +76,17 @@ func (x *QueueState) 
GetExclusiveReaderHighWatermark() *TaskKey { } type QueueReaderState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Scopes []*QueueSliceScope `protobuf:"bytes,1,rep,name=scopes,proto3" json:"scopes,omitempty"` unknownFields protoimpl.UnknownFields - - Scopes []*QueueSliceScope `protobuf:"bytes,1,rep,name=scopes,proto3" json:"scopes,omitempty"` + sizeCache protoimpl.SizeCache } func (x *QueueReaderState) Reset() { *x = QueueReaderState{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueueReaderState) String() string { @@ -124,7 +97,7 @@ func (*QueueReaderState) ProtoMessage() {} func (x *QueueReaderState) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -147,21 +120,18 @@ func (x *QueueReaderState) GetScopes() []*QueueSliceScope { } type QueueSliceScope struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Range *QueueSliceRange `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"` + Predicate *Predicate `protobuf:"bytes,2,opt,name=predicate,proto3" json:"predicate,omitempty"` unknownFields protoimpl.UnknownFields - - Range *QueueSliceRange `protobuf:"bytes,1,opt,name=range,proto3" json:"range,omitempty"` - Predicate *Predicate `protobuf:"bytes,2,opt,name=predicate,proto3" json:"predicate,omitempty"` + sizeCache protoimpl.SizeCache } func (x *QueueSliceScope) Reset() 
{ *x = QueueSliceScope{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueueSliceScope) String() string { @@ -172,7 +142,7 @@ func (*QueueSliceScope) ProtoMessage() {} func (x *QueueSliceScope) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -202,21 +172,18 @@ func (x *QueueSliceScope) GetPredicate() *Predicate { } type QueueSliceRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + InclusiveMin *TaskKey `protobuf:"bytes,1,opt,name=inclusive_min,json=inclusiveMin,proto3" json:"inclusive_min,omitempty"` + ExclusiveMax *TaskKey `protobuf:"bytes,2,opt,name=exclusive_max,json=exclusiveMax,proto3" json:"exclusive_max,omitempty"` unknownFields protoimpl.UnknownFields - - InclusiveMin *TaskKey `protobuf:"bytes,1,opt,name=inclusive_min,json=inclusiveMin,proto3" json:"inclusive_min,omitempty"` - ExclusiveMax *TaskKey `protobuf:"bytes,2,opt,name=exclusive_max,json=exclusiveMax,proto3" json:"exclusive_max,omitempty"` + sizeCache protoimpl.SizeCache } func (x *QueueSliceRange) Reset() { *x = QueueSliceRange{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x 
*QueueSliceRange) String() string { @@ -227,7 +194,7 @@ func (*QueueSliceRange) ProtoMessage() {} func (x *QueueSliceRange) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -257,20 +224,17 @@ func (x *QueueSliceRange) GetExclusiveMax() *TaskKey { } type ReadQueueMessagesNextPageToken struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LastReadMessageId int64 `protobuf:"varint,1,opt,name=last_read_message_id,json=lastReadMessageId,proto3" json:"last_read_message_id,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + LastReadMessageId int64 `protobuf:"varint,1,opt,name=last_read_message_id,json=lastReadMessageId,proto3" json:"last_read_message_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReadQueueMessagesNextPageToken) Reset() { *x = ReadQueueMessagesNextPageToken{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReadQueueMessagesNextPageToken) String() string { @@ -281,7 +245,7 @@ func (*ReadQueueMessagesNextPageToken) ProtoMessage() {} func (x *ReadQueueMessagesNextPageToken) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -304,20 +268,17 @@ func (x 
*ReadQueueMessagesNextPageToken) GetLastReadMessageId() int64 { } type ListQueuesNextPageToken struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LastReadQueueNumber int64 `protobuf:"varint,1,opt,name=last_read_queue_number,json=lastReadQueueNumber,proto3" json:"last_read_queue_number,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + LastReadQueueNumber int64 `protobuf:"varint,1,opt,name=last_read_queue_number,json=lastReadQueueNumber,proto3" json:"last_read_queue_number,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ListQueuesNextPageToken) Reset() { *x = ListQueuesNextPageToken{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ListQueuesNextPageToken) String() string { @@ -328,7 +289,7 @@ func (*ListQueuesNextPageToken) ProtoMessage() {} func (x *ListQueuesNextPageToken) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -353,10 +314,7 @@ func (x *ListQueuesNextPageToken) GetLastReadQueueNumber() int64 { // HistoryTask represents an internal history service task for a particular shard. We use a blob because there is no // common proto for all task proto types. type HistoryTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // shard_id that this task belonged to when it was created. 
Technically, you can derive this from the task data // blob, but it's useful to have it here for quick access and to avoid deserializing the blob. Note that this may be // different from the shard id of this task in the current cluster because it could have come from a cluster with a @@ -365,16 +323,16 @@ type HistoryTask struct { // blob that contains the history task proto. There is a GoLang-specific generic deserializer for this blob, but // there is no common proto for all task proto types, so deserializing in other languages will require a custom // switch on the task category, which should be available from the metadata for the queue that this task came from. - Blob *v1.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + Blob *v1.DataBlob `protobuf:"bytes,2,opt,name=blob,proto3" json:"blob,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryTask) Reset() { *x = HistoryTask{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryTask) String() string { @@ -385,7 +343,7 @@ func (*HistoryTask) ProtoMessage() {} func (x *HistoryTask) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -415,10 +373,7 @@ func (x *HistoryTask) GetBlob() *v1.DataBlob { } type QueuePartition struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // min_message_id is 
less than or equal to the id of every message in the queue. The min_message_id is mainly used to // skip over tombstones in Cassandra: let's say we deleted the first 1K messages from a queue with 1.1K messages. If // @@ -430,16 +385,16 @@ type QueuePartition struct { // However, such errors surface to clients with an "Unavailable" code, so clients retry, and the id should be updated // soon. Additionally, we only use min_message_id to skip over tombstones, so it will only affect read performance, // not correctness. - MinMessageId int64 `protobuf:"varint,1,opt,name=min_message_id,json=minMessageId,proto3" json:"min_message_id,omitempty"` + MinMessageId int64 `protobuf:"varint,1,opt,name=min_message_id,json=minMessageId,proto3" json:"min_message_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *QueuePartition) Reset() { *x = QueuePartition{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueuePartition) String() string { @@ -450,7 +405,7 @@ func (*QueuePartition) ProtoMessage() {} func (x *QueuePartition) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -473,21 +428,18 @@ func (x *QueuePartition) GetMinMessageId() int64 { } type Queue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // A map from partition index (0-based) to the partition metadata. 
- Partitions map[int32]*QueuePartition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Partitions map[int32]*QueuePartition `protobuf:"bytes,1,rep,name=partitions,proto3" json:"partitions,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Queue) Reset() { *x = Queue{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Queue) String() string { @@ -498,7 +450,7 @@ func (*Queue) ProtoMessage() {} func (x *Queue) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_queues_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -522,125 +474,55 @@ func (x *Queue) GetPartitions() map[int32]*QueuePartition { var File_temporal_server_api_persistence_v1_queues_proto protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_queues_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 
- 0x31, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, - 0x74, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x02, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x69, 0x0a, 0x0d, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x76, 0x0a, 0x1f, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, - 0x68, 0x69, 0x67, 0x68, 0x5f, 0x77, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 
0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x52, 0x1c, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x69, - 0x67, 0x68, 0x57, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x1a, - 0x7d, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x63, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4f, 0x0a, 0x06, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x52, 0x06, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb1, 0x01, 0x0a, 0x0f, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x4d, 0x0a, - 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 
0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05, - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4f, 0x0a, 0x09, 0x70, 0x72, 0x65, 0x64, - 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, - 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbd, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x53, 0x6c, 0x69, 0x63, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x54, 0x0a, 0x0d, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x52, 0x0c, 0x69, 0x6e, 0x63, - 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x54, 0x0a, 0x0d, - 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x52, 0x0c, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x42, 
0x02, 0x68, 0x00, 0x22, - 0x55, 0x0a, 0x1e, 0x52, 0x65, 0x61, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x73, 0x4e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, - 0x33, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x6c, 0x61, - 0x73, 0x74, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x52, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x73, - 0x4e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x37, 0x0a, 0x16, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x5f, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6c, 0x61, 0x73, - 0x74, 0x52, 0x65, 0x61, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, - 0x02, 0x68, 0x00, 0x22, 0x66, 0x0a, 0x0b, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, - 0x6b, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x38, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x04, 0x62, - 0x6c, 0x6f, 0x62, 0x42, 0x02, 0x68, 0x00, 0x22, 0x3a, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x50, - 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0c, 0x6d, 0x69, 0x6e, 0x4d, 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x22, 0xe1, 0x01, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x5d, 0x0a, 0x0a, 0x70, 0x61, - 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x79, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, - 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_queues_proto_rawDesc = "" + + "\n" + + 
"/temporal/server/api/persistence/v1/queues.proto\x12\"temporal.server.api.persistence.v1\x1a$temporal/api/common/v1/message.proto\x1a3temporal/server/api/persistence/v1/predicates.proto\x1a.temporal/server/api/persistence/v1/tasks.proto\"\xde\x02\n" + + "\n" + + "QueueState\x12e\n" + + "\rreader_states\x18\x01 \x03(\v2@.temporal.server.api.persistence.v1.QueueState.ReaderStatesEntryR\freaderStates\x12r\n" + + "\x1fexclusive_reader_high_watermark\x18\x02 \x01(\v2+.temporal.server.api.persistence.v1.TaskKeyR\x1cexclusiveReaderHighWatermark\x1au\n" + + "\x11ReaderStatesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12J\n" + + "\x05value\x18\x02 \x01(\v24.temporal.server.api.persistence.v1.QueueReaderStateR\x05value:\x028\x01\"_\n" + + "\x10QueueReaderState\x12K\n" + + "\x06scopes\x18\x01 \x03(\v23.temporal.server.api.persistence.v1.QueueSliceScopeR\x06scopes\"\xa9\x01\n" + + "\x0fQueueSliceScope\x12I\n" + + "\x05range\x18\x01 \x01(\v23.temporal.server.api.persistence.v1.QueueSliceRangeR\x05range\x12K\n" + + "\tpredicate\x18\x02 \x01(\v2-.temporal.server.api.persistence.v1.PredicateR\tpredicate\"\xb5\x01\n" + + "\x0fQueueSliceRange\x12P\n" + + "\rinclusive_min\x18\x01 \x01(\v2+.temporal.server.api.persistence.v1.TaskKeyR\finclusiveMin\x12P\n" + + "\rexclusive_max\x18\x02 \x01(\v2+.temporal.server.api.persistence.v1.TaskKeyR\fexclusiveMax\"Q\n" + + "\x1eReadQueueMessagesNextPageToken\x12/\n" + + "\x14last_read_message_id\x18\x01 \x01(\x03R\x11lastReadMessageId\"N\n" + + "\x17ListQueuesNextPageToken\x123\n" + + "\x16last_read_queue_number\x18\x01 \x01(\x03R\x13lastReadQueueNumber\"^\n" + + "\vHistoryTask\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x124\n" + + "\x04blob\x18\x02 \x01(\v2 .temporal.api.common.v1.DataBlobR\x04blob\"6\n" + + "\x0eQueuePartition\x12$\n" + + "\x0emin_message_id\x18\x01 \x01(\x03R\fminMessageId\"\xd5\x01\n" + + "\x05Queue\x12Y\n" + + "\n" + + "partitions\x18\x01 
\x03(\v29.temporal.server.api.persistence.v1.Queue.PartitionsEntryR\n" + + "partitions\x1aq\n" + + "\x0fPartitionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12H\n" + + "\x05value\x18\x02 \x01(\v22.temporal.server.api.persistence.v1.QueuePartitionR\x05value:\x028\x01B6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_queues_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_queues_proto_rawDescData = file_temporal_server_api_persistence_v1_queues_proto_rawDesc + file_temporal_server_api_persistence_v1_queues_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_queues_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_queues_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_queues_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_queues_proto_rawDescData) + file_temporal_server_api_persistence_v1_queues_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_queues_proto_rawDesc), len(file_temporal_server_api_persistence_v1_queues_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_queues_proto_rawDescData } var file_temporal_server_api_persistence_v1_queues_proto_msgTypes = make([]protoimpl.MessageInfo, 11) -var file_temporal_server_api_persistence_v1_queues_proto_goTypes = []interface{}{ +var file_temporal_server_api_persistence_v1_queues_proto_goTypes = []any{ (*QueueState)(nil), // 0: temporal.server.api.persistence.v1.QueueState (*QueueReaderState)(nil), // 1: temporal.server.api.persistence.v1.QueueReaderState (*QueueSliceScope)(nil), // 2: temporal.server.api.persistence.v1.QueueSliceScope @@ -682,121 +564,11 @@ func file_temporal_server_api_persistence_v1_queues_proto_init() { } file_temporal_server_api_persistence_v1_predicates_proto_init() file_temporal_server_api_persistence_v1_tasks_proto_init() 
- if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueueState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueueReaderState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueueSliceScope); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueueSliceRange); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadQueueMessagesNextPageToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListQueuesNextPageToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryTask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - 
return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueuePartition); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_queues_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Queue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_queues_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_queues_proto_rawDesc), len(file_temporal_server_api_persistence_v1_queues_proto_rawDesc)), NumEnums: 0, NumMessages: 11, NumExtensions: 0, @@ -807,7 +579,6 @@ func file_temporal_server_api_persistence_v1_queues_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_queues_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_queues_proto = out.File - file_temporal_server_api_persistence_v1_queues_proto_rawDesc = nil file_temporal_server_api_persistence_v1_queues_proto_goTypes = nil file_temporal_server_api_persistence_v1_queues_proto_depIdxs = nil } diff --git a/api/persistence/v1/task_queues.go-helpers.pb.go b/api/persistence/v1/task_queues.go-helpers.pb.go index 0773e220e65..c2dc36e3d9f 100644 --- a/api/persistence/v1/task_queues.go-helpers.pb.go +++ b/api/persistence/v1/task_queues.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence @@ -103,6 +79,80 @@ func (this *CompatibleVersionSet) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type AssignmentRule to the protobuf v3 wire format +func (val *AssignmentRule) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type AssignmentRule from the protobuf v3 wire format +func (val *AssignmentRule) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *AssignmentRule) Size() int { + return proto.Size(val) +} + +// Equal returns whether two AssignmentRule values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *AssignmentRule) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *AssignmentRule + switch t := that.(type) { + case *AssignmentRule: + that1 = t + case AssignmentRule: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RedirectRule to the protobuf v3 wire format +func (val *RedirectRule) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RedirectRule from the protobuf v3 wire format +func (val *RedirectRule) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RedirectRule) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RedirectRule values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RedirectRule) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RedirectRule + switch t := that.(type) { + case *RedirectRule: + that1 = t + case RedirectRule: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type VersioningData to the protobuf v3 wire format func (val *VersioningData) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -140,6 +190,117 @@ func (this *VersioningData) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type DeploymentData to the protobuf v3 wire format +func (val *DeploymentData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeploymentData from the protobuf v3 wire format +func (val *DeploymentData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeploymentData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeploymentData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeploymentData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeploymentData + switch t := that.(type) { + case *DeploymentData: + that1 = t + case DeploymentData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerDeploymentData to the protobuf v3 wire format +func (val *WorkerDeploymentData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerDeploymentData from the protobuf v3 wire format +func (val *WorkerDeploymentData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerDeploymentData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerDeploymentData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerDeploymentData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerDeploymentData + switch t := that.(type) { + case *WorkerDeploymentData: + that1 = t + case WorkerDeploymentData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TaskQueueTypeUserData to the protobuf v3 wire format +func (val *TaskQueueTypeUserData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TaskQueueTypeUserData from the protobuf v3 wire format +func (val *TaskQueueTypeUserData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TaskQueueTypeUserData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TaskQueueTypeUserData values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TaskQueueTypeUserData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TaskQueueTypeUserData + switch t := that.(type) { + case *TaskQueueTypeUserData: + that1 = t + case TaskQueueTypeUserData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type TaskQueueUserData to the protobuf v3 wire format func (val *TaskQueueUserData) Marshal() ([]byte, error) { return proto.Marshal(val) diff --git a/api/persistence/v1/task_queues.pb.go b/api/persistence/v1/task_queues.pb.go index 120404c5669..9b3d6d872e1 100644 --- a/api/persistence/v1/task_queues.pb.go +++ b/api/persistence/v1/task_queues.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -32,8 +10,13 @@ import ( reflect "reflect" "strconv" sync "sync" + unsafe "unsafe" + v13 "go.temporal.io/api/deployment/v1" + v11 "go.temporal.io/api/taskqueue/v1" v1 "go.temporal.io/server/api/clock/v1" + v12 "go.temporal.io/server/api/deployment/v1" + v14 "go.temporal.io/server/api/enums/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -106,31 +89,28 @@ func (BuildId_State) EnumDescriptor() ([]byte, []int) { // BuildId is an identifier with a timestamped status used to identify workers for task queue versioning purposes. 
type BuildId struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - State BuildId_State `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.server.api.persistence.v1.BuildId_State" json:"state,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + State BuildId_State `protobuf:"varint,2,opt,name=state,proto3,enum=temporal.server.api.persistence.v1.BuildId_State" json:"state,omitempty"` // HLC timestamp representing when the state was updated or the when build ID was originally inserted. // (-- api-linter: core::0142::time-field-type=disabled // // aip.dev/not-precedent: Using HLC instead of wall clock. --) StateUpdateTimestamp *v1.HybridLogicalClock `protobuf:"bytes,3,opt,name=state_update_timestamp,json=stateUpdateTimestamp,proto3" json:"state_update_timestamp,omitempty"` - // HLC timestamp representing when this build id was last made default in its version set. + // HLC timestamp representing when this build ID was last made default in its version set. // (-- api-linter: core::0142::time-field-type=disabled // // aip.dev/not-precedent: Using HLC instead of wall clock. 
--) BecameDefaultTimestamp *v1.HybridLogicalClock `protobuf:"bytes,4,opt,name=became_default_timestamp,json=becameDefaultTimestamp,proto3" json:"became_default_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BuildId) Reset() { *x = BuildId{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BuildId) String() string { @@ -141,7 +121,7 @@ func (*BuildId) ProtoMessage() {} func (x *BuildId) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -184,12 +164,9 @@ func (x *BuildId) GetBecameDefaultTimestamp() *v1.HybridLogicalClock { return nil } -// An internal represenation of temporal.api.taskqueue.v1.CompatibleVersionSet +// An internal representation of temporal.api.taskqueue.v1.CompatibleVersionSet type CompatibleVersionSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set IDs are used internally by matching. // A set typically has one set ID and extra care is taken to enforce this. // In some situations, including: @@ -199,7 +176,7 @@ type CompatibleVersionSet struct { // - Cross-task-queue activities/child workflows/CAN where the user has not set up parallel // versioning data // - // we have to guess the set id for a build id. If that happens, and then the build id is + // we have to guess the set id for a build ID. 
If that happens, and then the build ID is // discovered to be in a different set, then the sets will be merged and both (or more) // build ids will be preserved, so that we don't lose tasks. // The first set id is considered the "primary", and the others are "demoted". Once a build @@ -212,15 +189,15 @@ type CompatibleVersionSet struct { // // aip.dev/not-precedent: Using HLC instead of wall clock. --) BecameDefaultTimestamp *v1.HybridLogicalClock `protobuf:"bytes,4,opt,name=became_default_timestamp,json=becameDefaultTimestamp,proto3" json:"became_default_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CompatibleVersionSet) Reset() { *x = CompatibleVersionSet{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CompatibleVersionSet) String() string { @@ -231,7 +208,7 @@ func (*CompatibleVersionSet) ProtoMessage() {} func (x *CompatibleVersionSet) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -267,24 +244,161 @@ func (x *CompatibleVersionSet) GetBecameDefaultTimestamp() *v1.HybridLogicalCloc return nil } +type AssignmentRule struct { + state protoimpl.MessageState `protogen:"open.v1"` + Rule *v11.BuildIdAssignmentRule `protobuf:"bytes,1,opt,name=rule,proto3" json:"rule,omitempty"` + // (-- api-linter: core::0142::time-field-type=disabled + // + // aip.dev/not-precedent: Using HLC instead of wall clock. 
--) + CreateTimestamp *v1.HybridLogicalClock `protobuf:"bytes,2,opt,name=create_timestamp,json=createTimestamp,proto3" json:"create_timestamp,omitempty"` + // when delete_timestamp is present the rule should be treated as deleted + // + // (-- api-linter: core::0142::time-field-type=disabled + // + // aip.dev/not-precedent: Using HLC instead of wall clock. --) + DeleteTimestamp *v1.HybridLogicalClock `protobuf:"bytes,3,opt,name=delete_timestamp,json=deleteTimestamp,proto3" json:"delete_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssignmentRule) Reset() { + *x = AssignmentRule{} + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssignmentRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignmentRule) ProtoMessage() {} + +func (x *AssignmentRule) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignmentRule.ProtoReflect.Descriptor instead. 
+func (*AssignmentRule) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{2} +} + +func (x *AssignmentRule) GetRule() *v11.BuildIdAssignmentRule { + if x != nil { + return x.Rule + } + return nil +} + +func (x *AssignmentRule) GetCreateTimestamp() *v1.HybridLogicalClock { + if x != nil { + return x.CreateTimestamp + } + return nil +} + +func (x *AssignmentRule) GetDeleteTimestamp() *v1.HybridLogicalClock { + if x != nil { + return x.DeleteTimestamp + } + return nil +} + +type RedirectRule struct { + state protoimpl.MessageState `protogen:"open.v1"` + Rule *v11.CompatibleBuildIdRedirectRule `protobuf:"bytes,1,opt,name=rule,proto3" json:"rule,omitempty"` + // (-- api-linter: core::0142::time-field-type=disabled + // + // aip.dev/not-precedent: Using HLC instead of wall clock. --) + CreateTimestamp *v1.HybridLogicalClock `protobuf:"bytes,2,opt,name=create_timestamp,json=createTimestamp,proto3" json:"create_timestamp,omitempty"` + // when delete_timestamp is present the rule should be treated as deleted + // + // (-- api-linter: core::0142::time-field-type=disabled + // + // aip.dev/not-precedent: Using HLC instead of wall clock. 
--) + DeleteTimestamp *v1.HybridLogicalClock `protobuf:"bytes,3,opt,name=delete_timestamp,json=deleteTimestamp,proto3" json:"delete_timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RedirectRule) Reset() { + *x = RedirectRule{} + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RedirectRule) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RedirectRule) ProtoMessage() {} + +func (x *RedirectRule) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RedirectRule.ProtoReflect.Descriptor instead. +func (*RedirectRule) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{3} +} + +func (x *RedirectRule) GetRule() *v11.CompatibleBuildIdRedirectRule { + if x != nil { + return x.Rule + } + return nil +} + +func (x *RedirectRule) GetCreateTimestamp() *v1.HybridLogicalClock { + if x != nil { + return x.CreateTimestamp + } + return nil +} + +func (x *RedirectRule) GetDeleteTimestamp() *v1.HybridLogicalClock { + if x != nil { + return x.DeleteTimestamp + } + return nil +} + // Holds all the data related to worker versioning for a task queue. // Backwards-incompatible changes cannot be made, as this would make existing stored data unreadable. type VersioningData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // All the incompatible version sets, unordered except for the last element, which is considered the set "default". 
VersionSets []*CompatibleVersionSet `protobuf:"bytes,1,rep,name=version_sets,json=versionSets,proto3" json:"version_sets,omitempty"` + // Ordered list of assignment rules. Also contains recently-deleted rules. + AssignmentRules []*AssignmentRule `protobuf:"bytes,2,rep,name=assignment_rules,json=assignmentRules,proto3" json:"assignment_rules,omitempty"` + // Unordered list of redirect rules. Also contains recently-deleted rules. + RedirectRules []*RedirectRule `protobuf:"bytes,3,rep,name=redirect_rules,json=redirectRules,proto3" json:"redirect_rules,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *VersioningData) Reset() { *x = VersioningData{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VersioningData) String() string { @@ -294,8 +408,8 @@ func (x *VersioningData) String() string { func (*VersioningData) ProtoMessage() {} func (x *VersioningData) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -307,7 +421,7 @@ func (x *VersioningData) ProtoReflect() protoreflect.Message { // Deprecated: Use VersioningData.ProtoReflect.Descriptor instead. 
func (*VersioningData) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{2} + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{4} } func (x *VersioningData) GetVersionSets() []*CompatibleVersionSet { @@ -317,32 +431,241 @@ func (x *VersioningData) GetVersionSets() []*CompatibleVersionSet { return nil } -// Container for all persistent user provided data for a task queue. -// Task queue as a named concept here is close to how users interpret them, rather than relating to some specific type -// (workflow vs activity, etc) and thus, as a consequence, any data that applies to a specific type (say, activity rate -// limiting) should be defined as such within this structure. +func (x *VersioningData) GetAssignmentRules() []*AssignmentRule { + if x != nil { + return x.AssignmentRules + } + return nil +} + +func (x *VersioningData) GetRedirectRules() []*RedirectRule { + if x != nil { + return x.RedirectRules + } + return nil +} + +type DeploymentData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Set of worker deployment versions that this task queue belongs to. + // Current Version is defined implicitly as the version with `current_since_time!=nil` and the most + // recent `routing_update_time`. + // Ramping Version is defined implicitly as the version with `ramping_since_time!=nil` and the most + // recent `routing_update_time`. + // The Ramping Version receives a share of unversioned/unpinned tasks according to its + // `ramp_percentage`. If there is no Ramping Version, all the unversioned/unpinned tasks are + // routed to the Current Version. If there is no Current Version, any poller with UNVERSIONED + // (or unspecified) WorkflowVersioningMode will receive the tasks. + // Remove after `AsyncSetCurrentAndRamping` workflow version is irreversibly enabled. 
+ // + // Deprecated: Marked as deprecated in temporal/server/api/persistence/v1/task_queues.proto. + Versions []*v12.DeploymentVersionData `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty"` + // Present if the task queue's ramping version is unversioned. + // Remove after `AsyncSetCurrentAndRamping` workflow version is irreversibly enabled. + // + // Deprecated: Marked as deprecated in temporal/server/api/persistence/v1/task_queues.proto. + UnversionedRampData *v12.DeploymentVersionData `protobuf:"bytes,3,opt,name=unversioned_ramp_data,json=unversionedRampData,proto3" json:"unversioned_ramp_data,omitempty"` + // Routing and version membership data for all worker deployments that this task queue belongs to. + // Key is the deployment name. + DeploymentsData map[string]*WorkerDeploymentData `protobuf:"bytes,4,rep,name=deployments_data,json=deploymentsData,proto3" json:"deployments_data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeploymentData) Reset() { + *x = DeploymentData{} + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeploymentData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeploymentData) ProtoMessage() {} + +func (x *DeploymentData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeploymentData.ProtoReflect.Descriptor instead. 
+func (*DeploymentData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{5} +} + +// Deprecated: Marked as deprecated in temporal/server/api/persistence/v1/task_queues.proto. +func (x *DeploymentData) GetVersions() []*v12.DeploymentVersionData { + if x != nil { + return x.Versions + } + return nil +} + +// Deprecated: Marked as deprecated in temporal/server/api/persistence/v1/task_queues.proto. +func (x *DeploymentData) GetUnversionedRampData() *v12.DeploymentVersionData { + if x != nil { + return x.UnversionedRampData + } + return nil +} + +func (x *DeploymentData) GetDeploymentsData() map[string]*WorkerDeploymentData { + if x != nil { + return x.DeploymentsData + } + return nil +} + +// Routing config and version membership data for a given worker deployment that a TQ should know. +type WorkerDeploymentData struct { + state protoimpl.MessageState `protogen:"open.v1"` + RoutingConfig *v13.RoutingConfig `protobuf:"bytes,1,opt,name=routing_config,json=routingConfig,proto3" json:"routing_config,omitempty"` + // This map tracks the membership of the task queue in the deployment versions. A version is + // present here iff the task queue has ever been polled from the version. + // Key is the build id. 
+ Versions map[string]*v12.WorkerDeploymentVersionData `protobuf:"bytes,2,rep,name=versions,proto3" json:"versions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerDeploymentData) Reset() { + *x = WorkerDeploymentData{} + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerDeploymentData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerDeploymentData) ProtoMessage() {} + +func (x *WorkerDeploymentData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerDeploymentData.ProtoReflect.Descriptor instead. +func (*WorkerDeploymentData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{6} +} + +func (x *WorkerDeploymentData) GetRoutingConfig() *v13.RoutingConfig { + if x != nil { + return x.RoutingConfig + } + return nil +} + +func (x *WorkerDeploymentData) GetVersions() map[string]*v12.WorkerDeploymentVersionData { + if x != nil { + return x.Versions + } + return nil +} + +// Container for all persistent user data that varies per task queue type within a family. 
+type TaskQueueTypeUserData struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentData *DeploymentData `protobuf:"bytes,1,opt,name=deployment_data,json=deploymentData,proto3" json:"deployment_data,omitempty"` + Config *v11.TaskQueueConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + FairnessState v14.FairnessState `protobuf:"varint,3,opt,name=fairness_state,json=fairnessState,proto3,enum=temporal.server.api.enums.v1.FairnessState" json:"fairness_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TaskQueueTypeUserData) Reset() { + *x = TaskQueueTypeUserData{} + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TaskQueueTypeUserData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskQueueTypeUserData) ProtoMessage() {} + +func (x *TaskQueueTypeUserData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskQueueTypeUserData.ProtoReflect.Descriptor instead. 
+func (*TaskQueueTypeUserData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{7} +} + +func (x *TaskQueueTypeUserData) GetDeploymentData() *DeploymentData { + if x != nil { + return x.DeploymentData + } + return nil +} + +func (x *TaskQueueTypeUserData) GetConfig() *v11.TaskQueueConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *TaskQueueTypeUserData) GetFairnessState() v14.FairnessState { + if x != nil { + return x.FairnessState + } + return v14.FairnessState(0) +} + +// Container for all persistent user provided data for a task queue family. +// "Task queue" as a named concept here is a task queue family, i.e. the set of task queues +// that share a name, at most one of each type (workflow, activity, etc.). // This data must all fit in a single DB column and is kept cached in-memory, take extra care to ensure data added here // has reasonable size limits imposed on it. type TaskQueueUserData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The last recorded cluster-local Hybrid Logical Clock timestamp for _this_ task queue. + state protoimpl.MessageState `protogen:"open.v1"` + // The last recorded cluster-local Hybrid Logical Clock timestamp for _this_ task queue family. // Updated whenever user data is directly updated due to a user action but not when applying replication events. // The clock is referenced when new timestamps are generated to ensure it produces monotonically increasing // timestamps. Clock *v1.HybridLogicalClock `protobuf:"bytes,1,opt,name=clock,proto3" json:"clock,omitempty"` VersioningData *VersioningData `protobuf:"bytes,2,opt,name=versioning_data,json=versioningData,proto3" json:"versioning_data,omitempty"` + // Map from task queue type (workflow, activity, nexus) to per-type data. 
+ PerType map[int32]*TaskQueueTypeUserData `protobuf:"bytes,3,rep,name=per_type,json=perType,proto3" json:"per_type,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskQueueUserData) Reset() { *x = TaskQueueUserData{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskQueueUserData) String() string { @@ -352,8 +675,8 @@ func (x *TaskQueueUserData) String() string { func (*TaskQueueUserData) ProtoMessage() {} func (x *TaskQueueUserData) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -365,7 +688,7 @@ func (x *TaskQueueUserData) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskQueueUserData.ProtoReflect.Descriptor instead. 
func (*TaskQueueUserData) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{3} + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{8} } func (x *TaskQueueUserData) GetClock() *v1.HybridLogicalClock { @@ -382,23 +705,27 @@ func (x *TaskQueueUserData) GetVersioningData() *VersioningData { return nil } +func (x *TaskQueueUserData) GetPerType() map[int32]*TaskQueueTypeUserData { + if x != nil { + return x.PerType + } + return nil +} + // Simple wrapper that includes a TaskQueueUserData and its storage version. type VersionedTaskQueueUserData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data *TaskQueueUserData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields - - Data *TaskQueueUserData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VersionedTaskQueueUserData) Reset() { *x = VersionedTaskQueueUserData{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *VersionedTaskQueueUserData) String() string { @@ -408,8 +735,8 @@ func (x *VersionedTaskQueueUserData) String() string { func (*VersionedTaskQueueUserData) ProtoMessage() {} func (x *VersionedTaskQueueUserData) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[4] - if 
protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -421,7 +748,7 @@ func (x *VersionedTaskQueueUserData) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionedTaskQueueUserData.ProtoReflect.Descriptor instead. func (*VersionedTaskQueueUserData) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{4} + return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP(), []int{9} } func (x *VersionedTaskQueueUserData) GetData() *TaskQueueUserData { @@ -440,125 +767,135 @@ func (x *VersionedTaskQueueUserData) GetVersion() int64 { var File_temporal_server_api_persistence_v1_task_queues_proto protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc = []byte{ - 0x0a, 0x34, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, - 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, 0x68, 
0x00, 0x12, - 0x4b, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x16, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x79, 0x62, 0x72, 0x69, 0x64, 0x4c, 0x6f, 0x67, - 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x6e, 0x0a, 0x18, 0x62, 0x65, 0x63, 0x61, 0x6d, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, - 0x2e, 0x48, 0x79, 0x62, 0x72, 0x69, 0x64, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x16, 0x62, 0x65, 0x63, 0x61, 0x6d, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x68, 0x00, 0x22, 0x43, 0x0a, - 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 
0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, - 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a, - 0x0d, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, - 0x22, 0xf1, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x07, 0x73, 0x65, 0x74, 0x5f, 0x69, - 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x74, 0x49, 0x64, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, - 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x49, - 0x64, 0x52, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x6e, 0x0a, 0x18, 0x62, 0x65, 0x63, 0x61, 0x6d, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x79, - 0x62, 0x72, 0x69, 0x64, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x6f, 0x63, 0x6b, - 0x52, 0x16, 0x62, 0x65, 0x63, 0x61, 0x6d, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x68, 0x00, 0x22, 0x71, 0x0a, 0x0e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5f, 0x0a, 0x0c, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 
0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc0, 0x01, 0x0a, 0x11, 0x54, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4a, - 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x79, 0x62, 0x72, - 0x69, 0x64, 0x4c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, - 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x61, - 0x74, 0x61, 0x52, 0x0e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, - 0x61, 0x42, 0x02, 0x68, 0x00, 0x22, 0x89, 0x01, 0x0a, 0x1a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, - 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 
0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, - 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x42, 0x36, 0x5a, 0x34, - 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc = "" + + "\n" + + "4temporal/server/api/persistence/v1/task_queues.proto\x12\"temporal.server.api.persistence.v1\x1a(temporal/api/deployment/v1/message.proto\x1a'temporal/api/taskqueue/v1/message.proto\x1a*temporal/server/api/clock/v1/message.proto\x1a/temporal/server/api/deployment/v1/message.proto\x1a1temporal/server/api/enums/v1/fairness_state.proto\"\xfb\x02\n" + + "\aBuildId\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12G\n" + + "\x05state\x18\x02 \x01(\x0e21.temporal.server.api.persistence.v1.BuildId.StateR\x05state\x12f\n" + + "\x16state_update_timestamp\x18\x03 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x14stateUpdateTimestamp\x12j\n" + + "\x18became_default_timestamp\x18\x04 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x16becameDefaultTimestamp\"C\n" + + "\x05State\x12\x15\n" + + "\x11STATE_UNSPECIFIED\x10\x00\x12\x10\n" + + "\fSTATE_ACTIVE\x10\x01\x12\x11\n" + + "\rSTATE_DELETED\x10\x02\"\xe5\x01\n" + + "\x14CompatibleVersionSet\x12\x17\n" + + "\aset_ids\x18\x01 
\x03(\tR\x06setIds\x12H\n" + + "\tbuild_ids\x18\x02 \x03(\v2+.temporal.server.api.persistence.v1.BuildIdR\bbuildIds\x12j\n" + + "\x18became_default_timestamp\x18\x04 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x16becameDefaultTimestamp\"\x90\x02\n" + + "\x0eAssignmentRule\x12D\n" + + "\x04rule\x18\x01 \x01(\v20.temporal.api.taskqueue.v1.BuildIdAssignmentRuleR\x04rule\x12[\n" + + "\x10create_timestamp\x18\x02 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x0fcreateTimestamp\x12[\n" + + "\x10delete_timestamp\x18\x03 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x0fdeleteTimestamp\"\x96\x02\n" + + "\fRedirectRule\x12L\n" + + "\x04rule\x18\x01 \x01(\v28.temporal.api.taskqueue.v1.CompatibleBuildIdRedirectRuleR\x04rule\x12[\n" + + "\x10create_timestamp\x18\x02 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x0fcreateTimestamp\x12[\n" + + "\x10delete_timestamp\x18\x03 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x0fdeleteTimestamp\"\xa5\x02\n" + + "\x0eVersioningData\x12[\n" + + "\fversion_sets\x18\x01 \x03(\v28.temporal.server.api.persistence.v1.CompatibleVersionSetR\vversionSets\x12]\n" + + "\x10assignment_rules\x18\x02 \x03(\v22.temporal.server.api.persistence.v1.AssignmentRuleR\x0fassignmentRules\x12W\n" + + "\x0eredirect_rules\x18\x03 \x03(\v20.temporal.server.api.persistence.v1.RedirectRuleR\rredirectRules\"\xd4\x03\n" + + "\x0eDeploymentData\x12X\n" + + "\bversions\x18\x02 \x03(\v28.temporal.server.api.deployment.v1.DeploymentVersionDataB\x02\x18\x01R\bversions\x12p\n" + + "\x15unversioned_ramp_data\x18\x03 \x01(\v28.temporal.server.api.deployment.v1.DeploymentVersionDataB\x02\x18\x01R\x13unversionedRampData\x12r\n" + + "\x10deployments_data\x18\x04 \x03(\v2G.temporal.server.api.persistence.v1.DeploymentData.DeploymentsDataEntryR\x0fdeploymentsData\x1a|\n" + + "\x14DeploymentsDataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12N\n" + + "\x05value\x18\x02 
\x01(\v28.temporal.server.api.persistence.v1.WorkerDeploymentDataR\x05value:\x028\x01J\x04\b\x01\x10\x02\"\xc9\x02\n" + + "\x14WorkerDeploymentData\x12P\n" + + "\x0erouting_config\x18\x01 \x01(\v2).temporal.api.deployment.v1.RoutingConfigR\rroutingConfig\x12b\n" + + "\bversions\x18\x02 \x03(\v2F.temporal.server.api.persistence.v1.WorkerDeploymentData.VersionsEntryR\bversions\x1a{\n" + + "\rVersionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12T\n" + + "\x05value\x18\x02 \x01(\v2>.temporal.server.api.deployment.v1.WorkerDeploymentVersionDataR\x05value:\x028\x01\"\x8c\x02\n" + + "\x15TaskQueueTypeUserData\x12[\n" + + "\x0fdeployment_data\x18\x01 \x01(\v22.temporal.server.api.persistence.v1.DeploymentDataR\x0edeploymentData\x12B\n" + + "\x06config\x18\x02 \x01(\v2*.temporal.api.taskqueue.v1.TaskQueueConfigR\x06config\x12R\n" + + "\x0efairness_state\x18\x03 \x01(\x0e2+.temporal.server.api.enums.v1.FairnessStateR\rfairnessState\"\x8e\x03\n" + + "\x11TaskQueueUserData\x12F\n" + + "\x05clock\x18\x01 \x01(\v20.temporal.server.api.clock.v1.HybridLogicalClockR\x05clock\x12[\n" + + "\x0fversioning_data\x18\x02 \x01(\v22.temporal.server.api.persistence.v1.VersioningDataR\x0eversioningData\x12]\n" + + "\bper_type\x18\x03 \x03(\v2B.temporal.server.api.persistence.v1.TaskQueueUserData.PerTypeEntryR\aperType\x1au\n" + + "\fPerTypeEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x05R\x03key\x12O\n" + + "\x05value\x18\x02 \x01(\v29.temporal.server.api.persistence.v1.TaskQueueTypeUserDataR\x05value:\x028\x01\"\x81\x01\n" + + "\x1aVersionedTaskQueueUserData\x12I\n" + + "\x04data\x18\x01 \x01(\v25.temporal.server.api.persistence.v1.TaskQueueUserDataR\x04data\x12\x18\n" + + "\aversion\x18\x02 \x01(\x03R\aversionB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_task_queues_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_task_queues_proto_rawDescData = 
file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc + file_temporal_server_api_persistence_v1_task_queues_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_task_queues_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_task_queues_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_task_queues_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_task_queues_proto_rawDescData) + file_temporal_server_api_persistence_v1_task_queues_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc), len(file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_task_queues_proto_rawDescData } var file_temporal_server_api_persistence_v1_task_queues_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_temporal_server_api_persistence_v1_task_queues_proto_goTypes = []interface{}{ - (BuildId_State)(0), // 0: temporal.server.api.persistence.v1.BuildId.State - (*BuildId)(nil), // 1: temporal.server.api.persistence.v1.BuildId - (*CompatibleVersionSet)(nil), // 2: temporal.server.api.persistence.v1.CompatibleVersionSet - (*VersioningData)(nil), // 3: temporal.server.api.persistence.v1.VersioningData - (*TaskQueueUserData)(nil), // 4: temporal.server.api.persistence.v1.TaskQueueUserData - (*VersionedTaskQueueUserData)(nil), // 5: temporal.server.api.persistence.v1.VersionedTaskQueueUserData - (*v1.HybridLogicalClock)(nil), // 6: temporal.server.api.clock.v1.HybridLogicalClock +var file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_temporal_server_api_persistence_v1_task_queues_proto_goTypes = []any{ + (BuildId_State)(0), // 0: 
temporal.server.api.persistence.v1.BuildId.State + (*BuildId)(nil), // 1: temporal.server.api.persistence.v1.BuildId + (*CompatibleVersionSet)(nil), // 2: temporal.server.api.persistence.v1.CompatibleVersionSet + (*AssignmentRule)(nil), // 3: temporal.server.api.persistence.v1.AssignmentRule + (*RedirectRule)(nil), // 4: temporal.server.api.persistence.v1.RedirectRule + (*VersioningData)(nil), // 5: temporal.server.api.persistence.v1.VersioningData + (*DeploymentData)(nil), // 6: temporal.server.api.persistence.v1.DeploymentData + (*WorkerDeploymentData)(nil), // 7: temporal.server.api.persistence.v1.WorkerDeploymentData + (*TaskQueueTypeUserData)(nil), // 8: temporal.server.api.persistence.v1.TaskQueueTypeUserData + (*TaskQueueUserData)(nil), // 9: temporal.server.api.persistence.v1.TaskQueueUserData + (*VersionedTaskQueueUserData)(nil), // 10: temporal.server.api.persistence.v1.VersionedTaskQueueUserData + nil, // 11: temporal.server.api.persistence.v1.DeploymentData.DeploymentsDataEntry + nil, // 12: temporal.server.api.persistence.v1.WorkerDeploymentData.VersionsEntry + nil, // 13: temporal.server.api.persistence.v1.TaskQueueUserData.PerTypeEntry + (*v1.HybridLogicalClock)(nil), // 14: temporal.server.api.clock.v1.HybridLogicalClock + (*v11.BuildIdAssignmentRule)(nil), // 15: temporal.api.taskqueue.v1.BuildIdAssignmentRule + (*v11.CompatibleBuildIdRedirectRule)(nil), // 16: temporal.api.taskqueue.v1.CompatibleBuildIdRedirectRule + (*v12.DeploymentVersionData)(nil), // 17: temporal.server.api.deployment.v1.DeploymentVersionData + (*v13.RoutingConfig)(nil), // 18: temporal.api.deployment.v1.RoutingConfig + (*v11.TaskQueueConfig)(nil), // 19: temporal.api.taskqueue.v1.TaskQueueConfig + (v14.FairnessState)(0), // 20: temporal.server.api.enums.v1.FairnessState + (*v12.WorkerDeploymentVersionData)(nil), // 21: temporal.server.api.deployment.v1.WorkerDeploymentVersionData } var file_temporal_server_api_persistence_v1_task_queues_proto_depIdxs = []int32{ - 0, // 0: 
temporal.server.api.persistence.v1.BuildId.state:type_name -> temporal.server.api.persistence.v1.BuildId.State - 6, // 1: temporal.server.api.persistence.v1.BuildId.state_update_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock - 6, // 2: temporal.server.api.persistence.v1.BuildId.became_default_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock - 1, // 3: temporal.server.api.persistence.v1.CompatibleVersionSet.build_ids:type_name -> temporal.server.api.persistence.v1.BuildId - 6, // 4: temporal.server.api.persistence.v1.CompatibleVersionSet.became_default_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock - 2, // 5: temporal.server.api.persistence.v1.VersioningData.version_sets:type_name -> temporal.server.api.persistence.v1.CompatibleVersionSet - 6, // 6: temporal.server.api.persistence.v1.TaskQueueUserData.clock:type_name -> temporal.server.api.clock.v1.HybridLogicalClock - 3, // 7: temporal.server.api.persistence.v1.TaskQueueUserData.versioning_data:type_name -> temporal.server.api.persistence.v1.VersioningData - 4, // 8: temporal.server.api.persistence.v1.VersionedTaskQueueUserData.data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 0, // 0: temporal.server.api.persistence.v1.BuildId.state:type_name -> temporal.server.api.persistence.v1.BuildId.State + 14, // 1: temporal.server.api.persistence.v1.BuildId.state_update_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 14, // 2: temporal.server.api.persistence.v1.BuildId.became_default_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 1, // 3: temporal.server.api.persistence.v1.CompatibleVersionSet.build_ids:type_name -> 
temporal.server.api.persistence.v1.BuildId + 14, // 4: temporal.server.api.persistence.v1.CompatibleVersionSet.became_default_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 15, // 5: temporal.server.api.persistence.v1.AssignmentRule.rule:type_name -> temporal.api.taskqueue.v1.BuildIdAssignmentRule + 14, // 6: temporal.server.api.persistence.v1.AssignmentRule.create_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 14, // 7: temporal.server.api.persistence.v1.AssignmentRule.delete_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 16, // 8: temporal.server.api.persistence.v1.RedirectRule.rule:type_name -> temporal.api.taskqueue.v1.CompatibleBuildIdRedirectRule + 14, // 9: temporal.server.api.persistence.v1.RedirectRule.create_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 14, // 10: temporal.server.api.persistence.v1.RedirectRule.delete_timestamp:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 2, // 11: temporal.server.api.persistence.v1.VersioningData.version_sets:type_name -> temporal.server.api.persistence.v1.CompatibleVersionSet + 3, // 12: temporal.server.api.persistence.v1.VersioningData.assignment_rules:type_name -> temporal.server.api.persistence.v1.AssignmentRule + 4, // 13: temporal.server.api.persistence.v1.VersioningData.redirect_rules:type_name -> temporal.server.api.persistence.v1.RedirectRule + 17, // 14: temporal.server.api.persistence.v1.DeploymentData.versions:type_name -> temporal.server.api.deployment.v1.DeploymentVersionData + 17, // 15: temporal.server.api.persistence.v1.DeploymentData.unversioned_ramp_data:type_name -> temporal.server.api.deployment.v1.DeploymentVersionData + 11, // 16: temporal.server.api.persistence.v1.DeploymentData.deployments_data:type_name -> temporal.server.api.persistence.v1.DeploymentData.DeploymentsDataEntry + 18, // 17: temporal.server.api.persistence.v1.WorkerDeploymentData.routing_config:type_name -> 
temporal.api.deployment.v1.RoutingConfig + 12, // 18: temporal.server.api.persistence.v1.WorkerDeploymentData.versions:type_name -> temporal.server.api.persistence.v1.WorkerDeploymentData.VersionsEntry + 6, // 19: temporal.server.api.persistence.v1.TaskQueueTypeUserData.deployment_data:type_name -> temporal.server.api.persistence.v1.DeploymentData + 19, // 20: temporal.server.api.persistence.v1.TaskQueueTypeUserData.config:type_name -> temporal.api.taskqueue.v1.TaskQueueConfig + 20, // 21: temporal.server.api.persistence.v1.TaskQueueTypeUserData.fairness_state:type_name -> temporal.server.api.enums.v1.FairnessState + 14, // 22: temporal.server.api.persistence.v1.TaskQueueUserData.clock:type_name -> temporal.server.api.clock.v1.HybridLogicalClock + 5, // 23: temporal.server.api.persistence.v1.TaskQueueUserData.versioning_data:type_name -> temporal.server.api.persistence.v1.VersioningData + 13, // 24: temporal.server.api.persistence.v1.TaskQueueUserData.per_type:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData.PerTypeEntry + 9, // 25: temporal.server.api.persistence.v1.VersionedTaskQueueUserData.data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData + 7, // 26: temporal.server.api.persistence.v1.DeploymentData.DeploymentsDataEntry.value:type_name -> temporal.server.api.persistence.v1.WorkerDeploymentData + 21, // 27: temporal.server.api.persistence.v1.WorkerDeploymentData.VersionsEntry.value:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersionData + 8, // 28: temporal.server.api.persistence.v1.TaskQueueUserData.PerTypeEntry.value:type_name -> temporal.server.api.persistence.v1.TaskQueueTypeUserData + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 29, // [29:29] is the sub-list for extension extendee + 0, // [0:29] is the sub-list for field type_name } func init() { 
file_temporal_server_api_persistence_v1_task_queues_proto_init() } @@ -566,75 +903,13 @@ func file_temporal_server_api_persistence_v1_task_queues_proto_init() { if File_temporal_server_api_persistence_v1_task_queues_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BuildId); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompatibleVersionSet); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersioningData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskQueueUserData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionedTaskQueueUserData); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc, + RawDescriptor: 
unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc), len(file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc)), NumEnums: 1, - NumMessages: 5, + NumMessages: 13, NumExtensions: 0, NumServices: 0, }, @@ -644,7 +919,6 @@ func file_temporal_server_api_persistence_v1_task_queues_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_task_queues_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_task_queues_proto = out.File - file_temporal_server_api_persistence_v1_task_queues_proto_rawDesc = nil file_temporal_server_api_persistence_v1_task_queues_proto_goTypes = nil file_temporal_server_api_persistence_v1_task_queues_proto_depIdxs = nil } diff --git a/api/persistence/v1/tasks.go-helpers.pb.go b/api/persistence/v1/tasks.go-helpers.pb.go index 42854850ea2..cee6ab32b50 100644 --- a/api/persistence/v1/tasks.go-helpers.pb.go +++ b/api/persistence/v1/tasks.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package persistence @@ -140,6 +116,117 @@ func (this *TaskQueueInfo) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type SubqueueInfo to the protobuf v3 wire format +func (val *SubqueueInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SubqueueInfo from the protobuf v3 wire format +func (val *SubqueueInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SubqueueInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SubqueueInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SubqueueInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SubqueueInfo + switch t := that.(type) { + case *SubqueueInfo: + that1 = t + case SubqueueInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type FairnessKeyCount to the protobuf v3 wire format +func (val *FairnessKeyCount) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type FairnessKeyCount from the protobuf v3 wire format +func (val *FairnessKeyCount) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *FairnessKeyCount) Size() int { + return proto.Size(val) +} + +// Equal returns whether two FairnessKeyCount values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *FairnessKeyCount) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *FairnessKeyCount + switch t := that.(type) { + case *FairnessKeyCount: + that1 = t + case FairnessKeyCount: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SubqueueKey to the protobuf v3 wire format +func (val *SubqueueKey) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SubqueueKey from the protobuf v3 wire format +func (val *SubqueueKey) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SubqueueKey) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SubqueueKey values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SubqueueKey) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SubqueueKey + switch t := that.(type) { + case *SubqueueKey: + that1 = t + case SubqueueKey: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type TaskKey to the protobuf v3 wire format func (val *TaskKey) Marshal() ([]byte, error) { return proto.Marshal(val) diff --git a/api/persistence/v1/tasks.pb.go b/api/persistence/v1/tasks.pb.go index 32855303667..4a4bd498556 100644 --- a/api/persistence/v1/tasks.pb.go +++ b/api/persistence/v1/tasks.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,8 +9,10 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" - v12 "go.temporal.io/api/enums/v1" + v12 "go.temporal.io/api/common/v1" + v13 "go.temporal.io/api/enums/v1" v1 "go.temporal.io/server/api/clock/v1" v11 "go.temporal.io/server/api/taskqueue/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -49,21 +29,19 @@ const ( // task column type AllocatedTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data *TaskInfo `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + TaskPass int64 `protobuf:"varint,3,opt,name=task_pass,json=taskPass,proto3" json:"task_pass,omitempty"` + TaskId int64 `protobuf:"varint,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` unknownFields protoimpl.UnknownFields - - Data *TaskInfo `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - TaskId int64 `protobuf:"varint,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AllocatedTaskInfo) Reset() { *x = AllocatedTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *AllocatedTaskInfo) String() string { @@ -74,7 +52,7 @@ func (*AllocatedTaskInfo) ProtoMessage() {} func (x *AllocatedTaskInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -96,6 +74,13 @@ func (x *AllocatedTaskInfo) GetData() 
*TaskInfo { return nil } +func (x *AllocatedTaskInfo) GetTaskPass() int64 { + if x != nil { + return x.TaskPass + } + return 0 +} + func (x *AllocatedTaskInfo) GetTaskId() int64 { if x != nil { return x.TaskId @@ -104,10 +89,7 @@ func (x *AllocatedTaskInfo) GetTaskId() int64 { } type TaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` @@ -118,15 +100,20 @@ type TaskInfo struct { // How this task should be directed. (Missing means the default for // TaskVersionDirective, which is unversioned.) VersionDirective *v11.TaskVersionDirective `protobuf:"bytes,8,opt,name=version_directive,json=versionDirective,proto3" json:"version_directive,omitempty"` + // Stamp field allows to differentiate between different instances of the same task + Stamp int32 `protobuf:"varint,9,opt,name=stamp,proto3" json:"stamp,omitempty"` + Priority *v12.Priority `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + // Reference to any chasm component associated with this task + ComponentRef []byte `protobuf:"bytes,11,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskInfo) Reset() { *x = TaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *TaskInfo) String() string { @@ -137,7 +124,7 @@ func (*TaskInfo) ProtoMessage() {} func (x *TaskInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -208,28 +195,72 @@ func (x *TaskInfo) GetVersionDirective() *v11.TaskVersionDirective { return nil } +func (x *TaskInfo) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +func (x *TaskInfo) GetPriority() *v12.Priority { + if x != nil { + return x.Priority + } + return nil +} + +func (x *TaskInfo) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + // task_queue column type TaskQueueInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - TaskType v12.TaskQueueType `protobuf:"varint,3,opt,name=task_type,json=taskType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_type,omitempty"` - Kind v12.TaskQueueKind `protobuf:"varint,4,opt,name=kind,proto3,enum=temporal.api.enums.v1.TaskQueueKind" json:"kind,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + TaskType v13.TaskQueueType `protobuf:"varint,3,opt,name=task_type,json=taskType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_type,omitempty"` + Kind v13.TaskQueueKind `protobuf:"varint,4,opt,name=kind,proto3,enum=temporal.api.enums.v1.TaskQueueKind" 
json:"kind,omitempty"` + // After data is migrated into subqueues, this contains a copy of the ack level for subqueue 0. AckLevel int64 `protobuf:"varint,5,opt,name=ack_level,json=ackLevel,proto3" json:"ack_level,omitempty"` ExpiryTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expiry_time,json=expiryTime,proto3" json:"expiry_time,omitempty"` LastUpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=last_update_time,json=lastUpdateTime,proto3" json:"last_update_time,omitempty"` + // After data is migrated into subqueues, this contains a copy of the count for subqueue 0. + ApproximateBacklogCount int64 `protobuf:"varint,8,opt,name=approximate_backlog_count,json=approximateBacklogCount,proto3" json:"approximate_backlog_count,omitempty"` + // Subqueues contains one entry for each subqueue in this physical task queue. + // Tasks are split into subqueues to implement priority and fairness. + // Subqueues are indexed starting from 0, the zero subqueue is always present + // and corresponds to the "main" queue before subqueues were introduced. + // + // The message at index n describes the subqueue at index n. + // + // Each subqueue has its own ack level and approx backlog count, but they share + // the range id. For compatibility, ack level and backlog count for subqueue 0 + // is copied into TaskQueueInfo. + Subqueues []*SubqueueInfo `protobuf:"bytes,9,rep,name=subqueues,proto3" json:"subqueues,omitempty"` + // For transitioning from tasks (v1) to tasks_v2 and back: + // + // If this TaskQueueInfo is in v1 and this is set, then v2 may have tasks. + // If this TaskQueueInfo is in v2 and this is set, then v1 may have tasks. + // + // New metadata starts with this flag set (we could skip this when useNewMatcher is off). + // Whenever locking any metadata as the inactive one (drain-only), this should be set. 
+ // If the flag is true, no tasks should be written to the active table until the inactive + // table has also been locked (and the flag set there for a potential reverse transition). + // After determinining that the inactive table has no more tasks left, then this + // can be cleared on the active table. + OtherHasTasks bool `protobuf:"varint,10,opt,name=other_has_tasks,json=otherHasTasks,proto3" json:"other_has_tasks,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskQueueInfo) Reset() { *x = TaskQueueInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskQueueInfo) String() string { @@ -240,7 +271,7 @@ func (*TaskQueueInfo) ProtoMessage() {} func (x *TaskQueueInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -269,18 +300,18 @@ func (x *TaskQueueInfo) GetName() string { return "" } -func (x *TaskQueueInfo) GetTaskType() v12.TaskQueueType { +func (x *TaskQueueInfo) GetTaskType() v13.TaskQueueType { if x != nil { return x.TaskType } - return v12.TaskQueueType(0) + return v13.TaskQueueType(0) } -func (x *TaskQueueInfo) GetKind() v12.TaskQueueKind { +func (x *TaskQueueInfo) GetKind() v13.TaskQueueKind { if x != nil { return x.Kind } - return v12.TaskQueueKind(0) + return v13.TaskQueueKind(0) } func (x *TaskQueueInfo) GetAckLevel() int64 { @@ -304,22 +335,228 @@ func (x *TaskQueueInfo) GetLastUpdateTime() *timestamppb.Timestamp { return nil } -type TaskKey struct { - state 
protoimpl.MessageState +func (x *TaskQueueInfo) GetApproximateBacklogCount() int64 { + if x != nil { + return x.ApproximateBacklogCount + } + return 0 +} + +func (x *TaskQueueInfo) GetSubqueues() []*SubqueueInfo { + if x != nil { + return x.Subqueues + } + return nil +} + +func (x *TaskQueueInfo) GetOtherHasTasks() bool { + if x != nil { + return x.OtherHasTasks + } + return false +} + +type SubqueueInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Key is the information used by a splitting algorithm to decide which tasks should go in + // this subqueue. It should not change after being registered in TaskQueueInfo. + Key *SubqueueKey `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // The rest are mutable state for the subqueue: + AckLevel int64 `protobuf:"varint,2,opt,name=ack_level,json=ackLevel,proto3" json:"ack_level,omitempty"` + FairAckLevel *v11.FairLevel `protobuf:"bytes,4,opt,name=fair_ack_level,json=fairAckLevel,proto3" json:"fair_ack_level,omitempty"` + ApproximateBacklogCount int64 `protobuf:"varint,3,opt,name=approximate_backlog_count,json=approximateBacklogCount,proto3" json:"approximate_backlog_count,omitempty"` + // Max read level keeps track of the highest task level ever written, but is only + // maintained best-effort. Do not trust these values. + FairMaxReadLevel *v11.FairLevel `protobuf:"bytes,5,opt,name=fair_max_read_level,json=fairMaxReadLevel,proto3" json:"fair_max_read_level,omitempty"` + // We can persist a limited number of fairness key counts in task queue + // metadata so they're not lost on migration. 
+ TopKFairnessCounts []*FairnessKeyCount `protobuf:"bytes,6,rep,name=top_k_fairness_counts,json=topKFairnessCounts,proto3" json:"top_k_fairness_counts,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SubqueueInfo) Reset() { + *x = SubqueueInfo{} + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SubqueueInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubqueueInfo) ProtoMessage() {} + +func (x *SubqueueInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubqueueInfo.ProtoReflect.Descriptor instead. +func (*SubqueueInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +func (x *SubqueueInfo) GetKey() *SubqueueKey { + if x != nil { + return x.Key + } + return nil +} + +func (x *SubqueueInfo) GetAckLevel() int64 { + if x != nil { + return x.AckLevel + } + return 0 +} + +func (x *SubqueueInfo) GetFairAckLevel() *v11.FairLevel { + if x != nil { + return x.FairAckLevel + } + return nil +} + +func (x *SubqueueInfo) GetApproximateBacklogCount() int64 { + if x != nil { + return x.ApproximateBacklogCount + } + return 0 +} + +func (x *SubqueueInfo) GetFairMaxReadLevel() *v11.FairLevel { + if x != nil { + return x.FairMaxReadLevel + } + return nil +} + +func (x *SubqueueInfo) GetTopKFairnessCounts() []*FairnessKeyCount { + if x != nil { + return x.TopKFairnessCounts + } + return nil +} + +type FairnessKeyCount struct { + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + 
Count int64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +} + +func (x *FairnessKeyCount) Reset() { + *x = FairnessKeyCount{} + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FairnessKeyCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FairnessKeyCount) ProtoMessage() {} + +func (x *FairnessKeyCount) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FairnessKeyCount.ProtoReflect.Descriptor instead. +func (*FairnessKeyCount) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +func (x *FairnessKeyCount) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *FairnessKeyCount) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +type SubqueueKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Each subqueue contains tasks from only one priority level. 
+ Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - FireTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=fire_time,json=fireTime,proto3" json:"fire_time,omitempty"` - TaskId int64 `protobuf:"varint,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +func (x *SubqueueKey) Reset() { + *x = SubqueueKey{} + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *TaskKey) Reset() { - *x = TaskKey{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[3] +func (x *SubqueueKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubqueueKey) ProtoMessage() {} + +func (x *SubqueueKey) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use SubqueueKey.ProtoReflect.Descriptor instead. 
+func (*SubqueueKey) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_tasks_proto_rawDescGZIP(), []int{5} +} + +func (x *SubqueueKey) GetPriority() int32 { + if x != nil { + return x.Priority + } + return 0 +} + +type TaskKey struct { + state protoimpl.MessageState `protogen:"open.v1"` + FireTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=fire_time,json=fireTime,proto3" json:"fire_time,omitempty"` + TaskId int64 `protobuf:"varint,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TaskKey) Reset() { + *x = TaskKey{} + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskKey) String() string { @@ -329,8 +566,8 @@ func (x *TaskKey) String() string { func (*TaskKey) ProtoMessage() {} func (x *TaskKey) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -342,7 +579,7 @@ func (x *TaskKey) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskKey.ProtoReflect.Descriptor instead. 
func (*TaskKey) Descriptor() ([]byte, []int) { - return file_temporal_server_api_persistence_v1_tasks_proto_rawDescGZIP(), []int{3} + return file_temporal_server_api_persistence_v1_tasks_proto_rawDescGZIP(), []int{6} } func (x *TaskKey) GetFireTime() *timestamppb.Timestamp { @@ -361,132 +598,109 @@ func (x *TaskKey) GetTaskId() int64 { var File_temporal_server_api_persistence_v1_tasks_proto protoreflect.FileDescriptor -var file_temporal_server_api_persistence_v1_tasks_proto_rawDesc = []byte{ - 0x0a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, - 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, - 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, - 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76, 0x0a, 0x11, 0x41, - 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x44, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xd3, 0x03, 0x0a, - 0x08, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, - 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 
0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, - 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, - 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x67, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xff, 0x02, 0x0a, 0x0d, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x16, - 
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, - 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x61, 0x63, 0x6b, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x61, 0x63, 0x6b, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x48, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 
0x22, 0x63, 0x0a, 0x07, 0x54, 0x61, 0x73, 0x6b, 0x4b, 0x65, 0x79, - 0x12, 0x3b, 0x0a, 0x09, 0x66, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x66, 0x69, - 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_temporal_server_api_persistence_v1_tasks_proto_rawDesc = "" + + "\n" + + ".temporal/server/api/persistence/v1/tasks.proto\x12\"temporal.server.api.persistence.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a&temporal/api/enums/v1/task_queue.proto\x1a*temporal/server/api/clock/v1/message.proto\x1a.temporal/server/api/taskqueue/v1/message.proto\"\x8b\x01\n" + + "\x11AllocatedTaskInfo\x12@\n" + + "\x04data\x18\x01 \x01(\v2,.temporal.server.api.persistence.v1.TaskInfoR\x04data\x12\x1b\n" + + "\ttask_pass\x18\x03 \x01(\x03R\btaskPass\x12\x17\n" + + "\atask_id\x18\x02 \x01(\x03R\x06taskId\"\xac\x04\n" + + "\bTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12,\n" + + "\x12scheduled_event_id\x18\x04 \x01(\x03R\x10scheduledEventId\x12;\n" + + "\vcreate_time\x18\x05 
\x01(\v2\x1a.google.protobuf.TimestampR\n" + + "createTime\x12;\n" + + "\vexpiry_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "expiryTime\x12?\n" + + "\x05clock\x18\a \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12c\n" + + "\x11version_directive\x18\b \x01(\v26.temporal.server.api.taskqueue.v1.TaskVersionDirectiveR\x10versionDirective\x12\x14\n" + + "\x05stamp\x18\t \x01(\x05R\x05stamp\x12<\n" + + "\bpriority\x18\n" + + " \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12#\n" + + "\rcomponent_ref\x18\v \x01(\fR\fcomponentRef\"\x97\x04\n" + + "\rTaskQueueInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12A\n" + + "\ttask_type\x18\x03 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueTypeR\btaskType\x128\n" + + "\x04kind\x18\x04 \x01(\x0e2$.temporal.api.enums.v1.TaskQueueKindR\x04kind\x12\x1b\n" + + "\tack_level\x18\x05 \x01(\x03R\backLevel\x12;\n" + + "\vexpiry_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "expiryTime\x12D\n" + + "\x10last_update_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x0elastUpdateTime\x12:\n" + + "\x19approximate_backlog_count\x18\b \x01(\x03R\x17approximateBacklogCount\x12N\n" + + "\tsubqueues\x18\t \x03(\v20.temporal.server.api.persistence.v1.SubqueueInfoR\tsubqueues\x12&\n" + + "\x0fother_has_tasks\x18\n" + + " \x01(\bR\rotherHasTasks\"\xc2\x03\n" + + "\fSubqueueInfo\x12A\n" + + "\x03key\x18\x01 \x01(\v2/.temporal.server.api.persistence.v1.SubqueueKeyR\x03key\x12\x1b\n" + + "\tack_level\x18\x02 \x01(\x03R\backLevel\x12Q\n" + + "\x0efair_ack_level\x18\x04 \x01(\v2+.temporal.server.api.taskqueue.v1.FairLevelR\ffairAckLevel\x12:\n" + + "\x19approximate_backlog_count\x18\x03 \x01(\x03R\x17approximateBacklogCount\x12Z\n" + + "\x13fair_max_read_level\x18\x05 \x01(\v2+.temporal.server.api.taskqueue.v1.FairLevelR\x10fairMaxReadLevel\x12g\n" + + "\x15top_k_fairness_counts\x18\x06 
\x03(\v24.temporal.server.api.persistence.v1.FairnessKeyCountR\x12topKFairnessCounts\":\n" + + "\x10FairnessKeyCount\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05count\x18\x02 \x01(\x03R\x05count\")\n" + + "\vSubqueueKey\x12\x1a\n" + + "\bpriority\x18\x01 \x01(\x05R\bpriority\"[\n" + + "\aTaskKey\x127\n" + + "\tfire_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\bfireTime\x12\x17\n" + + "\atask_id\x18\x02 \x01(\x03R\x06taskIdB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" var ( file_temporal_server_api_persistence_v1_tasks_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_tasks_proto_rawDescData = file_temporal_server_api_persistence_v1_tasks_proto_rawDesc + file_temporal_server_api_persistence_v1_tasks_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_tasks_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_tasks_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_tasks_proto_rawDescData) + file_temporal_server_api_persistence_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_tasks_proto_rawDesc), len(file_temporal_server_api_persistence_v1_tasks_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_tasks_proto_rawDescData } -var file_temporal_server_api_persistence_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_temporal_server_api_persistence_v1_tasks_proto_goTypes = []interface{}{ +var file_temporal_server_api_persistence_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_temporal_server_api_persistence_v1_tasks_proto_goTypes = []any{ (*AllocatedTaskInfo)(nil), // 0: temporal.server.api.persistence.v1.AllocatedTaskInfo (*TaskInfo)(nil), // 1: temporal.server.api.persistence.v1.TaskInfo (*TaskQueueInfo)(nil), // 2: 
temporal.server.api.persistence.v1.TaskQueueInfo - (*TaskKey)(nil), // 3: temporal.server.api.persistence.v1.TaskKey - (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp - (*v1.VectorClock)(nil), // 5: temporal.server.api.clock.v1.VectorClock - (*v11.TaskVersionDirective)(nil), // 6: temporal.server.api.taskqueue.v1.TaskVersionDirective - (v12.TaskQueueType)(0), // 7: temporal.api.enums.v1.TaskQueueType - (v12.TaskQueueKind)(0), // 8: temporal.api.enums.v1.TaskQueueKind + (*SubqueueInfo)(nil), // 3: temporal.server.api.persistence.v1.SubqueueInfo + (*FairnessKeyCount)(nil), // 4: temporal.server.api.persistence.v1.FairnessKeyCount + (*SubqueueKey)(nil), // 5: temporal.server.api.persistence.v1.SubqueueKey + (*TaskKey)(nil), // 6: temporal.server.api.persistence.v1.TaskKey + (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp + (*v1.VectorClock)(nil), // 8: temporal.server.api.clock.v1.VectorClock + (*v11.TaskVersionDirective)(nil), // 9: temporal.server.api.taskqueue.v1.TaskVersionDirective + (*v12.Priority)(nil), // 10: temporal.api.common.v1.Priority + (v13.TaskQueueType)(0), // 11: temporal.api.enums.v1.TaskQueueType + (v13.TaskQueueKind)(0), // 12: temporal.api.enums.v1.TaskQueueKind + (*v11.FairLevel)(nil), // 13: temporal.server.api.taskqueue.v1.FairLevel } var file_temporal_server_api_persistence_v1_tasks_proto_depIdxs = []int32{ 1, // 0: temporal.server.api.persistence.v1.AllocatedTaskInfo.data:type_name -> temporal.server.api.persistence.v1.TaskInfo - 4, // 1: temporal.server.api.persistence.v1.TaskInfo.create_time:type_name -> google.protobuf.Timestamp - 4, // 2: temporal.server.api.persistence.v1.TaskInfo.expiry_time:type_name -> google.protobuf.Timestamp - 5, // 3: temporal.server.api.persistence.v1.TaskInfo.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 6, // 4: temporal.server.api.persistence.v1.TaskInfo.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective - 7, // 5: 
temporal.server.api.persistence.v1.TaskQueueInfo.task_type:type_name -> temporal.api.enums.v1.TaskQueueType - 8, // 6: temporal.server.api.persistence.v1.TaskQueueInfo.kind:type_name -> temporal.api.enums.v1.TaskQueueKind - 4, // 7: temporal.server.api.persistence.v1.TaskQueueInfo.expiry_time:type_name -> google.protobuf.Timestamp - 4, // 8: temporal.server.api.persistence.v1.TaskQueueInfo.last_update_time:type_name -> google.protobuf.Timestamp - 4, // 9: temporal.server.api.persistence.v1.TaskKey.fire_time:type_name -> google.protobuf.Timestamp - 10, // [10:10] is the sub-list for method output_type - 10, // [10:10] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 7, // 1: temporal.server.api.persistence.v1.TaskInfo.create_time:type_name -> google.protobuf.Timestamp + 7, // 2: temporal.server.api.persistence.v1.TaskInfo.expiry_time:type_name -> google.protobuf.Timestamp + 8, // 3: temporal.server.api.persistence.v1.TaskInfo.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 9, // 4: temporal.server.api.persistence.v1.TaskInfo.version_directive:type_name -> temporal.server.api.taskqueue.v1.TaskVersionDirective + 10, // 5: temporal.server.api.persistence.v1.TaskInfo.priority:type_name -> temporal.api.common.v1.Priority + 11, // 6: temporal.server.api.persistence.v1.TaskQueueInfo.task_type:type_name -> temporal.api.enums.v1.TaskQueueType + 12, // 7: temporal.server.api.persistence.v1.TaskQueueInfo.kind:type_name -> temporal.api.enums.v1.TaskQueueKind + 7, // 8: temporal.server.api.persistence.v1.TaskQueueInfo.expiry_time:type_name -> google.protobuf.Timestamp + 7, // 9: temporal.server.api.persistence.v1.TaskQueueInfo.last_update_time:type_name -> google.protobuf.Timestamp + 3, // 10: temporal.server.api.persistence.v1.TaskQueueInfo.subqueues:type_name -> temporal.server.api.persistence.v1.SubqueueInfo + 
5, // 11: temporal.server.api.persistence.v1.SubqueueInfo.key:type_name -> temporal.server.api.persistence.v1.SubqueueKey + 13, // 12: temporal.server.api.persistence.v1.SubqueueInfo.fair_ack_level:type_name -> temporal.server.api.taskqueue.v1.FairLevel + 13, // 13: temporal.server.api.persistence.v1.SubqueueInfo.fair_max_read_level:type_name -> temporal.server.api.taskqueue.v1.FairLevel + 4, // 14: temporal.server.api.persistence.v1.SubqueueInfo.top_k_fairness_counts:type_name -> temporal.server.api.persistence.v1.FairnessKeyCount + 7, // 15: temporal.server.api.persistence.v1.TaskKey.fire_time:type_name -> google.protobuf.Timestamp + 16, // [16:16] is the sub-list for method output_type + 16, // [16:16] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name } func init() { file_temporal_server_api_persistence_v1_tasks_proto_init() } @@ -494,63 +708,13 @@ func file_temporal_server_api_persistence_v1_tasks_proto_init() { if File_temporal_server_api_persistence_v1_tasks_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AllocatedTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskQueueInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_temporal_server_api_persistence_v1_tasks_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskKey); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_tasks_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_tasks_proto_rawDesc), len(file_temporal_server_api_persistence_v1_tasks_proto_rawDesc)), NumEnums: 0, - NumMessages: 4, + NumMessages: 7, NumExtensions: 0, NumServices: 0, }, @@ -559,7 +723,6 @@ func file_temporal_server_api_persistence_v1_tasks_proto_init() { MessageInfos: file_temporal_server_api_persistence_v1_tasks_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_tasks_proto = out.File - file_temporal_server_api_persistence_v1_tasks_proto_rawDesc = nil file_temporal_server_api_persistence_v1_tasks_proto_goTypes = nil file_temporal_server_api_persistence_v1_tasks_proto_depIdxs = nil } diff --git a/api/persistence/v1/update.go-helpers.pb.go b/api/persistence/v1/update.go-helpers.pb.go new file mode 100644 index 00000000000..360cdd40dc0 --- /dev/null +++ b/api/persistence/v1/update.go-helpers.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package persistence + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type UpdateAdmissionInfo to the protobuf v3 wire format +func (val *UpdateAdmissionInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateAdmissionInfo from the protobuf v3 wire format +func (val *UpdateAdmissionInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateAdmissionInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateAdmissionInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateAdmissionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateAdmissionInfo + switch t := that.(type) { + case *UpdateAdmissionInfo: + that1 = t + case UpdateAdmissionInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateAcceptanceInfo to the protobuf v3 wire format +func (val *UpdateAcceptanceInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateAcceptanceInfo from the protobuf v3 wire format +func (val *UpdateAcceptanceInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateAcceptanceInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateAcceptanceInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateAcceptanceInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateAcceptanceInfo + switch t := that.(type) { + case *UpdateAcceptanceInfo: + that1 = t + case UpdateAcceptanceInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateCompletionInfo to the protobuf v3 wire format +func (val *UpdateCompletionInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateCompletionInfo from the protobuf v3 wire format +func (val *UpdateCompletionInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateCompletionInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateCompletionInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateCompletionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateCompletionInfo + switch t := that.(type) { + case *UpdateCompletionInfo: + that1 = t + case UpdateCompletionInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateInfo to the protobuf v3 wire format +func (val *UpdateInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateInfo from the protobuf v3 wire format +func (val *UpdateInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateInfo + switch t := that.(type) { + case *UpdateInfo: + that1 = t + case UpdateInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/update.pb.go b/api/persistence/v1/update.pb.go new file mode 100644 index 00000000000..f44cb2f429f --- /dev/null +++ b/api/persistence/v1/update.pb.go @@ -0,0 +1,455 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/persistence/v1/update.proto + +package persistence + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// UpdateAdmissionInfo contains information about a durably admitted update. Note that updates in Admitted state are typically +// non-durable (i.e. do not have a corresponding event in history). Durably admitted updates arise as a result of +// workflow reset or history event replication conflict: in these cases a WorkflowExecutionUpdateAdmittedEvent event is +// created when an accepted update (on one branch of workflow history) is converted into an admitted update (on another +// branch). 
+type UpdateAdmissionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Location: + // + // *UpdateAdmissionInfo_HistoryPointer_ + Location isUpdateAdmissionInfo_Location `protobuf_oneof:"location"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateAdmissionInfo) Reset() { + *x = UpdateAdmissionInfo{} + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateAdmissionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAdmissionInfo) ProtoMessage() {} + +func (x *UpdateAdmissionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateAdmissionInfo.ProtoReflect.Descriptor instead. 
+func (*UpdateAdmissionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_update_proto_rawDescGZIP(), []int{0} +} + +func (x *UpdateAdmissionInfo) GetLocation() isUpdateAdmissionInfo_Location { + if x != nil { + return x.Location + } + return nil +} + +func (x *UpdateAdmissionInfo) GetHistoryPointer() *UpdateAdmissionInfo_HistoryPointer { + if x != nil { + if x, ok := x.Location.(*UpdateAdmissionInfo_HistoryPointer_); ok { + return x.HistoryPointer + } + } + return nil +} + +type isUpdateAdmissionInfo_Location interface { + isUpdateAdmissionInfo_Location() +} + +type UpdateAdmissionInfo_HistoryPointer_ struct { + HistoryPointer *UpdateAdmissionInfo_HistoryPointer `protobuf:"bytes,1,opt,name=history_pointer,json=historyPointer,proto3,oneof"` +} + +func (*UpdateAdmissionInfo_HistoryPointer_) isUpdateAdmissionInfo_Location() {} + +// UpdateAcceptanceInfo contains information about an accepted update +type UpdateAcceptanceInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // the event ID of the WorkflowExecutionUpdateAcceptedEvent + EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateAcceptanceInfo) Reset() { + *x = UpdateAcceptanceInfo{} + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateAcceptanceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAcceptanceInfo) ProtoMessage() {} + +func (x *UpdateAcceptanceInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use UpdateAcceptanceInfo.ProtoReflect.Descriptor instead. +func (*UpdateAcceptanceInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_update_proto_rawDescGZIP(), []int{1} +} + +func (x *UpdateAcceptanceInfo) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +// UpdateCompletionInfo contains information about a completed update +type UpdateCompletionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // the event ID of the WorkflowExecutionUpdateCompletedEvent + EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + // the ID of the event batch containing the event_id above + EventBatchId int64 `protobuf:"varint,2,opt,name=event_batch_id,json=eventBatchId,proto3" json:"event_batch_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateCompletionInfo) Reset() { + *x = UpdateCompletionInfo{} + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateCompletionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateCompletionInfo) ProtoMessage() {} + +func (x *UpdateCompletionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateCompletionInfo.ProtoReflect.Descriptor instead. 
+func (*UpdateCompletionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_update_proto_rawDescGZIP(), []int{2} +} + +func (x *UpdateCompletionInfo) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *UpdateCompletionInfo) GetEventBatchId() int64 { + if x != nil { + return x.EventBatchId + } + return 0 +} + +// UpdateInfo is the persistent state of a single update +type UpdateInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Value: + // + // *UpdateInfo_Acceptance + // *UpdateInfo_Completion + // *UpdateInfo_Admission + Value isUpdateInfo_Value `protobuf_oneof:"value"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,4,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateInfo) Reset() { + *x = UpdateInfo{} + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateInfo) ProtoMessage() {} + +func (x *UpdateInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateInfo.ProtoReflect.Descriptor instead. 
+func (*UpdateInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_update_proto_rawDescGZIP(), []int{3} +} + +func (x *UpdateInfo) GetValue() isUpdateInfo_Value { + if x != nil { + return x.Value + } + return nil +} + +func (x *UpdateInfo) GetAcceptance() *UpdateAcceptanceInfo { + if x != nil { + if x, ok := x.Value.(*UpdateInfo_Acceptance); ok { + return x.Acceptance + } + } + return nil +} + +func (x *UpdateInfo) GetCompletion() *UpdateCompletionInfo { + if x != nil { + if x, ok := x.Value.(*UpdateInfo_Completion); ok { + return x.Completion + } + } + return nil +} + +func (x *UpdateInfo) GetAdmission() *UpdateAdmissionInfo { + if x != nil { + if x, ok := x.Value.(*UpdateInfo_Admission); ok { + return x.Admission + } + } + return nil +} + +func (x *UpdateInfo) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +type isUpdateInfo_Value interface { + isUpdateInfo_Value() +} + +type UpdateInfo_Acceptance struct { + // update has been accepted and this is the acceptance metadata + Acceptance *UpdateAcceptanceInfo `protobuf:"bytes,1,opt,name=acceptance,proto3,oneof"` +} + +type UpdateInfo_Completion struct { + // update has been completed and this is the completion metadata + Completion *UpdateCompletionInfo `protobuf:"bytes,2,opt,name=completion,proto3,oneof"` +} + +type UpdateInfo_Admission struct { + // update has been admitted and this is the admission metadata + Admission *UpdateAdmissionInfo `protobuf:"bytes,3,opt,name=admission,proto3,oneof"` +} + +func (*UpdateInfo_Acceptance) isUpdateInfo_Value() {} + +func (*UpdateInfo_Completion) isUpdateInfo_Value() {} + +func (*UpdateInfo_Admission) isUpdateInfo_Value() {} + +type UpdateAdmissionInfo_HistoryPointer struct { + state protoimpl.MessageState `protogen:"open.v1"` + // the event ID of the WorkflowExecutionUpdateAdmittedEvent + EventId int64 
`protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + // the ID of the event batch containing the event_id + EventBatchId int64 `protobuf:"varint,2,opt,name=event_batch_id,json=eventBatchId,proto3" json:"event_batch_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateAdmissionInfo_HistoryPointer) Reset() { + *x = UpdateAdmissionInfo_HistoryPointer{} + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateAdmissionInfo_HistoryPointer) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateAdmissionInfo_HistoryPointer) ProtoMessage() {} + +func (x *UpdateAdmissionInfo_HistoryPointer) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_update_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateAdmissionInfo_HistoryPointer.ProtoReflect.Descriptor instead. 
+func (*UpdateAdmissionInfo_HistoryPointer) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_update_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *UpdateAdmissionInfo_HistoryPointer) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *UpdateAdmissionInfo_HistoryPointer) GetEventBatchId() int64 { + if x != nil { + return x.EventBatchId + } + return 0 +} + +var File_temporal_server_api_persistence_v1_update_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_update_proto_rawDesc = "" + + "\n" + + "/temporal/server/api/persistence/v1/update.proto\x12\"temporal.server.api.persistence.v1\x1a,temporal/server/api/persistence/v1/hsm.proto\"\xe7\x01\n" + + "\x13UpdateAdmissionInfo\x12q\n" + + "\x0fhistory_pointer\x18\x01 \x01(\v2F.temporal.server.api.persistence.v1.UpdateAdmissionInfo.HistoryPointerH\x00R\x0ehistoryPointer\x1aQ\n" + + "\x0eHistoryPointer\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventId\x12$\n" + + "\x0eevent_batch_id\x18\x02 \x01(\x03R\feventBatchIdB\n" + + "\n" + + "\blocation\"1\n" + + "\x14UpdateAcceptanceInfo\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventId\"W\n" + + "\x14UpdateCompletionInfo\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventId\x12$\n" + + "\x0eevent_batch_id\x18\x02 \x01(\x03R\feventBatchId\"\xa9\x03\n" + + "\n" + + "UpdateInfo\x12Z\n" + + "\n" + + "acceptance\x18\x01 \x01(\v28.temporal.server.api.persistence.v1.UpdateAcceptanceInfoH\x00R\n" + + "acceptance\x12Z\n" + + "\n" + + "completion\x18\x02 \x01(\v28.temporal.server.api.persistence.v1.UpdateCompletionInfoH\x00R\n" + + "completion\x12W\n" + + "\tadmission\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.UpdateAdmissionInfoH\x00R\tadmission\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x04 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransitionB\a\n" + + 
"\x05valueB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + +var ( + file_temporal_server_api_persistence_v1_update_proto_rawDescOnce sync.Once + file_temporal_server_api_persistence_v1_update_proto_rawDescData []byte +) + +func file_temporal_server_api_persistence_v1_update_proto_rawDescGZIP() []byte { + file_temporal_server_api_persistence_v1_update_proto_rawDescOnce.Do(func() { + file_temporal_server_api_persistence_v1_update_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_update_proto_rawDesc), len(file_temporal_server_api_persistence_v1_update_proto_rawDesc))) + }) + return file_temporal_server_api_persistence_v1_update_proto_rawDescData +} + +var file_temporal_server_api_persistence_v1_update_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_temporal_server_api_persistence_v1_update_proto_goTypes = []any{ + (*UpdateAdmissionInfo)(nil), // 0: temporal.server.api.persistence.v1.UpdateAdmissionInfo + (*UpdateAcceptanceInfo)(nil), // 1: temporal.server.api.persistence.v1.UpdateAcceptanceInfo + (*UpdateCompletionInfo)(nil), // 2: temporal.server.api.persistence.v1.UpdateCompletionInfo + (*UpdateInfo)(nil), // 3: temporal.server.api.persistence.v1.UpdateInfo + (*UpdateAdmissionInfo_HistoryPointer)(nil), // 4: temporal.server.api.persistence.v1.UpdateAdmissionInfo.HistoryPointer + (*VersionedTransition)(nil), // 5: temporal.server.api.persistence.v1.VersionedTransition +} +var file_temporal_server_api_persistence_v1_update_proto_depIdxs = []int32{ + 4, // 0: temporal.server.api.persistence.v1.UpdateAdmissionInfo.history_pointer:type_name -> temporal.server.api.persistence.v1.UpdateAdmissionInfo.HistoryPointer + 1, // 1: temporal.server.api.persistence.v1.UpdateInfo.acceptance:type_name -> temporal.server.api.persistence.v1.UpdateAcceptanceInfo + 2, // 2: temporal.server.api.persistence.v1.UpdateInfo.completion:type_name -> 
temporal.server.api.persistence.v1.UpdateCompletionInfo + 0, // 3: temporal.server.api.persistence.v1.UpdateInfo.admission:type_name -> temporal.server.api.persistence.v1.UpdateAdmissionInfo + 5, // 4: temporal.server.api.persistence.v1.UpdateInfo.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_persistence_v1_update_proto_init() } +func file_temporal_server_api_persistence_v1_update_proto_init() { + if File_temporal_server_api_persistence_v1_update_proto != nil { + return + } + file_temporal_server_api_persistence_v1_hsm_proto_init() + file_temporal_server_api_persistence_v1_update_proto_msgTypes[0].OneofWrappers = []any{ + (*UpdateAdmissionInfo_HistoryPointer_)(nil), + } + file_temporal_server_api_persistence_v1_update_proto_msgTypes[3].OneofWrappers = []any{ + (*UpdateInfo_Acceptance)(nil), + (*UpdateInfo_Completion)(nil), + (*UpdateInfo_Admission)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_update_proto_rawDesc), len(file_temporal_server_api_persistence_v1_update_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_persistence_v1_update_proto_goTypes, + DependencyIndexes: file_temporal_server_api_persistence_v1_update_proto_depIdxs, + MessageInfos: file_temporal_server_api_persistence_v1_update_proto_msgTypes, + }.Build() + File_temporal_server_api_persistence_v1_update_proto = out.File + file_temporal_server_api_persistence_v1_update_proto_goTypes = nil 
+ file_temporal_server_api_persistence_v1_update_proto_depIdxs = nil +} diff --git a/api/persistence/v1/workflow_mutable_state.go-helpers.pb.go b/api/persistence/v1/workflow_mutable_state.go-helpers.pb.go index 26144b86829..650f4e93c1f 100644 --- a/api/persistence/v1/workflow_mutable_state.go-helpers.pb.go +++ b/api/persistence/v1/workflow_mutable_state.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package persistence @@ -65,3 +41,40 @@ func (this *WorkflowMutableState) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type WorkflowMutableStateMutation to the protobuf v3 wire format +func (val *WorkflowMutableStateMutation) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkflowMutableStateMutation from the protobuf v3 wire format +func (val *WorkflowMutableStateMutation) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkflowMutableStateMutation) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkflowMutableStateMutation values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkflowMutableStateMutation) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkflowMutableStateMutation + switch t := that.(type) { + case *WorkflowMutableStateMutation: + that1 = t + case WorkflowMutableStateMutation: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/persistence/v1/workflow_mutable_state.pb.go b/api/persistence/v1/workflow_mutable_state.pb.go index 92cbf3d5903..f099d8fb536 100644 --- a/api/persistence/v1/workflow_mutable_state.pb.go +++ b/api/persistence/v1/workflow_mutable_state.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package persistence import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/history/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -45,30 +24,28 @@ const ( ) type WorkflowMutableState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ActivityInfos map[int64]*ActivityInfo `protobuf:"bytes,1,rep,name=activity_infos,json=activityInfos,proto3" json:"activity_infos,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - TimerInfos map[string]*TimerInfo `protobuf:"bytes,2,rep,name=timer_infos,json=timerInfos,proto3" json:"timer_infos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ChildExecutionInfos map[int64]*ChildExecutionInfo `protobuf:"bytes,3,rep,name=child_execution_infos,json=childExecutionInfos,proto3" json:"child_execution_infos,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - RequestCancelInfos map[int64]*RequestCancelInfo `protobuf:"bytes,4,rep,name=request_cancel_infos,json=requestCancelInfos,proto3" json:"request_cancel_infos,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SignalInfos map[int64]*SignalInfo `protobuf:"bytes,5,rep,name=signal_infos,json=signalInfos,proto3" json:"signal_infos,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + ActivityInfos map[int64]*ActivityInfo `protobuf:"bytes,1,rep,name=activity_infos,json=activityInfos,proto3" json:"activity_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimerInfos map[string]*TimerInfo `protobuf:"bytes,2,rep,name=timer_infos,json=timerInfos,proto3" json:"timer_infos,omitempty" 
protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ChildExecutionInfos map[int64]*ChildExecutionInfo `protobuf:"bytes,3,rep,name=child_execution_infos,json=childExecutionInfos,proto3" json:"child_execution_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + RequestCancelInfos map[int64]*RequestCancelInfo `protobuf:"bytes,4,rep,name=request_cancel_infos,json=requestCancelInfos,proto3" json:"request_cancel_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SignalInfos map[int64]*SignalInfo `protobuf:"bytes,5,rep,name=signal_infos,json=signalInfos,proto3" json:"signal_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ChasmNodes map[string]*ChasmNode `protobuf:"bytes,12,rep,name=chasm_nodes,json=chasmNodes,proto3" json:"chasm_nodes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` SignalRequestedIds []string `protobuf:"bytes,6,rep,name=signal_requested_ids,json=signalRequestedIds,proto3" json:"signal_requested_ids,omitempty"` ExecutionInfo *WorkflowExecutionInfo `protobuf:"bytes,7,opt,name=execution_info,json=executionInfo,proto3" json:"execution_info,omitempty"` ExecutionState *WorkflowExecutionState `protobuf:"bytes,8,opt,name=execution_state,json=executionState,proto3" json:"execution_state,omitempty"` NextEventId int64 `protobuf:"varint,9,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` BufferedEvents []*v1.HistoryEvent `protobuf:"bytes,10,rep,name=buffered_events,json=bufferedEvents,proto3" json:"buffered_events,omitempty"` Checksum *Checksum `protobuf:"bytes,11,opt,name=checksum,proto3" json:"checksum,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WorkflowMutableState) Reset() { *x = WorkflowMutableState{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WorkflowMutableState) String() string { @@ -79,7 +56,7 @@ func (*WorkflowMutableState) ProtoMessage() {} func (x *WorkflowMutableState) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -129,6 +106,13 @@ func (x *WorkflowMutableState) GetSignalInfos() map[int64]*SignalInfo { return nil } +func (x *WorkflowMutableState) GetChasmNodes() map[string]*ChasmNode { + if x != nil { + return x.ChasmNodes + } + return nil +} + func (x *WorkflowMutableState) GetSignalRequestedIds() []string { if x != nil { return x.SignalRequestedIds @@ -171,181 +155,380 @@ func (x *WorkflowMutableState) GetChecksum() *Checksum { return nil } -var File_temporal_server_api_persistence_v1_workflow_mutable_state_proto protoreflect.FileDescriptor +type WorkflowMutableStateMutation struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The following updated_* fields are computed based on the + // lastUpdateVersionedTransition field of each sub state machine. 
+ UpdatedActivityInfos map[int64]*ActivityInfo `protobuf:"bytes,1,rep,name=updated_activity_infos,json=updatedActivityInfos,proto3" json:"updated_activity_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UpdatedTimerInfos map[string]*TimerInfo `protobuf:"bytes,2,rep,name=updated_timer_infos,json=updatedTimerInfos,proto3" json:"updated_timer_infos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UpdatedChildExecutionInfos map[int64]*ChildExecutionInfo `protobuf:"bytes,3,rep,name=updated_child_execution_infos,json=updatedChildExecutionInfos,proto3" json:"updated_child_execution_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UpdatedRequestCancelInfos map[int64]*RequestCancelInfo `protobuf:"bytes,4,rep,name=updated_request_cancel_infos,json=updatedRequestCancelInfos,proto3" json:"updated_request_cancel_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UpdatedSignalInfos map[int64]*SignalInfo `protobuf:"bytes,5,rep,name=updated_signal_infos,json=updatedSignalInfos,proto3" json:"updated_signal_infos,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UpdatedUpdateInfos map[string]*UpdateInfo `protobuf:"bytes,6,rep,name=updated_update_infos,json=updatedUpdateInfos,proto3" json:"updated_update_infos,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UpdatedSubStateMachines []*WorkflowMutableStateMutation_StateMachineNodeMutation `protobuf:"bytes,7,rep,name=updated_sub_state_machines,json=updatedSubStateMachines,proto3" json:"updated_sub_state_machines,omitempty"` + UpdatedChasmNodes map[string]*ChasmNode `protobuf:"bytes,19,rep,name=updated_chasm_nodes,json=updatedChasmNodes,proto3" json:"updated_chasm_nodes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + SignalRequestedIds []string 
`protobuf:"bytes,15,rep,name=signal_requested_ids,json=signalRequestedIds,proto3" json:"signal_requested_ids,omitempty"` + // Partial WorkflowExecutionInfo. Some fields, such as + // update_infos and sub_state_machines_by_type, are not populated here. + // Instead, only diffs are synced in the deleted_* and updated_* fields above. + ExecutionInfo *WorkflowExecutionInfo `protobuf:"bytes,16,opt,name=execution_info,json=executionInfo,proto3" json:"execution_info,omitempty"` + ExecutionState *WorkflowExecutionState `protobuf:"bytes,17,opt,name=execution_state,json=executionState,proto3" json:"execution_state,omitempty"` + SubStateMachineTombstoneBatches []*StateMachineTombstoneBatch `protobuf:"bytes,18,rep,name=sub_state_machine_tombstone_batches,json=subStateMachineTombstoneBatches,proto3" json:"sub_state_machine_tombstone_batches,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkflowMutableStateMutation) Reset() { + *x = WorkflowMutableStateMutation{} + mi := &file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkflowMutableStateMutation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowMutableStateMutation) ProtoMessage() {} + +func (x *WorkflowMutableStateMutation) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowMutableStateMutation.ProtoReflect.Descriptor instead. 
+func (*WorkflowMutableStateMutation) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescGZIP(), []int{1} +} + +func (x *WorkflowMutableStateMutation) GetUpdatedActivityInfos() map[int64]*ActivityInfo { + if x != nil { + return x.UpdatedActivityInfos + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedTimerInfos() map[string]*TimerInfo { + if x != nil { + return x.UpdatedTimerInfos + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedChildExecutionInfos() map[int64]*ChildExecutionInfo { + if x != nil { + return x.UpdatedChildExecutionInfos + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedRequestCancelInfos() map[int64]*RequestCancelInfo { + if x != nil { + return x.UpdatedRequestCancelInfos + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedSignalInfos() map[int64]*SignalInfo { + if x != nil { + return x.UpdatedSignalInfos + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedUpdateInfos() map[string]*UpdateInfo { + if x != nil { + return x.UpdatedUpdateInfos + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedSubStateMachines() []*WorkflowMutableStateMutation_StateMachineNodeMutation { + if x != nil { + return x.UpdatedSubStateMachines + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetUpdatedChasmNodes() map[string]*ChasmNode { + if x != nil { + return x.UpdatedChasmNodes + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetSignalRequestedIds() []string { + if x != nil { + return x.SignalRequestedIds + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetExecutionInfo() *WorkflowExecutionInfo { + if x != nil { + return x.ExecutionInfo + } + return nil +} + +func (x *WorkflowMutableStateMutation) GetExecutionState() *WorkflowExecutionState { + if x != nil { + return x.ExecutionState + } + return nil +} + +func (x *WorkflowMutableStateMutation) 
GetSubStateMachineTombstoneBatches() []*StateMachineTombstoneBatch { + if x != nil { + return x.SubStateMachineTombstoneBatches + } + return nil +} + +type WorkflowMutableStateMutation_StateMachineNodeMutation struct { + state protoimpl.MessageState `protogen:"open.v1"` + Path *StateMachinePath `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + InitialVersionedTransition *VersionedTransition `protobuf:"bytes,3,opt,name=initial_versioned_transition,json=initialVersionedTransition,proto3" json:"initial_versioned_transition,omitempty"` + LastUpdateVersionedTransition *VersionedTransition `protobuf:"bytes,4,opt,name=last_update_versioned_transition,json=lastUpdateVersionedTransition,proto3" json:"last_update_versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) Reset() { + *x = WorkflowMutableStateMutation_StateMachineNodeMutation{} + mi := &file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowMutableStateMutation_StateMachineNodeMutation) ProtoMessage() {} + +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowMutableStateMutation_StateMachineNodeMutation.ProtoReflect.Descriptor instead. 
+func (*WorkflowMutableStateMutation_StateMachineNodeMutation) Descriptor() ([]byte, []int) { + return file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) GetPath() *StateMachinePath { + if x != nil { + return x.Path + } + return nil +} + +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} -var file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc = []byte{ - 0x0a, 0x3f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, - 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x75, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x22, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, - 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x0d, 0x0a, 0x14, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x76, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 
0x69, 0x6e, 0x66, 0x6f, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x6d, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, - 0x74, 0x69, 0x6d, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x89, 0x01, - 0x0a, 0x15, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x51, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x6e, 0x66, 
0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x63, 0x68, 0x69, - 0x6c, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x86, 0x01, 0x0a, 0x14, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x50, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x12, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x49, 0x6e, - 0x66, 0x6f, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x70, 0x0a, 0x0c, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, - 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0b, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x12, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x65, 0x64, - 0x49, 0x64, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x64, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x67, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x52, 0x0a, 0x0f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x52, 0x0e, 0x62, 0x75, 0x66, 
0x66, 0x65, 0x72, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x4c, 0x0a, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x18, - 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, - 0x6d, 0x52, 0x08, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x42, 0x02, 0x68, 0x00, 0x1a, 0x7a, - 0x0a, 0x12, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, - 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x74, 0x0a, 0x0f, 0x54, 0x69, 0x6d, 0x65, 0x72, 0x49, 0x6e, - 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x42, 0x02, - 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x86, 0x01, 0x0a, 0x18, 0x43, 0x68, 0x69, 0x6c, 0x64, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x50, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, - 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x69, 0x6c, 0x64, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x84, 0x01, 0x0a, 0x17, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x76, 0x0a, 0x10, 0x53, 0x69, - 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x14, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x36, 0x5a, - 0x34, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x6e, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) GetInitialVersionedTransition() *VersionedTransition { + if x != nil { + return x.InitialVersionedTransition + } + return nil } +func (x *WorkflowMutableStateMutation_StateMachineNodeMutation) GetLastUpdateVersionedTransition() *VersionedTransition { + if x != nil { + return x.LastUpdateVersionedTransition + } + return nil +} + +var File_temporal_server_api_persistence_v1_workflow_mutable_state_proto protoreflect.FileDescriptor + +const file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc = "" + + "\n" + + "?temporal/server/api/persistence/v1/workflow_mutable_state.proto\x12\"temporal.server.api.persistence.v1\x1a%temporal/api/history/v1/message.proto\x1a.temporal/server/api/persistence/v1/chasm.proto\x1a3temporal/server/api/persistence/v1/executions.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\x1a/temporal/server/api/persistence/v1/update.proto\"\xd0\x0e\n" + + "\x14WorkflowMutableState\x12r\n" + + "\x0eactivity_infos\x18\x01 \x03(\v2K.temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntryR\ractivityInfos\x12i\n" + + 
"\vtimer_infos\x18\x02 \x03(\v2H.temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntryR\n" + + "timerInfos\x12\x85\x01\n" + + "\x15child_execution_infos\x18\x03 \x03(\v2Q.temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntryR\x13childExecutionInfos\x12\x82\x01\n" + + "\x14request_cancel_infos\x18\x04 \x03(\v2P.temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntryR\x12requestCancelInfos\x12l\n" + + "\fsignal_infos\x18\x05 \x03(\v2I.temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntryR\vsignalInfos\x12i\n" + + "\vchasm_nodes\x18\f \x03(\v2H.temporal.server.api.persistence.v1.WorkflowMutableState.ChasmNodesEntryR\n" + + "chasmNodes\x120\n" + + "\x14signal_requested_ids\x18\x06 \x03(\tR\x12signalRequestedIds\x12`\n" + + "\x0eexecution_info\x18\a \x01(\v29.temporal.server.api.persistence.v1.WorkflowExecutionInfoR\rexecutionInfo\x12c\n" + + "\x0fexecution_state\x18\b \x01(\v2:.temporal.server.api.persistence.v1.WorkflowExecutionStateR\x0eexecutionState\x12\"\n" + + "\rnext_event_id\x18\t \x01(\x03R\vnextEventId\x12N\n" + + "\x0fbuffered_events\x18\n" + + " \x03(\v2%.temporal.api.history.v1.HistoryEventR\x0ebufferedEvents\x12H\n" + + "\bchecksum\x18\v \x01(\v2,.temporal.server.api.persistence.v1.ChecksumR\bchecksum\x1ar\n" + + "\x12ActivityInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12F\n" + + "\x05value\x18\x02 \x01(\v20.temporal.server.api.persistence.v1.ActivityInfoR\x05value:\x028\x01\x1al\n" + + "\x0fTimerInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12C\n" + + "\x05value\x18\x02 \x01(\v2-.temporal.server.api.persistence.v1.TimerInfoR\x05value:\x028\x01\x1a~\n" + + "\x18ChildExecutionInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12L\n" + + "\x05value\x18\x02 \x01(\v26.temporal.server.api.persistence.v1.ChildExecutionInfoR\x05value:\x028\x01\x1a|\n" + + "\x17RequestCancelInfosEntry\x12\x10\n" + + "\x03key\x18\x01 
\x01(\x03R\x03key\x12K\n" + + "\x05value\x18\x02 \x01(\v25.temporal.server.api.persistence.v1.RequestCancelInfoR\x05value:\x028\x01\x1an\n" + + "\x10SignalInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12D\n" + + "\x05value\x18\x02 \x01(\v2..temporal.server.api.persistence.v1.SignalInfoR\x05value:\x028\x01\x1al\n" + + "\x0fChasmNodesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12C\n" + + "\x05value\x18\x02 \x01(\v2-.temporal.server.api.persistence.v1.ChasmNodeR\x05value:\x028\x01\"\xcd\x16\n" + + "\x1cWorkflowMutableStateMutation\x12\x90\x01\n" + + "\x16updated_activity_infos\x18\x01 \x03(\v2Z.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedActivityInfosEntryR\x14updatedActivityInfos\x12\x87\x01\n" + + "\x13updated_timer_infos\x18\x02 \x03(\v2W.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedTimerInfosEntryR\x11updatedTimerInfos\x12\xa3\x01\n" + + "\x1dupdated_child_execution_infos\x18\x03 \x03(\v2`.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChildExecutionInfosEntryR\x1aupdatedChildExecutionInfos\x12\xa0\x01\n" + + "\x1cupdated_request_cancel_infos\x18\x04 \x03(\v2_.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedRequestCancelInfosEntryR\x19updatedRequestCancelInfos\x12\x8a\x01\n" + + "\x14updated_signal_infos\x18\x05 \x03(\v2X.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedSignalInfosEntryR\x12updatedSignalInfos\x12\x8a\x01\n" + + "\x14updated_update_infos\x18\x06 \x03(\v2X.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedUpdateInfosEntryR\x12updatedUpdateInfos\x12\x96\x01\n" + + "\x1aupdated_sub_state_machines\x18\a \x03(\v2Y.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.StateMachineNodeMutationR\x17updatedSubStateMachines\x12\x87\x01\n" + + "\x13updated_chasm_nodes\x18\x13 
\x03(\v2W.temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChasmNodesEntryR\x11updatedChasmNodes\x120\n" + + "\x14signal_requested_ids\x18\x0f \x03(\tR\x12signalRequestedIds\x12`\n" + + "\x0eexecution_info\x18\x10 \x01(\v29.temporal.server.api.persistence.v1.WorkflowExecutionInfoR\rexecutionInfo\x12c\n" + + "\x0fexecution_state\x18\x11 \x01(\v2:.temporal.server.api.persistence.v1.WorkflowExecutionStateR\x0eexecutionState\x12\x8c\x01\n" + + "#sub_state_machine_tombstone_batches\x18\x12 \x03(\v2>.temporal.server.api.persistence.v1.StateMachineTombstoneBatchR\x1fsubStateMachineTombstoneBatches\x1a\xf6\x02\n" + + "\x18StateMachineNodeMutation\x12H\n" + + "\x04path\x18\x01 \x01(\v24.temporal.server.api.persistence.v1.StateMachinePathR\x04path\x12\x12\n" + + "\x04data\x18\x02 \x01(\fR\x04data\x12y\n" + + "\x1cinitial_versioned_transition\x18\x03 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1ainitialVersionedTransition\x12\x80\x01\n" + + " last_update_versioned_transition\x18\x04 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x1dlastUpdateVersionedTransition\x1ay\n" + + "\x19UpdatedActivityInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12F\n" + + "\x05value\x18\x02 \x01(\v20.temporal.server.api.persistence.v1.ActivityInfoR\x05value:\x028\x01\x1as\n" + + "\x16UpdatedTimerInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12C\n" + + "\x05value\x18\x02 \x01(\v2-.temporal.server.api.persistence.v1.TimerInfoR\x05value:\x028\x01\x1a\x85\x01\n" + + "\x1fUpdatedChildExecutionInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12L\n" + + "\x05value\x18\x02 \x01(\v26.temporal.server.api.persistence.v1.ChildExecutionInfoR\x05value:\x028\x01\x1a\x83\x01\n" + + "\x1eUpdatedRequestCancelInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12K\n" + + "\x05value\x18\x02 \x01(\v25.temporal.server.api.persistence.v1.RequestCancelInfoR\x05value:\x028\x01\x1au\n" + + 
"\x17UpdatedSignalInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x12D\n" + + "\x05value\x18\x02 \x01(\v2..temporal.server.api.persistence.v1.SignalInfoR\x05value:\x028\x01\x1au\n" + + "\x17UpdatedUpdateInfosEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12D\n" + + "\x05value\x18\x02 \x01(\v2..temporal.server.api.persistence.v1.UpdateInfoR\x05value:\x028\x01\x1as\n" + + "\x16UpdatedChasmNodesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12C\n" + + "\x05value\x18\x02 \x01(\v2-.temporal.server.api.persistence.v1.ChasmNodeR\x05value:\x028\x01J\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "J\x04\b\n" + + "\x10\vJ\x04\b\v\x10\fJ\x04\b\f\x10\rJ\x04\b\r\x10\x0eJ\x04\b\x0e\x10\x0fB6Z4go.temporal.io/server/api/persistence/v1;persistenceb\x06proto3" + var ( file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescOnce sync.Once - file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescData = file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc + file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescData []byte ) func file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescGZIP() []byte { file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescOnce.Do(func() { - file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescData) + file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc), len(file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc))) }) return file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDescData } -var file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes = 
make([]protoimpl.MessageInfo, 6) -var file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_goTypes = []interface{}{ - (*WorkflowMutableState)(nil), // 0: temporal.server.api.persistence.v1.WorkflowMutableState - nil, // 1: temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntry - nil, // 2: temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntry - nil, // 3: temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntry - nil, // 4: temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntry - nil, // 5: temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntry - (*WorkflowExecutionInfo)(nil), // 6: temporal.server.api.persistence.v1.WorkflowExecutionInfo - (*WorkflowExecutionState)(nil), // 7: temporal.server.api.persistence.v1.WorkflowExecutionState - (*v1.HistoryEvent)(nil), // 8: temporal.api.history.v1.HistoryEvent - (*Checksum)(nil), // 9: temporal.server.api.persistence.v1.Checksum - (*ActivityInfo)(nil), // 10: temporal.server.api.persistence.v1.ActivityInfo - (*TimerInfo)(nil), // 11: temporal.server.api.persistence.v1.TimerInfo - (*ChildExecutionInfo)(nil), // 12: temporal.server.api.persistence.v1.ChildExecutionInfo - (*RequestCancelInfo)(nil), // 13: temporal.server.api.persistence.v1.RequestCancelInfo - (*SignalInfo)(nil), // 14: temporal.server.api.persistence.v1.SignalInfo +var file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_goTypes = []any{ + (*WorkflowMutableState)(nil), // 0: temporal.server.api.persistence.v1.WorkflowMutableState + (*WorkflowMutableStateMutation)(nil), // 1: temporal.server.api.persistence.v1.WorkflowMutableStateMutation + nil, // 2: temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntry + nil, // 3: temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntry 
+ nil, // 4: temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntry + nil, // 5: temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntry + nil, // 6: temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntry + nil, // 7: temporal.server.api.persistence.v1.WorkflowMutableState.ChasmNodesEntry + (*WorkflowMutableStateMutation_StateMachineNodeMutation)(nil), // 8: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.StateMachineNodeMutation + nil, // 9: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedActivityInfosEntry + nil, // 10: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedTimerInfosEntry + nil, // 11: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChildExecutionInfosEntry + nil, // 12: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedRequestCancelInfosEntry + nil, // 13: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedSignalInfosEntry + nil, // 14: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedUpdateInfosEntry + nil, // 15: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChasmNodesEntry + (*WorkflowExecutionInfo)(nil), // 16: temporal.server.api.persistence.v1.WorkflowExecutionInfo + (*WorkflowExecutionState)(nil), // 17: temporal.server.api.persistence.v1.WorkflowExecutionState + (*v1.HistoryEvent)(nil), // 18: temporal.api.history.v1.HistoryEvent + (*Checksum)(nil), // 19: temporal.server.api.persistence.v1.Checksum + (*StateMachineTombstoneBatch)(nil), // 20: temporal.server.api.persistence.v1.StateMachineTombstoneBatch + (*ActivityInfo)(nil), // 21: temporal.server.api.persistence.v1.ActivityInfo + (*TimerInfo)(nil), // 22: temporal.server.api.persistence.v1.TimerInfo + (*ChildExecutionInfo)(nil), // 23: temporal.server.api.persistence.v1.ChildExecutionInfo + (*RequestCancelInfo)(nil), // 24: 
temporal.server.api.persistence.v1.RequestCancelInfo + (*SignalInfo)(nil), // 25: temporal.server.api.persistence.v1.SignalInfo + (*ChasmNode)(nil), // 26: temporal.server.api.persistence.v1.ChasmNode + (*StateMachinePath)(nil), // 27: temporal.server.api.persistence.v1.StateMachinePath + (*VersionedTransition)(nil), // 28: temporal.server.api.persistence.v1.VersionedTransition + (*UpdateInfo)(nil), // 29: temporal.server.api.persistence.v1.UpdateInfo } var file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_depIdxs = []int32{ - 1, // 0: temporal.server.api.persistence.v1.WorkflowMutableState.activity_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntry - 2, // 1: temporal.server.api.persistence.v1.WorkflowMutableState.timer_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntry - 3, // 2: temporal.server.api.persistence.v1.WorkflowMutableState.child_execution_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntry - 4, // 3: temporal.server.api.persistence.v1.WorkflowMutableState.request_cancel_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntry - 5, // 4: temporal.server.api.persistence.v1.WorkflowMutableState.signal_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntry - 6, // 5: temporal.server.api.persistence.v1.WorkflowMutableState.execution_info:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo - 7, // 6: temporal.server.api.persistence.v1.WorkflowMutableState.execution_state:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionState - 8, // 7: temporal.server.api.persistence.v1.WorkflowMutableState.buffered_events:type_name -> temporal.api.history.v1.HistoryEvent - 9, // 8: temporal.server.api.persistence.v1.WorkflowMutableState.checksum:type_name -> temporal.server.api.persistence.v1.Checksum - 10, // 9: 
temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntry.value:type_name -> temporal.server.api.persistence.v1.ActivityInfo - 11, // 10: temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntry.value:type_name -> temporal.server.api.persistence.v1.TimerInfo - 12, // 11: temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntry.value:type_name -> temporal.server.api.persistence.v1.ChildExecutionInfo - 13, // 12: temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntry.value:type_name -> temporal.server.api.persistence.v1.RequestCancelInfo - 14, // 13: temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntry.value:type_name -> temporal.server.api.persistence.v1.SignalInfo - 14, // [14:14] is the sub-list for method output_type - 14, // [14:14] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 2, // 0: temporal.server.api.persistence.v1.WorkflowMutableState.activity_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntry + 3, // 1: temporal.server.api.persistence.v1.WorkflowMutableState.timer_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntry + 4, // 2: temporal.server.api.persistence.v1.WorkflowMutableState.child_execution_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntry + 5, // 3: temporal.server.api.persistence.v1.WorkflowMutableState.request_cancel_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntry + 6, // 4: temporal.server.api.persistence.v1.WorkflowMutableState.signal_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntry + 7, // 5: 
temporal.server.api.persistence.v1.WorkflowMutableState.chasm_nodes:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState.ChasmNodesEntry + 16, // 6: temporal.server.api.persistence.v1.WorkflowMutableState.execution_info:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo + 17, // 7: temporal.server.api.persistence.v1.WorkflowMutableState.execution_state:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionState + 18, // 8: temporal.server.api.persistence.v1.WorkflowMutableState.buffered_events:type_name -> temporal.api.history.v1.HistoryEvent + 19, // 9: temporal.server.api.persistence.v1.WorkflowMutableState.checksum:type_name -> temporal.server.api.persistence.v1.Checksum + 9, // 10: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_activity_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedActivityInfosEntry + 10, // 11: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_timer_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedTimerInfosEntry + 11, // 12: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_child_execution_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChildExecutionInfosEntry + 12, // 13: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_request_cancel_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedRequestCancelInfosEntry + 13, // 14: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_signal_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedSignalInfosEntry + 14, // 15: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_update_infos:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedUpdateInfosEntry + 8, // 16: 
temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_sub_state_machines:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.StateMachineNodeMutation + 15, // 17: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.updated_chasm_nodes:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChasmNodesEntry + 16, // 18: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.execution_info:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionInfo + 17, // 19: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.execution_state:type_name -> temporal.server.api.persistence.v1.WorkflowExecutionState + 20, // 20: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.sub_state_machine_tombstone_batches:type_name -> temporal.server.api.persistence.v1.StateMachineTombstoneBatch + 21, // 21: temporal.server.api.persistence.v1.WorkflowMutableState.ActivityInfosEntry.value:type_name -> temporal.server.api.persistence.v1.ActivityInfo + 22, // 22: temporal.server.api.persistence.v1.WorkflowMutableState.TimerInfosEntry.value:type_name -> temporal.server.api.persistence.v1.TimerInfo + 23, // 23: temporal.server.api.persistence.v1.WorkflowMutableState.ChildExecutionInfosEntry.value:type_name -> temporal.server.api.persistence.v1.ChildExecutionInfo + 24, // 24: temporal.server.api.persistence.v1.WorkflowMutableState.RequestCancelInfosEntry.value:type_name -> temporal.server.api.persistence.v1.RequestCancelInfo + 25, // 25: temporal.server.api.persistence.v1.WorkflowMutableState.SignalInfosEntry.value:type_name -> temporal.server.api.persistence.v1.SignalInfo + 26, // 26: temporal.server.api.persistence.v1.WorkflowMutableState.ChasmNodesEntry.value:type_name -> temporal.server.api.persistence.v1.ChasmNode + 27, // 27: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.StateMachineNodeMutation.path:type_name -> 
temporal.server.api.persistence.v1.StateMachinePath + 28, // 28: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.StateMachineNodeMutation.initial_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 28, // 29: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.StateMachineNodeMutation.last_update_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 21, // 30: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedActivityInfosEntry.value:type_name -> temporal.server.api.persistence.v1.ActivityInfo + 22, // 31: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedTimerInfosEntry.value:type_name -> temporal.server.api.persistence.v1.TimerInfo + 23, // 32: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChildExecutionInfosEntry.value:type_name -> temporal.server.api.persistence.v1.ChildExecutionInfo + 24, // 33: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedRequestCancelInfosEntry.value:type_name -> temporal.server.api.persistence.v1.RequestCancelInfo + 25, // 34: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedSignalInfosEntry.value:type_name -> temporal.server.api.persistence.v1.SignalInfo + 29, // 35: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedUpdateInfosEntry.value:type_name -> temporal.server.api.persistence.v1.UpdateInfo + 26, // 36: temporal.server.api.persistence.v1.WorkflowMutableStateMutation.UpdatedChasmNodesEntry.value:type_name -> temporal.server.api.persistence.v1.ChasmNode + 37, // [37:37] is the sub-list for method output_type + 37, // [37:37] is the sub-list for method input_type + 37, // [37:37] is the sub-list for extension type_name + 37, // [37:37] is the sub-list for extension extendee + 0, // [0:37] is the sub-list for field type_name } func init() { 
file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_init() } @@ -353,28 +536,17 @@ func file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_init() if File_temporal_server_api_persistence_v1_workflow_mutable_state_proto != nil { return } + file_temporal_server_api_persistence_v1_chasm_proto_init() file_temporal_server_api_persistence_v1_executions_proto_init() - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowMutableState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } + file_temporal_server_api_persistence_v1_hsm_proto_init() + file_temporal_server_api_persistence_v1_update_proto_init() type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc), len(file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc)), NumEnums: 0, - NumMessages: 6, + NumMessages: 16, NumExtensions: 0, NumServices: 0, }, @@ -383,7 +555,6 @@ func file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_init() MessageInfos: file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_msgTypes, }.Build() File_temporal_server_api_persistence_v1_workflow_mutable_state_proto = out.File - file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_rawDesc = nil file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_goTypes = nil file_temporal_server_api_persistence_v1_workflow_mutable_state_proto_depIdxs = nil } diff --git a/api/replication/v1/message.go-helpers.pb.go 
b/api/replication/v1/message.go-helpers.pb.go index fd5138e7055..a4b44907168 100644 --- a/api/replication/v1/message.go-helpers.pb.go +++ b/api/replication/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package repication @@ -177,6 +153,43 @@ func (this *SyncReplicationState) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type ReplicationState to the protobuf v3 wire format +func (val *ReplicationState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ReplicationState from the protobuf v3 wire format +func (val *ReplicationState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ReplicationState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ReplicationState values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ReplicationState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ReplicationState + switch t := that.(type) { + case *ReplicationState: + that1 = t + case ReplicationState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type ReplicationMessages to the protobuf v3 wire format func (val *ReplicationMessages) Marshal() ([]byte, error) { return proto.Marshal(val) @@ -509,3 +522,336 @@ func (this *TaskQueueUserDataAttributes) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type SyncHSMAttributes to the protobuf v3 wire format +func (val *SyncHSMAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncHSMAttributes from the protobuf v3 wire format +func (val *SyncHSMAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncHSMAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two 
SyncHSMAttributes values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncHSMAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncHSMAttributes + switch t := that.(type) { + case *SyncHSMAttributes: + that1 = t + case SyncHSMAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type BackfillHistoryTaskAttributes to the protobuf v3 wire format +func (val *BackfillHistoryTaskAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BackfillHistoryTaskAttributes from the protobuf v3 wire format +func (val *BackfillHistoryTaskAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BackfillHistoryTaskAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BackfillHistoryTaskAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BackfillHistoryTaskAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BackfillHistoryTaskAttributes + switch t := that.(type) { + case *BackfillHistoryTaskAttributes: + that1 = t + case BackfillHistoryTaskAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NewRunInfo to the protobuf v3 wire format +func (val *NewRunInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NewRunInfo from the protobuf v3 wire format +func (val *NewRunInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NewRunInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NewRunInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NewRunInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NewRunInfo + switch t := that.(type) { + case *NewRunInfo: + that1 = t + case NewRunInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncWorkflowStateMutationAttributes to the protobuf v3 wire format +func (val *SyncWorkflowStateMutationAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncWorkflowStateMutationAttributes from the protobuf v3 wire format +func (val *SyncWorkflowStateMutationAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncWorkflowStateMutationAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncWorkflowStateMutationAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncWorkflowStateMutationAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncWorkflowStateMutationAttributes + switch t := that.(type) { + case *SyncWorkflowStateMutationAttributes: + that1 = t + case SyncWorkflowStateMutationAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncWorkflowStateSnapshotAttributes to the protobuf v3 wire format +func (val *SyncWorkflowStateSnapshotAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncWorkflowStateSnapshotAttributes from the protobuf v3 wire format +func (val *SyncWorkflowStateSnapshotAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncWorkflowStateSnapshotAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncWorkflowStateSnapshotAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncWorkflowStateSnapshotAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncWorkflowStateSnapshotAttributes + switch t := that.(type) { + case *SyncWorkflowStateSnapshotAttributes: + that1 = t + case SyncWorkflowStateSnapshotAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type VerifyVersionedTransitionTaskAttributes to the protobuf v3 wire format +func (val *VerifyVersionedTransitionTaskAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type VerifyVersionedTransitionTaskAttributes from the protobuf v3 wire format +func (val *VerifyVersionedTransitionTaskAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *VerifyVersionedTransitionTaskAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two VerifyVersionedTransitionTaskAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *VerifyVersionedTransitionTaskAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *VerifyVersionedTransitionTaskAttributes + switch t := that.(type) { + case *VerifyVersionedTransitionTaskAttributes: + that1 = t + case VerifyVersionedTransitionTaskAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SyncVersionedTransitionTaskAttributes to the protobuf v3 wire format +func (val *SyncVersionedTransitionTaskAttributes) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SyncVersionedTransitionTaskAttributes from the protobuf v3 wire format +func (val *SyncVersionedTransitionTaskAttributes) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SyncVersionedTransitionTaskAttributes) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SyncVersionedTransitionTaskAttributes values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SyncVersionedTransitionTaskAttributes) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SyncVersionedTransitionTaskAttributes + switch t := that.(type) { + case *SyncVersionedTransitionTaskAttributes: + that1 = t + case SyncVersionedTransitionTaskAttributes: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type VersionedTransitionArtifact to the protobuf v3 wire format +func (val *VersionedTransitionArtifact) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type VersionedTransitionArtifact from the protobuf v3 wire format +func (val *VersionedTransitionArtifact) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *VersionedTransitionArtifact) Size() int { + return proto.Size(val) +} + +// Equal returns whether two VersionedTransitionArtifact values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *VersionedTransitionArtifact) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *VersionedTransitionArtifact + switch t := that.(type) { + case *VersionedTransitionArtifact: + that1 = t + case VersionedTransitionArtifact: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MigrationExecutionInfo to the protobuf v3 wire format +func (val *MigrationExecutionInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MigrationExecutionInfo from the protobuf v3 wire format +func (val *MigrationExecutionInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MigrationExecutionInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MigrationExecutionInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MigrationExecutionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MigrationExecutionInfo + switch t := that.(type) { + case *MigrationExecutionInfo: + that1 = t + case MigrationExecutionInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/replication/v1/message.pb.go b/api/replication/v1/message.pb.go index 53b1e46ce3d..1fdcca398b9 100644 --- a/api/replication/v1/message.pb.go +++ b/api/replication/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,17 +9,19 @@ package repication import ( reflect "reflect" sync "sync" + unsafe "unsafe" v11 "go.temporal.io/api/common/v1" - v14 "go.temporal.io/api/failure/v1" - v12 "go.temporal.io/api/namespace/v1" - v13 "go.temporal.io/api/replication/v1" + v15 "go.temporal.io/api/failure/v1" + v13 "go.temporal.io/api/namespace/v1" + v14 "go.temporal.io/api/replication/v1" v1 "go.temporal.io/server/api/enums/v1" - v15 "go.temporal.io/server/api/history/v1" - v17 "go.temporal.io/server/api/persistence/v1" - v16 "go.temporal.io/server/api/workflow/v1" + v16 "go.temporal.io/server/api/history/v1" + v12 "go.temporal.io/server/api/persistence/v1" + v17 "go.temporal.io/server/api/workflow/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) @@ -53,13 +33,10 @@ const ( ) type ReplicationTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` TaskType v1.ReplicationTaskType `protobuf:"varint,1,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.ReplicationTaskType" json:"task_type,omitempty"` SourceTaskId int64 `protobuf:"varint,2,opt,name=source_task_id,json=sourceTaskId,proto3" json:"source_task_id,omitempty"` - // Types that are assignable to Attributes: + // Types that are valid to be assigned to Attributes: // // *ReplicationTask_NamespaceTaskAttributes // *ReplicationTask_SyncShardStatusTaskAttributes @@ -67,20 +44,27 @@ type ReplicationTask struct { // *ReplicationTask_HistoryTaskAttributes // *ReplicationTask_SyncWorkflowStateTaskAttributes // *ReplicationTask_TaskQueueUserDataAttributes + // *ReplicationTask_SyncHsmAttributes + // *ReplicationTask_BackfillHistoryTaskAttributes + // 
*ReplicationTask_VerifyVersionedTransitionTaskAttributes + // *ReplicationTask_SyncVersionedTransitionTaskAttributes Attributes isReplicationTask_Attributes `protobuf_oneof:"attributes"` // All attributes should be deprecated and replaced by this field. // The task_type + data provide more flexibility in future use cases. - Data *v11.DataBlob `protobuf:"bytes,12,opt,name=data,proto3" json:"data,omitempty"` - VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + Data *v11.DataBlob `protobuf:"bytes,12,opt,name=data,proto3" json:"data,omitempty"` + VisibilityTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=visibility_time,json=visibilityTime,proto3" json:"visibility_time,omitempty"` + Priority v1.TaskPriority `protobuf:"varint,13,opt,name=priority,proto3,enum=temporal.server.api.enums.v1.TaskPriority" json:"priority,omitempty"` + VersionedTransition *v12.VersionedTransition `protobuf:"bytes,15,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + RawTaskInfo *v12.ReplicationTaskInfo `protobuf:"bytes,17,opt,name=raw_task_info,json=rawTaskInfo,proto3" json:"raw_task_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReplicationTask) Reset() { *x = ReplicationTask{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReplicationTask) String() string { @@ -91,7 +75,7 @@ func (*ReplicationTask) ProtoMessage() {} func (x *ReplicationTask) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled 
&& x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -120,51 +104,99 @@ func (x *ReplicationTask) GetSourceTaskId() int64 { return 0 } -func (m *ReplicationTask) GetAttributes() isReplicationTask_Attributes { - if m != nil { - return m.Attributes +func (x *ReplicationTask) GetAttributes() isReplicationTask_Attributes { + if x != nil { + return x.Attributes } return nil } func (x *ReplicationTask) GetNamespaceTaskAttributes() *NamespaceTaskAttributes { - if x, ok := x.GetAttributes().(*ReplicationTask_NamespaceTaskAttributes); ok { - return x.NamespaceTaskAttributes + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_NamespaceTaskAttributes); ok { + return x.NamespaceTaskAttributes + } } return nil } func (x *ReplicationTask) GetSyncShardStatusTaskAttributes() *SyncShardStatusTaskAttributes { - if x, ok := x.GetAttributes().(*ReplicationTask_SyncShardStatusTaskAttributes); ok { - return x.SyncShardStatusTaskAttributes + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_SyncShardStatusTaskAttributes); ok { + return x.SyncShardStatusTaskAttributes + } } return nil } func (x *ReplicationTask) GetSyncActivityTaskAttributes() *SyncActivityTaskAttributes { - if x, ok := x.GetAttributes().(*ReplicationTask_SyncActivityTaskAttributes); ok { - return x.SyncActivityTaskAttributes + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_SyncActivityTaskAttributes); ok { + return x.SyncActivityTaskAttributes + } } return nil } func (x *ReplicationTask) GetHistoryTaskAttributes() *HistoryTaskAttributes { - if x, ok := x.GetAttributes().(*ReplicationTask_HistoryTaskAttributes); ok { - return x.HistoryTaskAttributes + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_HistoryTaskAttributes); ok { + return x.HistoryTaskAttributes + } } return nil } func (x *ReplicationTask) GetSyncWorkflowStateTaskAttributes() *SyncWorkflowStateTaskAttributes { - if x, ok := 
x.GetAttributes().(*ReplicationTask_SyncWorkflowStateTaskAttributes); ok { - return x.SyncWorkflowStateTaskAttributes + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_SyncWorkflowStateTaskAttributes); ok { + return x.SyncWorkflowStateTaskAttributes + } } return nil } func (x *ReplicationTask) GetTaskQueueUserDataAttributes() *TaskQueueUserDataAttributes { - if x, ok := x.GetAttributes().(*ReplicationTask_TaskQueueUserDataAttributes); ok { - return x.TaskQueueUserDataAttributes + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_TaskQueueUserDataAttributes); ok { + return x.TaskQueueUserDataAttributes + } + } + return nil +} + +func (x *ReplicationTask) GetSyncHsmAttributes() *SyncHSMAttributes { + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_SyncHsmAttributes); ok { + return x.SyncHsmAttributes + } + } + return nil +} + +func (x *ReplicationTask) GetBackfillHistoryTaskAttributes() *BackfillHistoryTaskAttributes { + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_BackfillHistoryTaskAttributes); ok { + return x.BackfillHistoryTaskAttributes + } + } + return nil +} + +func (x *ReplicationTask) GetVerifyVersionedTransitionTaskAttributes() *VerifyVersionedTransitionTaskAttributes { + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_VerifyVersionedTransitionTaskAttributes); ok { + return x.VerifyVersionedTransitionTaskAttributes + } + } + return nil +} + +func (x *ReplicationTask) GetSyncVersionedTransitionTaskAttributes() *SyncVersionedTransitionTaskAttributes { + if x != nil { + if x, ok := x.Attributes.(*ReplicationTask_SyncVersionedTransitionTaskAttributes); ok { + return x.SyncVersionedTransitionTaskAttributes + } } return nil } @@ -183,6 +215,27 @@ func (x *ReplicationTask) GetVisibilityTime() *timestamppb.Timestamp { return nil } +func (x *ReplicationTask) GetPriority() v1.TaskPriority { + if x != nil { + return x.Priority + } + return v1.TaskPriority(0) +} + +func (x *ReplicationTask) 
GetVersionedTransition() *v12.VersionedTransition { + if x != nil { + return x.VersionedTransition + } + return nil +} + +func (x *ReplicationTask) GetRawTaskInfo() *v12.ReplicationTaskInfo { + if x != nil { + return x.RawTaskInfo + } + return nil +} + type isReplicationTask_Attributes interface { isReplicationTask_Attributes() } @@ -211,6 +264,22 @@ type ReplicationTask_TaskQueueUserDataAttributes struct { TaskQueueUserDataAttributes *TaskQueueUserDataAttributes `protobuf:"bytes,11,opt,name=task_queue_user_data_attributes,json=taskQueueUserDataAttributes,proto3,oneof"` } +type ReplicationTask_SyncHsmAttributes struct { + SyncHsmAttributes *SyncHSMAttributes `protobuf:"bytes,14,opt,name=sync_hsm_attributes,json=syncHsmAttributes,proto3,oneof"` +} + +type ReplicationTask_BackfillHistoryTaskAttributes struct { + BackfillHistoryTaskAttributes *BackfillHistoryTaskAttributes `protobuf:"bytes,16,opt,name=backfill_history_task_attributes,json=backfillHistoryTaskAttributes,proto3,oneof"` +} + +type ReplicationTask_VerifyVersionedTransitionTaskAttributes struct { + VerifyVersionedTransitionTaskAttributes *VerifyVersionedTransitionTaskAttributes `protobuf:"bytes,18,opt,name=verify_versioned_transition_task_attributes,json=verifyVersionedTransitionTaskAttributes,proto3,oneof"` +} + +type ReplicationTask_SyncVersionedTransitionTaskAttributes struct { + SyncVersionedTransitionTaskAttributes *SyncVersionedTransitionTaskAttributes `protobuf:"bytes,19,opt,name=sync_versioned_transition_task_attributes,json=syncVersionedTransitionTaskAttributes,proto3,oneof"` +} + func (*ReplicationTask_NamespaceTaskAttributes) isReplicationTask_Attributes() {} func (*ReplicationTask_SyncShardStatusTaskAttributes) isReplicationTask_Attributes() {} @@ -223,12 +292,17 @@ func (*ReplicationTask_SyncWorkflowStateTaskAttributes) isReplicationTask_Attrib func (*ReplicationTask_TaskQueueUserDataAttributes) isReplicationTask_Attributes() {} -type ReplicationToken struct { - state protoimpl.MessageState - 
sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (*ReplicationTask_SyncHsmAttributes) isReplicationTask_Attributes() {} + +func (*ReplicationTask_BackfillHistoryTaskAttributes) isReplicationTask_Attributes() {} + +func (*ReplicationTask_VerifyVersionedTransitionTaskAttributes) isReplicationTask_Attributes() {} - ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` +func (*ReplicationTask_SyncVersionedTransitionTaskAttributes) isReplicationTask_Attributes() {} + +type ReplicationToken struct { + state protoimpl.MessageState `protogen:"open.v1"` + ShardId int32 `protobuf:"varint,1,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` // lastRetrievedMessageId is where the next fetch should begin with. LastRetrievedMessageId int64 `protobuf:"varint,2,opt,name=last_retrieved_message_id,json=lastRetrievedMessageId,proto3" json:"last_retrieved_message_id,omitempty"` // lastProcessedMessageId is the last messageId that is processed on the passive side. 
@@ -236,15 +310,15 @@ type ReplicationToken struct { LastProcessedMessageId int64 `protobuf:"varint,3,opt,name=last_processed_message_id,json=lastProcessedMessageId,proto3" json:"last_processed_message_id,omitempty"` // The VisibilityTime of last processed ReplicationTask LastProcessedVisibilityTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_processed_visibility_time,json=lastProcessedVisibilityTime,proto3" json:"last_processed_visibility_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReplicationToken) Reset() { *x = ReplicationToken{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReplicationToken) String() string { @@ -255,7 +329,7 @@ func (*ReplicationToken) ProtoMessage() {} func (x *ReplicationToken) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -299,20 +373,17 @@ func (x *ReplicationToken) GetLastProcessedVisibilityTime() *timestamppb.Timesta } type SyncShardStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + StatusTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=status_time,json=statusTime,proto3" json:"status_time,omitempty"` unknownFields protoimpl.UnknownFields - - StatusTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=status_time,json=statusTime,proto3" json:"status_time,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SyncShardStatus) Reset() { 
*x = SyncShardStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SyncShardStatus) String() string { @@ -323,7 +394,7 @@ func (*SyncShardStatus) ProtoMessage() {} func (x *SyncShardStatus) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -346,21 +417,22 @@ func (x *SyncShardStatus) GetStatusTime() *timestamppb.Timestamp { } type SyncReplicationState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - InclusiveLowWatermark int64 `protobuf:"varint,1,opt,name=inclusive_low_watermark,json=inclusiveLowWatermark,proto3" json:"inclusive_low_watermark,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + // deprecated in favor of using ReplicationState object + InclusiveLowWatermark int64 `protobuf:"varint,1,opt,name=inclusive_low_watermark,json=inclusiveLowWatermark,proto3" json:"inclusive_low_watermark,omitempty"` + // deprecated in favor of using ReplicationState object InclusiveLowWatermarkTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=inclusive_low_watermark_time,json=inclusiveLowWatermarkTime,proto3" json:"inclusive_low_watermark_time,omitempty"` + HighPriorityState *ReplicationState `protobuf:"bytes,3,opt,name=high_priority_state,json=highPriorityState,proto3" json:"high_priority_state,omitempty"` + LowPriorityState *ReplicationState `protobuf:"bytes,4,opt,name=low_priority_state,json=lowPriorityState,proto3" 
json:"low_priority_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SyncReplicationState) Reset() { *x = SyncReplicationState{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SyncReplicationState) String() string { @@ -371,7 +443,7 @@ func (*SyncReplicationState) ProtoMessage() {} func (x *SyncReplicationState) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -400,26 +472,97 @@ func (x *SyncReplicationState) GetInclusiveLowWatermarkTime() *timestamppb.Times return nil } -type ReplicationMessages struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SyncReplicationState) GetHighPriorityState() *ReplicationState { + if x != nil { + return x.HighPriorityState + } + return nil +} + +func (x *SyncReplicationState) GetLowPriorityState() *ReplicationState { + if x != nil { + return x.LowPriorityState + } + return nil +} + +type ReplicationState struct { + state protoimpl.MessageState `protogen:"open.v1"` + InclusiveLowWatermark int64 `protobuf:"varint,1,opt,name=inclusive_low_watermark,json=inclusiveLowWatermark,proto3" json:"inclusive_low_watermark,omitempty"` + InclusiveLowWatermarkTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=inclusive_low_watermark_time,json=inclusiveLowWatermarkTime,proto3" json:"inclusive_low_watermark_time,omitempty"` + FlowControlCommand v1.ReplicationFlowControlCommand 
`protobuf:"varint,3,opt,name=flow_control_command,json=flowControlCommand,proto3,enum=temporal.server.api.enums.v1.ReplicationFlowControlCommand" json:"flow_control_command,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ReplicationState) Reset() { + *x = ReplicationState{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ReplicationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReplicationState) ProtoMessage() {} + +func (x *ReplicationState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReplicationState.ProtoReflect.Descriptor instead. 
+func (*ReplicationState) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{4} +} + +func (x *ReplicationState) GetInclusiveLowWatermark() int64 { + if x != nil { + return x.InclusiveLowWatermark + } + return 0 +} + +func (x *ReplicationState) GetInclusiveLowWatermarkTime() *timestamppb.Timestamp { + if x != nil { + return x.InclusiveLowWatermarkTime + } + return nil +} + +func (x *ReplicationState) GetFlowControlCommand() v1.ReplicationFlowControlCommand { + if x != nil { + return x.FlowControlCommand + } + return v1.ReplicationFlowControlCommand(0) +} - ReplicationTasks []*ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` +type ReplicationMessages struct { + state protoimpl.MessageState `protogen:"open.v1"` + ReplicationTasks []*ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. for completed workflows). LastRetrievedMessageId int64 `protobuf:"varint,2,opt,name=last_retrieved_message_id,json=lastRetrievedMessageId,proto3" json:"last_retrieved_message_id,omitempty"` // Hint for flow control. 
HasMore bool `protobuf:"varint,3,opt,name=has_more,json=hasMore,proto3" json:"has_more,omitempty"` SyncShardStatus *SyncShardStatus `protobuf:"bytes,4,opt,name=sync_shard_status,json=syncShardStatus,proto3" json:"sync_shard_status,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReplicationMessages) Reset() { *x = ReplicationMessages{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReplicationMessages) String() string { @@ -429,8 +572,8 @@ func (x *ReplicationMessages) String() string { func (*ReplicationMessages) ProtoMessage() {} func (x *ReplicationMessages) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -442,7 +585,7 @@ func (x *ReplicationMessages) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicationMessages.ProtoReflect.Descriptor instead. 
func (*ReplicationMessages) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{4} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{5} } func (x *ReplicationMessages) GetReplicationTasks() []*ReplicationTask { @@ -474,23 +617,21 @@ func (x *ReplicationMessages) GetSyncShardStatus() *SyncShardStatus { } type WorkflowReplicationMessages struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ReplicationTasks []*ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ReplicationTasks []*ReplicationTask `protobuf:"bytes,1,rep,name=replication_tasks,json=replicationTasks,proto3" json:"replication_tasks,omitempty"` // This can be different than the last taskId in the above list, because sender can decide to skip tasks (e.g. for completed workflows). 
ExclusiveHighWatermark int64 `protobuf:"varint,2,opt,name=exclusive_high_watermark,json=exclusiveHighWatermark,proto3" json:"exclusive_high_watermark,omitempty"` ExclusiveHighWatermarkTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=exclusive_high_watermark_time,json=exclusiveHighWatermarkTime,proto3" json:"exclusive_high_watermark_time,omitempty"` + Priority v1.TaskPriority `protobuf:"varint,4,opt,name=priority,proto3,enum=temporal.server.api.enums.v1.TaskPriority" json:"priority,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WorkflowReplicationMessages) Reset() { *x = WorkflowReplicationMessages{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WorkflowReplicationMessages) String() string { @@ -500,8 +641,8 @@ func (x *WorkflowReplicationMessages) String() string { func (*WorkflowReplicationMessages) ProtoMessage() {} func (x *WorkflowReplicationMessages) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[6] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -513,7 +654,7 @@ func (x *WorkflowReplicationMessages) ProtoReflect() protoreflect.Message { // Deprecated: Use WorkflowReplicationMessages.ProtoReflect.Descriptor instead. 
func (*WorkflowReplicationMessages) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{5} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{6} } func (x *WorkflowReplicationMessages) GetReplicationTasks() []*ReplicationTask { @@ -537,29 +678,35 @@ func (x *WorkflowReplicationMessages) GetExclusiveHighWatermarkTime() *timestamp return nil } -type ReplicationTaskInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *WorkflowReplicationMessages) GetPriority() v1.TaskPriority { + if x != nil { + return x.Priority + } + return v1.TaskPriority(0) +} - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` - TaskId int64 `protobuf:"varint,5,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` - Version int64 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"` - FirstEventId int64 `protobuf:"varint,7,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` - NextEventId int64 `protobuf:"varint,8,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` - ScheduledEventId int64 `protobuf:"varint,9,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` +// TODO: Deprecate this definition, it only used by the deprecated replication DLQ v1 logic +type ReplicationTaskInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string 
`protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + TaskType v1.TaskType `protobuf:"varint,4,opt,name=task_type,json=taskType,proto3,enum=temporal.server.api.enums.v1.TaskType" json:"task_type,omitempty"` + TaskId int64 `protobuf:"varint,5,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Version int64 `protobuf:"varint,6,opt,name=version,proto3" json:"version,omitempty"` + FirstEventId int64 `protobuf:"varint,7,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` + NextEventId int64 `protobuf:"varint,8,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` + ScheduledEventId int64 `protobuf:"varint,9,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + Priority v1.TaskPriority `protobuf:"varint,10,opt,name=priority,proto3,enum=temporal.server.api.enums.v1.TaskPriority" json:"priority,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReplicationTaskInfo) Reset() { *x = ReplicationTaskInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReplicationTaskInfo) String() string { @@ -569,8 +716,8 @@ func (x *ReplicationTaskInfo) String() string { func (*ReplicationTaskInfo) ProtoMessage() {} func (x *ReplicationTaskInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[6] - if 
protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[7] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -582,7 +729,7 @@ func (x *ReplicationTaskInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use ReplicationTaskInfo.ProtoReflect.Descriptor instead. func (*ReplicationTaskInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{6} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{7} } func (x *ReplicationTaskInfo) GetNamespaceId() string { @@ -648,28 +795,32 @@ func (x *ReplicationTaskInfo) GetScheduledEventId() int64 { return 0 } -type NamespaceTaskAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ReplicationTaskInfo) GetPriority() v1.TaskPriority { + if x != nil { + return x.Priority + } + return v1.TaskPriority(0) +} +type NamespaceTaskAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` NamespaceOperation v1.NamespaceOperation `protobuf:"varint,1,opt,name=namespace_operation,json=namespaceOperation,proto3,enum=temporal.server.api.enums.v1.NamespaceOperation" json:"namespace_operation,omitempty"` Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Info *v12.NamespaceInfo `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` - Config *v12.NamespaceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - ReplicationConfig *v13.NamespaceReplicationConfig `protobuf:"bytes,5,opt,name=replication_config,json=replicationConfig,proto3" json:"replication_config,omitempty"` + Info *v13.NamespaceInfo `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` + Config *v13.NamespaceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + ReplicationConfig 
*v14.NamespaceReplicationConfig `protobuf:"bytes,5,opt,name=replication_config,json=replicationConfig,proto3" json:"replication_config,omitempty"` ConfigVersion int64 `protobuf:"varint,6,opt,name=config_version,json=configVersion,proto3" json:"config_version,omitempty"` FailoverVersion int64 `protobuf:"varint,7,opt,name=failover_version,json=failoverVersion,proto3" json:"failover_version,omitempty"` - FailoverHistory []*v13.FailoverStatus `protobuf:"bytes,8,rep,name=failover_history,json=failoverHistory,proto3" json:"failover_history,omitempty"` + FailoverHistory []*v14.FailoverStatus `protobuf:"bytes,8,rep,name=failover_history,json=failoverHistory,proto3" json:"failover_history,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NamespaceTaskAttributes) Reset() { *x = NamespaceTaskAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NamespaceTaskAttributes) String() string { @@ -679,8 +830,8 @@ func (x *NamespaceTaskAttributes) String() string { func (*NamespaceTaskAttributes) ProtoMessage() {} func (x *NamespaceTaskAttributes) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -692,7 +843,7 @@ func (x *NamespaceTaskAttributes) ProtoReflect() protoreflect.Message { // Deprecated: Use NamespaceTaskAttributes.ProtoReflect.Descriptor instead. 
func (*NamespaceTaskAttributes) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{7} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{8} } func (x *NamespaceTaskAttributes) GetNamespaceOperation() v1.NamespaceOperation { @@ -709,21 +860,21 @@ func (x *NamespaceTaskAttributes) GetId() string { return "" } -func (x *NamespaceTaskAttributes) GetInfo() *v12.NamespaceInfo { +func (x *NamespaceTaskAttributes) GetInfo() *v13.NamespaceInfo { if x != nil { return x.Info } return nil } -func (x *NamespaceTaskAttributes) GetConfig() *v12.NamespaceConfig { +func (x *NamespaceTaskAttributes) GetConfig() *v13.NamespaceConfig { if x != nil { return x.Config } return nil } -func (x *NamespaceTaskAttributes) GetReplicationConfig() *v13.NamespaceReplicationConfig { +func (x *NamespaceTaskAttributes) GetReplicationConfig() *v14.NamespaceReplicationConfig { if x != nil { return x.ReplicationConfig } @@ -744,7 +895,7 @@ func (x *NamespaceTaskAttributes) GetFailoverVersion() int64 { return 0 } -func (x *NamespaceTaskAttributes) GetFailoverHistory() []*v13.FailoverStatus { +func (x *NamespaceTaskAttributes) GetFailoverHistory() []*v14.FailoverStatus { if x != nil { return x.FailoverHistory } @@ -752,22 +903,19 @@ func (x *NamespaceTaskAttributes) GetFailoverHistory() []*v13.FailoverStatus { } type SyncShardStatusTaskAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` SourceCluster string `protobuf:"bytes,1,opt,name=source_cluster,json=sourceCluster,proto3" json:"source_cluster,omitempty"` ShardId int32 `protobuf:"varint,2,opt,name=shard_id,json=shardId,proto3" json:"shard_id,omitempty"` StatusTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=status_time,json=statusTime,proto3" json:"status_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache } func (x *SyncShardStatusTaskAttributes) Reset() { *x = SyncShardStatusTaskAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SyncShardStatusTaskAttributes) String() string { @@ -777,8 +925,8 @@ func (x *SyncShardStatusTaskAttributes) String() string { func (*SyncShardStatusTaskAttributes) ProtoMessage() {} func (x *SyncShardStatusTaskAttributes) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -790,7 +938,7 @@ func (x *SyncShardStatusTaskAttributes) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncShardStatusTaskAttributes.ProtoReflect.Descriptor instead. 
func (*SyncShardStatusTaskAttributes) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{8} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{9} } func (x *SyncShardStatusTaskAttributes) GetSourceCluster() string { @@ -815,10 +963,7 @@ func (x *SyncShardStatusTaskAttributes) GetStatusTime() *timestamppb.Timestamp { } type SyncActivityTaskAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` @@ -830,19 +975,38 @@ type SyncActivityTaskAttributes struct { LastHeartbeatTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=last_heartbeat_time,json=lastHeartbeatTime,proto3" json:"last_heartbeat_time,omitempty"` Details *v11.Payloads `protobuf:"bytes,10,opt,name=details,proto3" json:"details,omitempty"` Attempt int32 `protobuf:"varint,11,opt,name=attempt,proto3" json:"attempt,omitempty"` - LastFailure *v14.Failure `protobuf:"bytes,12,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` + LastFailure *v15.Failure `protobuf:"bytes,12,opt,name=last_failure,json=lastFailure,proto3" json:"last_failure,omitempty"` LastWorkerIdentity string `protobuf:"bytes,13,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` - VersionHistory *v15.VersionHistory `protobuf:"bytes,14,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` - BaseExecutionInfo *v16.BaseExecutionInfo `protobuf:"bytes,15,opt,name=base_execution_info,json=baseExecutionInfo,proto3" 
json:"base_execution_info,omitempty"` + VersionHistory *v16.VersionHistory `protobuf:"bytes,14,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + BaseExecutionInfo *v17.BaseExecutionInfo `protobuf:"bytes,15,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` + // build ID of the worker who received this activity last time + LastStartedBuildId string `protobuf:"bytes,16,opt,name=last_started_build_id,json=lastStartedBuildId,proto3" json:"last_started_build_id,omitempty"` + // workflows redirect_counter value when this activity started last time + LastStartedRedirectCounter int64 `protobuf:"varint,17,opt,name=last_started_redirect_counter,json=lastStartedRedirectCounter,proto3" json:"last_started_redirect_counter,omitempty"` + // The first time the activity was scheduled. + FirstScheduledTime *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=first_scheduled_time,json=firstScheduledTime,proto3" json:"first_scheduled_time,omitempty"` + // The last time an activity attempt completion was recorded by the server. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // Stamp represents the internal “version” of the activity options and can/will be changed with Activity API. + // It monotonically increments when the activity options are changed. + Stamp int32 `protobuf:"varint,20,opt,name=stamp,proto3" json:"stamp,omitempty"` + // Flag indicating whether the activity is currently paused. + Paused bool `protobuf:"varint,21,opt,name=paused,proto3" json:"paused,omitempty"` + // Retry policy for the activity. It needs to be replicated now, since the activity properties can be updated. 
+ RetryInitialInterval *durationpb.Duration `protobuf:"bytes,22,opt,name=retry_initial_interval,json=retryInitialInterval,proto3" json:"retry_initial_interval,omitempty"` + RetryMaximumInterval *durationpb.Duration `protobuf:"bytes,23,opt,name=retry_maximum_interval,json=retryMaximumInterval,proto3" json:"retry_maximum_interval,omitempty"` + RetryMaximumAttempts int32 `protobuf:"varint,24,opt,name=retry_maximum_attempts,json=retryMaximumAttempts,proto3" json:"retry_maximum_attempts,omitempty"` + RetryBackoffCoefficient float64 `protobuf:"fixed64,25,opt,name=retry_backoff_coefficient,json=retryBackoffCoefficient,proto3" json:"retry_backoff_coefficient,omitempty"` + StartVersion int64 `protobuf:"varint,26,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SyncActivityTaskAttributes) Reset() { *x = SyncActivityTaskAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SyncActivityTaskAttributes) String() string { @@ -852,8 +1016,8 @@ func (x *SyncActivityTaskAttributes) String() string { func (*SyncActivityTaskAttributes) ProtoMessage() {} func (x *SyncActivityTaskAttributes) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[10] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -865,7 +1029,7 @@ func (x *SyncActivityTaskAttributes) ProtoReflect() protoreflect.Message { // Deprecated: Use 
SyncActivityTaskAttributes.ProtoReflect.Descriptor instead. func (*SyncActivityTaskAttributes) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{9} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{10} } func (x *SyncActivityTaskAttributes) GetNamespaceId() string { @@ -945,7 +1109,7 @@ func (x *SyncActivityTaskAttributes) GetAttempt() int32 { return 0 } -func (x *SyncActivityTaskAttributes) GetLastFailure() *v14.Failure { +func (x *SyncActivityTaskAttributes) GetLastFailure() *v15.Failure { if x != nil { return x.LastFailure } @@ -959,66 +1123,142 @@ func (x *SyncActivityTaskAttributes) GetLastWorkerIdentity() string { return "" } -func (x *SyncActivityTaskAttributes) GetVersionHistory() *v15.VersionHistory { +func (x *SyncActivityTaskAttributes) GetVersionHistory() *v16.VersionHistory { if x != nil { return x.VersionHistory } return nil } -func (x *SyncActivityTaskAttributes) GetBaseExecutionInfo() *v16.BaseExecutionInfo { +func (x *SyncActivityTaskAttributes) GetBaseExecutionInfo() *v17.BaseExecutionInfo { if x != nil { return x.BaseExecutionInfo } return nil } -type HistoryTaskAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,4,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - VersionHistoryItems []*v15.VersionHistoryItem `protobuf:"bytes,5,rep,name=version_history_items,json=versionHistoryItems,proto3" json:"version_history_items,omitempty"` - Events *v11.DataBlob `protobuf:"bytes,6,opt,name=events,proto3" json:"events,omitempty"` - // New run events does not need version history since there is no prior events. 
- NewRunEvents *v11.DataBlob `protobuf:"bytes,7,opt,name=new_run_events,json=newRunEvents,proto3" json:"new_run_events,omitempty"` - BaseExecutionInfo *v16.BaseExecutionInfo `protobuf:"bytes,8,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` - NewRunId string `protobuf:"bytes,9,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` +func (x *SyncActivityTaskAttributes) GetLastStartedBuildId() string { + if x != nil { + return x.LastStartedBuildId + } + return "" } -func (x *HistoryTaskAttributes) Reset() { - *x = HistoryTaskAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (x *SyncActivityTaskAttributes) GetLastStartedRedirectCounter() int64 { + if x != nil { + return x.LastStartedRedirectCounter } + return 0 } -func (x *HistoryTaskAttributes) String() string { - return protoimpl.X.MessageStringOf(x) +func (x *SyncActivityTaskAttributes) GetFirstScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.FirstScheduledTime + } + return nil } -func (*HistoryTaskAttributes) ProtoMessage() {} +func (x *SyncActivityTaskAttributes) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.LastAttemptCompleteTime + } + return nil +} -func (x *HistoryTaskAttributes) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *SyncActivityTaskAttributes) GetStamp() int32 { + if x != nil { + return x.Stamp } - return mi.MessageOf(x) + return 0 } -// Deprecated: Use HistoryTaskAttributes.ProtoReflect.Descriptor instead. 
+func (x *SyncActivityTaskAttributes) GetPaused() bool { + if x != nil { + return x.Paused + } + return false +} + +func (x *SyncActivityTaskAttributes) GetRetryInitialInterval() *durationpb.Duration { + if x != nil { + return x.RetryInitialInterval + } + return nil +} + +func (x *SyncActivityTaskAttributes) GetRetryMaximumInterval() *durationpb.Duration { + if x != nil { + return x.RetryMaximumInterval + } + return nil +} + +func (x *SyncActivityTaskAttributes) GetRetryMaximumAttempts() int32 { + if x != nil { + return x.RetryMaximumAttempts + } + return 0 +} + +func (x *SyncActivityTaskAttributes) GetRetryBackoffCoefficient() float64 { + if x != nil { + return x.RetryBackoffCoefficient + } + return 0 +} + +func (x *SyncActivityTaskAttributes) GetStartVersion() int64 { + if x != nil { + return x.StartVersion + } + return 0 +} + +type HistoryTaskAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,4,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + VersionHistoryItems []*v16.VersionHistoryItem `protobuf:"bytes,5,rep,name=version_history_items,json=versionHistoryItems,proto3" json:"version_history_items,omitempty"` + // to be deprecated in favor of using events_batches + Events *v11.DataBlob `protobuf:"bytes,6,opt,name=events,proto3" json:"events,omitempty"` + // New run events does not need version history since there is no prior events. 
+ NewRunEvents *v11.DataBlob `protobuf:"bytes,7,opt,name=new_run_events,json=newRunEvents,proto3" json:"new_run_events,omitempty"` + BaseExecutionInfo *v17.BaseExecutionInfo `protobuf:"bytes,8,opt,name=base_execution_info,json=baseExecutionInfo,proto3" json:"base_execution_info,omitempty"` + NewRunId string `protobuf:"bytes,9,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` + EventsBatches []*v11.DataBlob `protobuf:"bytes,10,rep,name=events_batches,json=eventsBatches,proto3" json:"events_batches,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HistoryTaskAttributes) Reset() { + *x = HistoryTaskAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HistoryTaskAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HistoryTaskAttributes) ProtoMessage() {} + +func (x *HistoryTaskAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HistoryTaskAttributes.ProtoReflect.Descriptor instead. 
func (*HistoryTaskAttributes) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{10} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{11} } func (x *HistoryTaskAttributes) GetNamespaceId() string { @@ -1042,7 +1282,7 @@ func (x *HistoryTaskAttributes) GetRunId() string { return "" } -func (x *HistoryTaskAttributes) GetVersionHistoryItems() []*v15.VersionHistoryItem { +func (x *HistoryTaskAttributes) GetVersionHistoryItems() []*v16.VersionHistoryItem { if x != nil { return x.VersionHistoryItems } @@ -1063,7 +1303,7 @@ func (x *HistoryTaskAttributes) GetNewRunEvents() *v11.DataBlob { return nil } -func (x *HistoryTaskAttributes) GetBaseExecutionInfo() *v16.BaseExecutionInfo { +func (x *HistoryTaskAttributes) GetBaseExecutionInfo() *v17.BaseExecutionInfo { if x != nil { return x.BaseExecutionInfo } @@ -1077,21 +1317,27 @@ func (x *HistoryTaskAttributes) GetNewRunId() string { return "" } -type SyncWorkflowStateTaskAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *HistoryTaskAttributes) GetEventsBatches() []*v11.DataBlob { + if x != nil { + return x.EventsBatches + } + return nil +} - WorkflowState *v17.WorkflowMutableState `protobuf:"bytes,1,opt,name=workflow_state,json=workflowState,proto3" json:"workflow_state,omitempty"` +type SyncWorkflowStateTaskAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + WorkflowState *v12.WorkflowMutableState `protobuf:"bytes,1,opt,name=workflow_state,json=workflowState,proto3" json:"workflow_state,omitempty"` + IsForceReplication bool `protobuf:"varint,2,opt,name=is_force_replication,json=isForceReplication,proto3" json:"is_force_replication,omitempty"` + IsCloseTransferTaskAcked bool `protobuf:"varint,3,opt,name=is_close_transfer_task_acked,json=isCloseTransferTaskAcked,proto3" json:"is_close_transfer_task_acked,omitempty"` + 
unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SyncWorkflowStateTaskAttributes) Reset() { *x = SyncWorkflowStateTaskAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SyncWorkflowStateTaskAttributes) String() string { @@ -1101,8 +1347,8 @@ func (x *SyncWorkflowStateTaskAttributes) String() string { func (*SyncWorkflowStateTaskAttributes) ProtoMessage() {} func (x *SyncWorkflowStateTaskAttributes) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1114,33 +1360,44 @@ func (x *SyncWorkflowStateTaskAttributes) ProtoReflect() protoreflect.Message { // Deprecated: Use SyncWorkflowStateTaskAttributes.ProtoReflect.Descriptor instead. 
func (*SyncWorkflowStateTaskAttributes) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{11} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{12} } -func (x *SyncWorkflowStateTaskAttributes) GetWorkflowState() *v17.WorkflowMutableState { +func (x *SyncWorkflowStateTaskAttributes) GetWorkflowState() *v12.WorkflowMutableState { if x != nil { return x.WorkflowState } return nil } -type TaskQueueUserDataAttributes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SyncWorkflowStateTaskAttributes) GetIsForceReplication() bool { + if x != nil { + return x.IsForceReplication + } + return false +} +func (x *SyncWorkflowStateTaskAttributes) GetIsCloseTransferTaskAcked() bool { + if x != nil { + return x.IsCloseTransferTaskAcked + } + return false +} + +type TaskQueueUserDataAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` TaskQueueName string `protobuf:"bytes,2,opt,name=task_queue_name,json=taskQueueName,proto3" json:"task_queue_name,omitempty"` - UserData *v17.TaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + UserData *v12.TaskQueueUserData `protobuf:"bytes,3,opt,name=user_data,json=userData,proto3" json:"user_data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskQueueUserDataAttributes) Reset() { *x = TaskQueueUserDataAttributes{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *TaskQueueUserDataAttributes) String() string { @@ -1150,8 +1407,8 @@ func (x *TaskQueueUserDataAttributes) String() string { func (*TaskQueueUserDataAttributes) ProtoMessage() {} func (x *TaskQueueUserDataAttributes) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[13] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1163,7 +1420,7 @@ func (x *TaskQueueUserDataAttributes) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskQueueUserDataAttributes.ProtoReflect.Descriptor instead. func (*TaskQueueUserDataAttributes) Descriptor() ([]byte, []int) { - return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{12} + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{13} } func (x *TaskQueueUserDataAttributes) GetNamespaceId() string { @@ -1180,631 +1437,1037 @@ func (x *TaskQueueUserDataAttributes) GetTaskQueueName() string { return "" } -func (x *TaskQueueUserDataAttributes) GetUserData() *v17.TaskQueueUserData { +func (x *TaskQueueUserDataAttributes) GetUserData() *v12.TaskQueueUserData { if x != nil { return x.UserData } return nil } -var File_temporal_server_api_replication_v1_message_proto protoreflect.FileDescriptor +type SyncHSMAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + VersionHistory *v16.VersionHistory 
`protobuf:"bytes,4,opt,name=version_history,json=versionHistory,proto3" json:"version_history,omitempty"` + StateMachineNode *v12.StateMachineNode `protobuf:"bytes,5,opt,name=state_machine_node,json=stateMachineNode,proto3" json:"state_machine_node,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} -var file_temporal_server_api_replication_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x30, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x22, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x3f, 0x74, 0x65, 0x6d, 
0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, - 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6d, 0x75, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x34, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, - 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xea, 0x08, 0x0a, 0x0f, 
0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x52, 0x0a, 0x09, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, - 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x7d, 0x0a, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, - 0x52, 0x17, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x91, 0x01, 0x0a, - 0x21, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 
0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x68, - 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1d, 0x73, 0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, - 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x87, 0x01, 0x0a, 0x1d, 0x73, 0x79, 0x6e, - 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x3e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1a, 0x73, - 0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x41, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x77, 0x0a, 0x17, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x15, 0x68, 0x69, 0x73, 
0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x97, 0x01, 0x0a, 0x23, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, - 0x79, 0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, - 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1f, - 0x73, 0x79, 0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x8b, 0x01, 0x0a, 0x1f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, - 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x1b, 0x74, 0x61, 0x73, 0x6b, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x38, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 
0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, - 0x61, 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x47, 0x0a, 0x0f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x52, 0x0e, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x42, 0x0c, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x94, - 0x02, 0x0a, 0x10, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x3d, 0x0a, 0x19, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, - 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x19, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6c, - 0x61, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 
0x12, 0x63, 0x0a, 0x1e, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x1b, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, - 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x52, 0x0a, 0x0f, 0x53, 0x79, 0x6e, 0x63, 0x53, - 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3f, 0x0a, 0x0b, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb3, 0x01, 0x0a, 0x14, 0x53, 0x79, 0x6e, 0x63, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x3a, 0x0a, 0x17, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x77, 0x5f, - 0x77, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x15, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4c, 0x6f, 0x77, 0x57, 0x61, 0x74, 0x65, - 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5f, 0x0a, 0x1c, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6c, 0x6f, 0x77, 0x5f, 0x77, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, - 0x72, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x19, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x4c, 0x6f, 0x77, 0x57, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbe, 0x02, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x11, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3d, 0x0a, 0x19, - 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, - 0x6c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x68, 0x61, 0x73, 0x5f, - 0x6d, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x68, 0x61, 0x73, 0x4d, 0x6f, - 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x63, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x68, - 0x61, 0x72, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x33, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x76, 0x31, 0x2e, 0x53, 
0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x0f, 0x73, 0x79, 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x42, 0x02, 0x68, 0x00, 0x22, 0xa4, 0x02, 0x0a, 0x1b, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x64, 0x0a, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, - 0x6b, 0x52, 0x10, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x73, - 0x6b, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x18, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, - 0x76, 0x65, 0x5f, 0x68, 0x69, 0x67, 0x68, 0x5f, 0x77, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x48, 0x69, 0x67, 0x68, 0x57, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x61, 0x0a, 0x1d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x69, - 0x67, 0x68, 0x5f, 0x77, 0x61, 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x48, 0x69, 0x67, 0x68, 0x57, 0x61, - 0x74, 0x65, 0x72, 0x6d, 0x61, 0x72, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x42, 
0x02, 0x68, 0x00, 0x22, - 0x84, 0x03, 0x0a, 0x13, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, - 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, - 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, - 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, - 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x74, - 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, - 0x01, 0x28, 
0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, - 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xc0, 0x04, 0x0a, 0x17, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x65, 0x0a, - 0x13, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, - 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x12, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x40, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x46, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 
0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x37, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2d, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, - 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, - 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x5a, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x66, 0x61, 0x69, 0x6c, 0x6f, 0x76, 0x65, 0x72, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x22, 0xaa, 0x01, 0x0a, 0x1d, 0x53, 0x79, 
- 0x6e, 0x63, 0x53, 0x68, 0x61, 0x72, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x1d, 0x0a, 0x08, 0x73, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x73, 0x68, 0x61, 0x72, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x3f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xfd, - 0x06, 0x0a, 0x1a, 0x53, 0x79, 0x6e, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 
0x42, 0x02, 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x45, 0x0a, 0x0e, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, - 0x10, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x11, 0x6c, 0x61, 0x73, 0x74, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 
0x6c, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x47, - 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x61, 0x69, - 0x6c, 0x75, 0x72, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x34, 0x0a, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x12, 0x6c, 0x61, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x0e, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x66, 0x0a, 0x13, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x69, 0x6e, 
0x66, 0x6f, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x62, 0x61, - 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x84, 0x04, 0x0a, 0x15, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x54, 0x61, - 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x6a, 0x0a, - 0x15, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, - 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x13, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x3c, 0x0a, 0x06, 0x65, 0x76, 0x65, 
0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, - 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x4a, 0x0a, 0x0e, 0x6e, 0x65, 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, - 0x74, 0x61, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x0c, 0x6e, 0x65, 0x77, 0x52, 0x75, 0x6e, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x66, 0x0a, 0x13, 0x62, 0x61, 0x73, 0x65, 0x5f, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x62, 0x61, 0x73, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x0a, 0x6e, 0x65, - 0x77, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, - 0x65, 0x77, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, - 0x22, 0x86, 0x01, 0x0a, 0x1f, 0x53, 0x79, 0x6e, 0x63, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x63, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x73, - 0x74, 
0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, - 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xc8, 0x01, 0x0a, 0x1b, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, - 0x55, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2a, 0x0a, 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, - 0x65, 0x75, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x56, 0x0a, 0x09, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x55, - 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, - 0x42, 0x02, 0x68, 0x00, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x72, 0x65, - 0x70, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *SyncHSMAttributes) Reset() { + *x = SyncHSMAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -var ( - file_temporal_server_api_replication_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_replication_v1_message_proto_rawDescData = file_temporal_server_api_replication_v1_message_proto_rawDesc -) +func (x *SyncHSMAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} -func file_temporal_server_api_replication_v1_message_proto_rawDescGZIP() []byte { - file_temporal_server_api_replication_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_replication_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_replication_v1_message_proto_rawDescData) - }) - return file_temporal_server_api_replication_v1_message_proto_rawDescData +func (*SyncHSMAttributes) ProtoMessage() {} + +func (x *SyncHSMAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var file_temporal_server_api_replication_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_temporal_server_api_replication_v1_message_proto_goTypes = []interface{}{ - (*ReplicationTask)(nil), // 0: temporal.server.api.replication.v1.ReplicationTask - (*ReplicationToken)(nil), // 1: temporal.server.api.replication.v1.ReplicationToken - (*SyncShardStatus)(nil), // 2: temporal.server.api.replication.v1.SyncShardStatus - (*SyncReplicationState)(nil), // 3: 
temporal.server.api.replication.v1.SyncReplicationState - (*ReplicationMessages)(nil), // 4: temporal.server.api.replication.v1.ReplicationMessages - (*WorkflowReplicationMessages)(nil), // 5: temporal.server.api.replication.v1.WorkflowReplicationMessages - (*ReplicationTaskInfo)(nil), // 6: temporal.server.api.replication.v1.ReplicationTaskInfo - (*NamespaceTaskAttributes)(nil), // 7: temporal.server.api.replication.v1.NamespaceTaskAttributes - (*SyncShardStatusTaskAttributes)(nil), // 8: temporal.server.api.replication.v1.SyncShardStatusTaskAttributes - (*SyncActivityTaskAttributes)(nil), // 9: temporal.server.api.replication.v1.SyncActivityTaskAttributes - (*HistoryTaskAttributes)(nil), // 10: temporal.server.api.replication.v1.HistoryTaskAttributes - (*SyncWorkflowStateTaskAttributes)(nil), // 11: temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributes - (*TaskQueueUserDataAttributes)(nil), // 12: temporal.server.api.replication.v1.TaskQueueUserDataAttributes - (v1.ReplicationTaskType)(0), // 13: temporal.server.api.enums.v1.ReplicationTaskType - (*v11.DataBlob)(nil), // 14: temporal.api.common.v1.DataBlob - (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp - (v1.TaskType)(0), // 16: temporal.server.api.enums.v1.TaskType - (v1.NamespaceOperation)(0), // 17: temporal.server.api.enums.v1.NamespaceOperation - (*v12.NamespaceInfo)(nil), // 18: temporal.api.namespace.v1.NamespaceInfo - (*v12.NamespaceConfig)(nil), // 19: temporal.api.namespace.v1.NamespaceConfig - (*v13.NamespaceReplicationConfig)(nil), // 20: temporal.api.replication.v1.NamespaceReplicationConfig - (*v13.FailoverStatus)(nil), // 21: temporal.api.replication.v1.FailoverStatus - (*v11.Payloads)(nil), // 22: temporal.api.common.v1.Payloads - (*v14.Failure)(nil), // 23: temporal.api.failure.v1.Failure - (*v15.VersionHistory)(nil), // 24: temporal.server.api.history.v1.VersionHistory - (*v16.BaseExecutionInfo)(nil), // 25: temporal.server.api.workflow.v1.BaseExecutionInfo - 
(*v15.VersionHistoryItem)(nil), // 26: temporal.server.api.history.v1.VersionHistoryItem - (*v17.WorkflowMutableState)(nil), // 27: temporal.server.api.persistence.v1.WorkflowMutableState - (*v17.TaskQueueUserData)(nil), // 28: temporal.server.api.persistence.v1.TaskQueueUserData +// Deprecated: Use SyncHSMAttributes.ProtoReflect.Descriptor instead. +func (*SyncHSMAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{14} } -var file_temporal_server_api_replication_v1_message_proto_depIdxs = []int32{ - 13, // 0: temporal.server.api.replication.v1.ReplicationTask.task_type:type_name -> temporal.server.api.enums.v1.ReplicationTaskType - 7, // 1: temporal.server.api.replication.v1.ReplicationTask.namespace_task_attributes:type_name -> temporal.server.api.replication.v1.NamespaceTaskAttributes - 8, // 2: temporal.server.api.replication.v1.ReplicationTask.sync_shard_status_task_attributes:type_name -> temporal.server.api.replication.v1.SyncShardStatusTaskAttributes - 9, // 3: temporal.server.api.replication.v1.ReplicationTask.sync_activity_task_attributes:type_name -> temporal.server.api.replication.v1.SyncActivityTaskAttributes - 10, // 4: temporal.server.api.replication.v1.ReplicationTask.history_task_attributes:type_name -> temporal.server.api.replication.v1.HistoryTaskAttributes - 11, // 5: temporal.server.api.replication.v1.ReplicationTask.sync_workflow_state_task_attributes:type_name -> temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributes - 12, // 6: temporal.server.api.replication.v1.ReplicationTask.task_queue_user_data_attributes:type_name -> temporal.server.api.replication.v1.TaskQueueUserDataAttributes - 14, // 7: temporal.server.api.replication.v1.ReplicationTask.data:type_name -> temporal.api.common.v1.DataBlob - 15, // 8: temporal.server.api.replication.v1.ReplicationTask.visibility_time:type_name -> google.protobuf.Timestamp - 15, // 9: 
temporal.server.api.replication.v1.ReplicationToken.last_processed_visibility_time:type_name -> google.protobuf.Timestamp - 15, // 10: temporal.server.api.replication.v1.SyncShardStatus.status_time:type_name -> google.protobuf.Timestamp - 15, // 11: temporal.server.api.replication.v1.SyncReplicationState.inclusive_low_watermark_time:type_name -> google.protobuf.Timestamp - 0, // 12: temporal.server.api.replication.v1.ReplicationMessages.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask - 2, // 13: temporal.server.api.replication.v1.ReplicationMessages.sync_shard_status:type_name -> temporal.server.api.replication.v1.SyncShardStatus - 0, // 14: temporal.server.api.replication.v1.WorkflowReplicationMessages.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask - 15, // 15: temporal.server.api.replication.v1.WorkflowReplicationMessages.exclusive_high_watermark_time:type_name -> google.protobuf.Timestamp - 16, // 16: temporal.server.api.replication.v1.ReplicationTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType - 17, // 17: temporal.server.api.replication.v1.NamespaceTaskAttributes.namespace_operation:type_name -> temporal.server.api.enums.v1.NamespaceOperation - 18, // 18: temporal.server.api.replication.v1.NamespaceTaskAttributes.info:type_name -> temporal.api.namespace.v1.NamespaceInfo - 19, // 19: temporal.server.api.replication.v1.NamespaceTaskAttributes.config:type_name -> temporal.api.namespace.v1.NamespaceConfig - 20, // 20: temporal.server.api.replication.v1.NamespaceTaskAttributes.replication_config:type_name -> temporal.api.replication.v1.NamespaceReplicationConfig - 21, // 21: temporal.server.api.replication.v1.NamespaceTaskAttributes.failover_history:type_name -> temporal.api.replication.v1.FailoverStatus - 15, // 22: temporal.server.api.replication.v1.SyncShardStatusTaskAttributes.status_time:type_name -> google.protobuf.Timestamp - 15, // 23: 
temporal.server.api.replication.v1.SyncActivityTaskAttributes.scheduled_time:type_name -> google.protobuf.Timestamp - 15, // 24: temporal.server.api.replication.v1.SyncActivityTaskAttributes.started_time:type_name -> google.protobuf.Timestamp - 15, // 25: temporal.server.api.replication.v1.SyncActivityTaskAttributes.last_heartbeat_time:type_name -> google.protobuf.Timestamp - 22, // 26: temporal.server.api.replication.v1.SyncActivityTaskAttributes.details:type_name -> temporal.api.common.v1.Payloads - 23, // 27: temporal.server.api.replication.v1.SyncActivityTaskAttributes.last_failure:type_name -> temporal.api.failure.v1.Failure - 24, // 28: temporal.server.api.replication.v1.SyncActivityTaskAttributes.version_history:type_name -> temporal.server.api.history.v1.VersionHistory - 25, // 29: temporal.server.api.replication.v1.SyncActivityTaskAttributes.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo - 26, // 30: temporal.server.api.replication.v1.HistoryTaskAttributes.version_history_items:type_name -> temporal.server.api.history.v1.VersionHistoryItem - 14, // 31: temporal.server.api.replication.v1.HistoryTaskAttributes.events:type_name -> temporal.api.common.v1.DataBlob - 14, // 32: temporal.server.api.replication.v1.HistoryTaskAttributes.new_run_events:type_name -> temporal.api.common.v1.DataBlob - 25, // 33: temporal.server.api.replication.v1.HistoryTaskAttributes.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo - 27, // 34: temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributes.workflow_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState - 28, // 35: temporal.server.api.replication.v1.TaskQueueUserDataAttributes.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData - 36, // [36:36] is the sub-list for method output_type - 36, // [36:36] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // 
[36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + +func (x *SyncHSMAttributes) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" } -func init() { file_temporal_server_api_replication_v1_message_proto_init() } -func file_temporal_server_api_replication_v1_message_proto_init() { - if File_temporal_server_api_replication_v1_message_proto != nil { - return +func (x *SyncHSMAttributes) GetWorkflowId() string { + if x != nil { + return x.WorkflowId } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_replication_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationTask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationToken); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncShardStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncReplicationState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationMessages); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return 
nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WorkflowReplicationMessages); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReplicationTaskInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamespaceTaskAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncShardStatusTaskAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncActivityTaskAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryTaskAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_replication_v1_message_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SyncWorkflowStateTaskAttributes); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return "" +} + +func (x *SyncHSMAttributes) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *SyncHSMAttributes) GetVersionHistory() *v16.VersionHistory { + if x != nil { + return x.VersionHistory + } + return nil +} + +func (x *SyncHSMAttributes) GetStateMachineNode() *v12.StateMachineNode { + if x != nil { + return x.StateMachineNode + } + return nil +} + +type BackfillHistoryTaskAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + EventVersionHistory []*v16.VersionHistoryItem `protobuf:"bytes,5,rep,name=event_version_history,json=eventVersionHistory,proto3" json:"event_version_history,omitempty"` + EventBatches []*v11.DataBlob `protobuf:"bytes,6,rep,name=event_batches,json=eventBatches,proto3" json:"event_batches,omitempty"` + NewRunInfo *NewRunInfo `protobuf:"bytes,7,opt,name=new_run_info,json=newRunInfo,proto3" json:"new_run_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackfillHistoryTaskAttributes) Reset() { + *x = BackfillHistoryTaskAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackfillHistoryTaskAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillHistoryTaskAttributes) ProtoMessage() {} + +func (x *BackfillHistoryTaskAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[15] + 
if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - file_temporal_server_api_replication_v1_message_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskQueueUserDataAttributes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillHistoryTaskAttributes.ProtoReflect.Descriptor instead. +func (*BackfillHistoryTaskAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{15} +} + +func (x *BackfillHistoryTaskAttributes) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *BackfillHistoryTaskAttributes) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *BackfillHistoryTaskAttributes) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *BackfillHistoryTaskAttributes) GetEventVersionHistory() []*v16.VersionHistoryItem { + if x != nil { + return x.EventVersionHistory + } + return nil +} + +func (x *BackfillHistoryTaskAttributes) GetEventBatches() []*v11.DataBlob { + if x != nil { + return x.EventBatches + } + return nil +} + +func (x *BackfillHistoryTaskAttributes) GetNewRunInfo() *NewRunInfo { + if x != nil { + return x.NewRunInfo + } + return nil +} + +type NewRunInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + EventBatch *v11.DataBlob `protobuf:"bytes,2,opt,name=event_batch,json=eventBatch,proto3" json:"event_batch,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NewRunInfo) Reset() { + *x = NewRunInfo{} + mi := 
&file_temporal_server_api_replication_v1_message_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NewRunInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NewRunInfo) ProtoMessage() {} + +func (x *NewRunInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - file_temporal_server_api_replication_v1_message_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*ReplicationTask_NamespaceTaskAttributes)(nil), - (*ReplicationTask_SyncShardStatusTaskAttributes)(nil), - (*ReplicationTask_SyncActivityTaskAttributes)(nil), - (*ReplicationTask_HistoryTaskAttributes)(nil), - (*ReplicationTask_SyncWorkflowStateTaskAttributes)(nil), - (*ReplicationTask_TaskQueueUserDataAttributes)(nil), + return mi.MessageOf(x) +} + +// Deprecated: Use NewRunInfo.ProtoReflect.Descriptor instead. 
+func (*NewRunInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{16} +} + +func (x *NewRunInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *NewRunInfo) GetEventBatch() *v11.DataBlob { + if x != nil { + return x.EventBatch + } + return nil +} + +type SyncWorkflowStateMutationAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + ExclusiveStartVersionedTransition *v12.VersionedTransition `protobuf:"bytes,1,opt,name=exclusive_start_versioned_transition,json=exclusiveStartVersionedTransition,proto3" json:"exclusive_start_versioned_transition,omitempty"` + StateMutation *v12.WorkflowMutableStateMutation `protobuf:"bytes,2,opt,name=state_mutation,json=stateMutation,proto3" json:"state_mutation,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncWorkflowStateMutationAttributes) Reset() { + *x = SyncWorkflowStateMutationAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncWorkflowStateMutationAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncWorkflowStateMutationAttributes) ProtoMessage() {} + +func (x *SyncWorkflowStateMutationAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncWorkflowStateMutationAttributes.ProtoReflect.Descriptor instead. 
+func (*SyncWorkflowStateMutationAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{17} +} + +func (x *SyncWorkflowStateMutationAttributes) GetExclusiveStartVersionedTransition() *v12.VersionedTransition { + if x != nil { + return x.ExclusiveStartVersionedTransition + } + return nil +} + +func (x *SyncWorkflowStateMutationAttributes) GetStateMutation() *v12.WorkflowMutableStateMutation { + if x != nil { + return x.StateMutation + } + return nil +} + +type SyncWorkflowStateSnapshotAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + State *v12.WorkflowMutableState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncWorkflowStateSnapshotAttributes) Reset() { + *x = SyncWorkflowStateSnapshotAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncWorkflowStateSnapshotAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncWorkflowStateSnapshotAttributes) ProtoMessage() {} + +func (x *SyncWorkflowStateSnapshotAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncWorkflowStateSnapshotAttributes.ProtoReflect.Descriptor instead. 
+func (*SyncWorkflowStateSnapshotAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{18} +} + +func (x *SyncWorkflowStateSnapshotAttributes) GetState() *v12.WorkflowMutableState { + if x != nil { + return x.State + } + return nil +} + +type VerifyVersionedTransitionTaskAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + NextEventId int64 `protobuf:"varint,4,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` + EventVersionHistory []*v16.VersionHistoryItem `protobuf:"bytes,5,rep,name=event_version_history,json=eventVersionHistory,proto3" json:"event_version_history,omitempty"` + NewRunId string `protobuf:"bytes,6,opt,name=new_run_id,json=newRunId,proto3" json:"new_run_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,7,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VerifyVersionedTransitionTaskAttributes) Reset() { + *x = VerifyVersionedTransitionTaskAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VerifyVersionedTransitionTaskAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VerifyVersionedTransitionTaskAttributes) ProtoMessage() {} + +func (x *VerifyVersionedTransitionTaskAttributes) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_api_replication_v1_message_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VerifyVersionedTransitionTaskAttributes.ProtoReflect.Descriptor instead. +func (*VerifyVersionedTransitionTaskAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{19} +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetNextEventId() int64 { + if x != nil { + return x.NextEventId + } + return 0 +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetEventVersionHistory() []*v16.VersionHistoryItem { + if x != nil { + return x.EventVersionHistory + } + return nil +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetNewRunId() string { + if x != nil { + return x.NewRunId + } + return "" +} + +func (x *VerifyVersionedTransitionTaskAttributes) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +type SyncVersionedTransitionTaskAttributes struct { + state protoimpl.MessageState `protogen:"open.v1"` + VersionedTransitionArtifact *VersionedTransitionArtifact `protobuf:"bytes,5,opt,name=versioned_transition_artifact,json=versionedTransitionArtifact,proto3" json:"versioned_transition_artifact,omitempty"` + NamespaceId string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string 
`protobuf:"bytes,7,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,8,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,9,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SyncVersionedTransitionTaskAttributes) Reset() { + *x = SyncVersionedTransitionTaskAttributes{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SyncVersionedTransitionTaskAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SyncVersionedTransitionTaskAttributes) ProtoMessage() {} + +func (x *SyncVersionedTransitionTaskAttributes) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SyncVersionedTransitionTaskAttributes.ProtoReflect.Descriptor instead. 
+func (*SyncVersionedTransitionTaskAttributes) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{20} +} + +func (x *SyncVersionedTransitionTaskAttributes) GetVersionedTransitionArtifact() *VersionedTransitionArtifact { + if x != nil { + return x.VersionedTransitionArtifact + } + return nil +} + +func (x *SyncVersionedTransitionTaskAttributes) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SyncVersionedTransitionTaskAttributes) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *SyncVersionedTransitionTaskAttributes) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *SyncVersionedTransitionTaskAttributes) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +type VersionedTransitionArtifact struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to StateAttributes: + // + // *VersionedTransitionArtifact_SyncWorkflowStateMutationAttributes + // *VersionedTransitionArtifact_SyncWorkflowStateSnapshotAttributes + StateAttributes isVersionedTransitionArtifact_StateAttributes `protobuf_oneof:"state_attributes"` + EventBatches []*v11.DataBlob `protobuf:"bytes,3,rep,name=event_batches,json=eventBatches,proto3" json:"event_batches,omitempty"` + NewRunInfo *NewRunInfo `protobuf:"bytes,4,opt,name=new_run_info,json=newRunInfo,proto3" json:"new_run_info,omitempty"` + IsFirstSync bool `protobuf:"varint,5,opt,name=is_first_sync,json=isFirstSync,proto3" json:"is_first_sync,omitempty"` + IsCloseTransferTaskAcked bool `protobuf:"varint,6,opt,name=is_close_transfer_task_acked,json=isCloseTransferTaskAcked,proto3" json:"is_close_transfer_task_acked,omitempty"` + IsForceReplication bool `protobuf:"varint,7,opt,name=is_force_replication,json=isForceReplication,proto3" json:"is_force_replication,omitempty"` + unknownFields 
protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VersionedTransitionArtifact) Reset() { + *x = VersionedTransitionArtifact{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VersionedTransitionArtifact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionedTransitionArtifact) ProtoMessage() {} + +func (x *VersionedTransitionArtifact) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionedTransitionArtifact.ProtoReflect.Descriptor instead. +func (*VersionedTransitionArtifact) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{21} +} + +func (x *VersionedTransitionArtifact) GetStateAttributes() isVersionedTransitionArtifact_StateAttributes { + if x != nil { + return x.StateAttributes + } + return nil +} + +func (x *VersionedTransitionArtifact) GetSyncWorkflowStateMutationAttributes() *SyncWorkflowStateMutationAttributes { + if x != nil { + if x, ok := x.StateAttributes.(*VersionedTransitionArtifact_SyncWorkflowStateMutationAttributes); ok { + return x.SyncWorkflowStateMutationAttributes + } + } + return nil +} + +func (x *VersionedTransitionArtifact) GetSyncWorkflowStateSnapshotAttributes() *SyncWorkflowStateSnapshotAttributes { + if x != nil { + if x, ok := x.StateAttributes.(*VersionedTransitionArtifact_SyncWorkflowStateSnapshotAttributes); ok { + return x.SyncWorkflowStateSnapshotAttributes + } + } + return nil +} + +func (x *VersionedTransitionArtifact) GetEventBatches() []*v11.DataBlob { + if x != nil { + return x.EventBatches + } + return nil +} + +func (x 
*VersionedTransitionArtifact) GetNewRunInfo() *NewRunInfo { + if x != nil { + return x.NewRunInfo + } + return nil +} + +func (x *VersionedTransitionArtifact) GetIsFirstSync() bool { + if x != nil { + return x.IsFirstSync + } + return false +} + +func (x *VersionedTransitionArtifact) GetIsCloseTransferTaskAcked() bool { + if x != nil { + return x.IsCloseTransferTaskAcked + } + return false +} + +func (x *VersionedTransitionArtifact) GetIsForceReplication() bool { + if x != nil { + return x.IsForceReplication + } + return false +} + +type isVersionedTransitionArtifact_StateAttributes interface { + isVersionedTransitionArtifact_StateAttributes() +} + +type VersionedTransitionArtifact_SyncWorkflowStateMutationAttributes struct { + SyncWorkflowStateMutationAttributes *SyncWorkflowStateMutationAttributes `protobuf:"bytes,1,opt,name=sync_workflow_state_mutation_attributes,json=syncWorkflowStateMutationAttributes,proto3,oneof"` +} + +type VersionedTransitionArtifact_SyncWorkflowStateSnapshotAttributes struct { + SyncWorkflowStateSnapshotAttributes *SyncWorkflowStateSnapshotAttributes `protobuf:"bytes,2,opt,name=sync_workflow_state_snapshot_attributes,json=syncWorkflowStateSnapshotAttributes,proto3,oneof"` +} + +func (*VersionedTransitionArtifact_SyncWorkflowStateMutationAttributes) isVersionedTransitionArtifact_StateAttributes() { +} + +func (*VersionedTransitionArtifact_SyncWorkflowStateSnapshotAttributes) isVersionedTransitionArtifact_StateAttributes() { +} + +// MigrationExecutionInfo is only used in unit tests for validation compatibility. +// Remove it after v1.30 is released. +type MigrationExecutionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The proto json name of this field needs to be "workflowId", + // to be backward compatibility with commonpb.WorkflowExecution, + // which is what used to be used in migration workflow's activity + // input/output. 
+ BusinessId string `protobuf:"bytes,1,opt,name=business_id,json=workflowId,proto3" json:"business_id,omitempty"` + RunId string `protobuf:"bytes,2,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,3,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrationExecutionInfo) Reset() { + *x = MigrationExecutionInfo{} + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrationExecutionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrationExecutionInfo) ProtoMessage() {} + +func (x *MigrationExecutionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_replication_v1_message_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrationExecutionInfo.ProtoReflect.Descriptor instead. 
+func (*MigrationExecutionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_replication_v1_message_proto_rawDescGZIP(), []int{22} +} + +func (x *MigrationExecutionInfo) GetBusinessId() string { + if x != nil { + return x.BusinessId + } + return "" +} + +func (x *MigrationExecutionInfo) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *MigrationExecutionInfo) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +var File_temporal_server_api_replication_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_replication_v1_message_proto_rawDesc = "" + + "\n" + + "0temporal/server/api/replication/v1/message.proto\x12\"temporal.server.api.replication.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\x1a'temporal/api/namespace/v1/message.proto\x1a)temporal/api/replication/v1/message.proto\x1a.temporal/server/api/enums/v1/replication.proto\x1a'temporal/server/api/enums/v1/task.proto\x1a,temporal/server/api/history/v1/message.proto\x1a3temporal/server/api/persistence/v1/executions.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\x1a4temporal/server/api/persistence/v1/task_queues.proto\x1a?temporal/server/api/persistence/v1/workflow_mutable_state.proto\x1a-temporal/server/api/workflow/v1/message.proto\"\xa1\x0f\n" + + "\x0fReplicationTask\x12N\n" + + "\ttask_type\x18\x01 \x01(\x0e21.temporal.server.api.enums.v1.ReplicationTaskTypeR\btaskType\x12$\n" + + "\x0esource_task_id\x18\x02 \x01(\x03R\fsourceTaskId\x12y\n" + + "\x19namespace_task_attributes\x18\x03 \x01(\v2;.temporal.server.api.replication.v1.NamespaceTaskAttributesH\x00R\x17namespaceTaskAttributes\x12\x8d\x01\n" + + "!sync_shard_status_task_attributes\x18\x05 \x01(\v2A.temporal.server.api.replication.v1.SyncShardStatusTaskAttributesH\x00R\x1dsyncShardStatusTaskAttributes\x12\x83\x01\n" + + 
"\x1dsync_activity_task_attributes\x18\x06 \x01(\v2>.temporal.server.api.replication.v1.SyncActivityTaskAttributesH\x00R\x1asyncActivityTaskAttributes\x12s\n" + + "\x17history_task_attributes\x18\b \x01(\v29.temporal.server.api.replication.v1.HistoryTaskAttributesH\x00R\x15historyTaskAttributes\x12\x93\x01\n" + + "#sync_workflow_state_task_attributes\x18\n" + + " \x01(\v2C.temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributesH\x00R\x1fsyncWorkflowStateTaskAttributes\x12\x87\x01\n" + + "\x1ftask_queue_user_data_attributes\x18\v \x01(\v2?.temporal.server.api.replication.v1.TaskQueueUserDataAttributesH\x00R\x1btaskQueueUserDataAttributes\x12g\n" + + "\x13sync_hsm_attributes\x18\x0e \x01(\v25.temporal.server.api.replication.v1.SyncHSMAttributesH\x00R\x11syncHsmAttributes\x12\x8c\x01\n" + + " backfill_history_task_attributes\x18\x10 \x01(\v2A.temporal.server.api.replication.v1.BackfillHistoryTaskAttributesH\x00R\x1dbackfillHistoryTaskAttributes\x12\xab\x01\n" + + "+verify_versioned_transition_task_attributes\x18\x12 \x01(\v2K.temporal.server.api.replication.v1.VerifyVersionedTransitionTaskAttributesH\x00R'verifyVersionedTransitionTaskAttributes\x12\xa5\x01\n" + + ")sync_versioned_transition_task_attributes\x18\x13 \x01(\v2I.temporal.server.api.replication.v1.SyncVersionedTransitionTaskAttributesH\x00R%syncVersionedTransitionTaskAttributes\x124\n" + + "\x04data\x18\f \x01(\v2 .temporal.api.common.v1.DataBlobR\x04data\x12C\n" + + "\x0fvisibility_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\x0evisibilityTime\x12F\n" + + "\bpriority\x18\r \x01(\x0e2*.temporal.server.api.enums.v1.TaskPriorityR\bpriority\x12j\n" + + "\x14versioned_transition\x18\x0f \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransition\x12[\n" + + "\rraw_task_info\x18\x11 \x01(\v27.temporal.server.api.persistence.v1.ReplicationTaskInfoR\vrawTaskInfoB\f\n" + + "\n" + + "attributesJ\x04\b\x04\x10\x05J\x04\b\a\x10\b\"\x84\x02\n" + + 
"\x10ReplicationToken\x12\x19\n" + + "\bshard_id\x18\x01 \x01(\x05R\ashardId\x129\n" + + "\x19last_retrieved_message_id\x18\x02 \x01(\x03R\x16lastRetrievedMessageId\x129\n" + + "\x19last_processed_message_id\x18\x03 \x01(\x03R\x16lastProcessedMessageId\x12_\n" + + "\x1elast_processed_visibility_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x1blastProcessedVisibilityTime\"N\n" + + "\x0fSyncShardStatus\x12;\n" + + "\vstatus_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "statusTime\"\xf5\x02\n" + + "\x14SyncReplicationState\x126\n" + + "\x17inclusive_low_watermark\x18\x01 \x01(\x03R\x15inclusiveLowWatermark\x12[\n" + + "\x1cinclusive_low_watermark_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x19inclusiveLowWatermarkTime\x12d\n" + + "\x13high_priority_state\x18\x03 \x01(\v24.temporal.server.api.replication.v1.ReplicationStateR\x11highPriorityState\x12b\n" + + "\x12low_priority_state\x18\x04 \x01(\v24.temporal.server.api.replication.v1.ReplicationStateR\x10lowPriorityState\"\x96\x02\n" + + "\x10ReplicationState\x126\n" + + "\x17inclusive_low_watermark\x18\x01 \x01(\x03R\x15inclusiveLowWatermark\x12[\n" + + "\x1cinclusive_low_watermark_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x19inclusiveLowWatermarkTime\x12m\n" + + "\x14flow_control_command\x18\x03 \x01(\x0e2;.temporal.server.api.enums.v1.ReplicationFlowControlCommandR\x12flowControlCommand\"\xae\x02\n" + + "\x13ReplicationMessages\x12`\n" + + "\x11replication_tasks\x18\x01 \x03(\v23.temporal.server.api.replication.v1.ReplicationTaskR\x10replicationTasks\x129\n" + + "\x19last_retrieved_message_id\x18\x02 \x01(\x03R\x16lastRetrievedMessageId\x12\x19\n" + + "\bhas_more\x18\x03 \x01(\bR\ahasMore\x12_\n" + + "\x11sync_shard_status\x18\x04 \x01(\v23.temporal.server.api.replication.v1.SyncShardStatusR\x0fsyncShardStatus\"\xe0\x02\n" + + "\x1bWorkflowReplicationMessages\x12`\n" + + "\x11replication_tasks\x18\x01 
\x03(\v23.temporal.server.api.replication.v1.ReplicationTaskR\x10replicationTasks\x128\n" + + "\x18exclusive_high_watermark\x18\x02 \x01(\x03R\x16exclusiveHighWatermark\x12]\n" + + "\x1dexclusive_high_watermark_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x1aexclusiveHighWatermarkTime\x12F\n" + + "\bpriority\x18\x04 \x01(\x0e2*.temporal.server.api.enums.v1.TaskPriorityR\bpriority\"\xa8\x03\n" + + "\x13ReplicationTaskInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12C\n" + + "\ttask_type\x18\x04 \x01(\x0e2&.temporal.server.api.enums.v1.TaskTypeR\btaskType\x12\x17\n" + + "\atask_id\x18\x05 \x01(\x03R\x06taskId\x12\x18\n" + + "\aversion\x18\x06 \x01(\x03R\aversion\x12$\n" + + "\x0efirst_event_id\x18\a \x01(\x03R\ffirstEventId\x12\"\n" + + "\rnext_event_id\x18\b \x01(\x03R\vnextEventId\x12,\n" + + "\x12scheduled_event_id\x18\t \x01(\x03R\x10scheduledEventId\x12F\n" + + "\bpriority\x18\n" + + " \x01(\x0e2*.temporal.server.api.enums.v1.TaskPriorityR\bpriority\"\xa0\x04\n" + + "\x17NamespaceTaskAttributes\x12a\n" + + "\x13namespace_operation\x18\x01 \x01(\x0e20.temporal.server.api.enums.v1.NamespaceOperationR\x12namespaceOperation\x12\x0e\n" + + "\x02id\x18\x02 \x01(\tR\x02id\x12<\n" + + "\x04info\x18\x03 \x01(\v2(.temporal.api.namespace.v1.NamespaceInfoR\x04info\x12B\n" + + "\x06config\x18\x04 \x01(\v2*.temporal.api.namespace.v1.NamespaceConfigR\x06config\x12f\n" + + "\x12replication_config\x18\x05 \x01(\v27.temporal.api.replication.v1.NamespaceReplicationConfigR\x11replicationConfig\x12%\n" + + "\x0econfig_version\x18\x06 \x01(\x03R\rconfigVersion\x12)\n" + + "\x10failover_version\x18\a \x01(\x03R\x0ffailoverVersion\x12V\n" + + "\x10failover_history\x18\b \x03(\v2+.temporal.api.replication.v1.FailoverStatusR\x0ffailoverHistory\"\x9e\x01\n" + + "\x1dSyncShardStatusTaskAttributes\x12%\n" + + "\x0esource_cluster\x18\x01 
\x01(\tR\rsourceCluster\x12\x19\n" + + "\bshard_id\x18\x02 \x01(\x05R\ashardId\x12;\n" + + "\vstatus_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "statusTime\"\xc5\v\n" + + "\x1aSyncActivityTaskAttributes\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12\x18\n" + + "\aversion\x18\x04 \x01(\x03R\aversion\x12,\n" + + "\x12scheduled_event_id\x18\x05 \x01(\x03R\x10scheduledEventId\x12A\n" + + "\x0escheduled_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12(\n" + + "\x10started_event_id\x18\a \x01(\x03R\x0estartedEventId\x12=\n" + + "\fstarted_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12J\n" + + "\x13last_heartbeat_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\x11lastHeartbeatTime\x12:\n" + + "\adetails\x18\n" + + " \x01(\v2 .temporal.api.common.v1.PayloadsR\adetails\x12\x18\n" + + "\aattempt\x18\v \x01(\x05R\aattempt\x12C\n" + + "\flast_failure\x18\f \x01(\v2 .temporal.api.failure.v1.FailureR\vlastFailure\x120\n" + + "\x14last_worker_identity\x18\r \x01(\tR\x12lastWorkerIdentity\x12W\n" + + "\x0fversion_history\x18\x0e \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12b\n" + + "\x13base_execution_info\x18\x0f \x01(\v22.temporal.server.api.workflow.v1.BaseExecutionInfoR\x11baseExecutionInfo\x121\n" + + "\x15last_started_build_id\x18\x10 \x01(\tR\x12lastStartedBuildId\x12A\n" + + "\x1dlast_started_redirect_counter\x18\x11 \x01(\x03R\x1alastStartedRedirectCounter\x12L\n" + + "\x14first_scheduled_time\x18\x12 \x01(\v2\x1a.google.protobuf.TimestampR\x12firstScheduledTime\x12W\n" + + "\x1alast_attempt_complete_time\x18\x13 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12\x14\n" + + "\x05stamp\x18\x14 \x01(\x05R\x05stamp\x12\x16\n" + + "\x06paused\x18\x15 \x01(\bR\x06paused\x12O\n" + + "\x16retry_initial_interval\x18\x16 
\x01(\v2\x19.google.protobuf.DurationR\x14retryInitialInterval\x12O\n" + + "\x16retry_maximum_interval\x18\x17 \x01(\v2\x19.google.protobuf.DurationR\x14retryMaximumInterval\x124\n" + + "\x16retry_maximum_attempts\x18\x18 \x01(\x05R\x14retryMaximumAttempts\x12:\n" + + "\x19retry_backoff_coefficient\x18\x19 \x01(\x01R\x17retryBackoffCoefficient\x12#\n" + + "\rstart_version\x18\x1a \x01(\x03R\fstartVersion\"\xad\x04\n" + + "\x15HistoryTaskAttributes\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x03 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x04 \x01(\tR\x05runId\x12f\n" + + "\x15version_history_items\x18\x05 \x03(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x13versionHistoryItems\x128\n" + + "\x06events\x18\x06 \x01(\v2 .temporal.api.common.v1.DataBlobR\x06events\x12F\n" + + "\x0enew_run_events\x18\a \x01(\v2 .temporal.api.common.v1.DataBlobR\fnewRunEvents\x12b\n" + + "\x13base_execution_info\x18\b \x01(\v22.temporal.server.api.workflow.v1.BaseExecutionInfoR\x11baseExecutionInfo\x12\x1c\n" + + "\n" + + "new_run_id\x18\t \x01(\tR\bnewRunId\x12G\n" + + "\x0eevents_batches\x18\n" + + " \x03(\v2 .temporal.api.common.v1.DataBlobR\reventsBatchesJ\x04\b\x01\x10\x02\"\xf4\x01\n" + + "\x1fSyncWorkflowStateTaskAttributes\x12_\n" + + "\x0eworkflow_state\x18\x01 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\rworkflowState\x120\n" + + "\x14is_force_replication\x18\x02 \x01(\bR\x12isForceReplication\x12>\n" + + "\x1cis_close_transfer_task_acked\x18\x03 \x01(\bR\x18isCloseTransferTaskAcked\"\xbc\x01\n" + + "\x1bTaskQueueUserDataAttributes\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12&\n" + + "\x0ftask_queue_name\x18\x02 \x01(\tR\rtaskQueueName\x12R\n" + + "\tuser_data\x18\x03 \x01(\v25.temporal.server.api.persistence.v1.TaskQueueUserDataR\buserData\"\xab\x02\n" + + "\x11SyncHSMAttributes\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + 
"\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12W\n" + + "\x0fversion_history\x18\x04 \x01(\v2..temporal.server.api.history.v1.VersionHistoryR\x0eversionHistory\x12b\n" + + "\x12state_machine_node\x18\x05 \x01(\v24.temporal.server.api.persistence.v1.StateMachineNodeR\x10stateMachineNode\"\xfb\x02\n" + + "\x1dBackfillHistoryTaskAttributes\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12f\n" + + "\x15event_version_history\x18\x05 \x03(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x13eventVersionHistory\x12E\n" + + "\revent_batches\x18\x06 \x03(\v2 .temporal.api.common.v1.DataBlobR\feventBatches\x12P\n" + + "\fnew_run_info\x18\a \x01(\v2..temporal.server.api.replication.v1.NewRunInfoR\n" + + "newRunInfo\"f\n" + + "\n" + + "NewRunInfo\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\x12A\n" + + "\vevent_batch\x18\x02 \x01(\v2 .temporal.api.common.v1.DataBlobR\n" + + "eventBatch\"\x99\x02\n" + + "#SyncWorkflowStateMutationAttributes\x12\x88\x01\n" + + "$exclusive_start_versioned_transition\x18\x01 \x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR!exclusiveStartVersionedTransition\x12g\n" + + "\x0estate_mutation\x18\x02 \x01(\v2@.temporal.server.api.persistence.v1.WorkflowMutableStateMutationR\rstateMutation\"u\n" + + "#SyncWorkflowStateSnapshotAttributes\x12N\n" + + "\x05state\x18\x01 \x01(\v28.temporal.server.api.persistence.v1.WorkflowMutableStateR\x05state\"\xd1\x02\n" + + "'VerifyVersionedTransitionTaskAttributes\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12\"\n" + + "\rnext_event_id\x18\x04 \x01(\x03R\vnextEventId\x12f\n" + + "\x15event_version_history\x18\x05 
\x03(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x13eventVersionHistory\x12\x1c\n" + + "\n" + + "new_run_id\x18\x06 \x01(\tR\bnewRunId\x12!\n" + + "\farchetype_id\x18\a \x01(\rR\varchetypeId\"\xc3\x02\n" + + "%SyncVersionedTransitionTaskAttributes\x12\x83\x01\n" + + "\x1dversioned_transition_artifact\x18\x05 \x01(\v2?.temporal.server.api.replication.v1.VersionedTransitionArtifactR\x1bversionedTransitionArtifact\x12!\n" + + "\fnamespace_id\x18\x06 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\a \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\b \x01(\tR\x05runId\x12!\n" + + "\farchetype_id\x18\t \x01(\rR\varchetypeIdJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03J\x04\b\x03\x10\x04J\x04\b\x04\x10\x05\"\xa4\x05\n" + + "\x1bVersionedTransitionArtifact\x12\x9f\x01\n" + + "'sync_workflow_state_mutation_attributes\x18\x01 \x01(\v2G.temporal.server.api.replication.v1.SyncWorkflowStateMutationAttributesH\x00R#syncWorkflowStateMutationAttributes\x12\x9f\x01\n" + + "'sync_workflow_state_snapshot_attributes\x18\x02 \x01(\v2G.temporal.server.api.replication.v1.SyncWorkflowStateSnapshotAttributesH\x00R#syncWorkflowStateSnapshotAttributes\x12E\n" + + "\revent_batches\x18\x03 \x03(\v2 .temporal.api.common.v1.DataBlobR\feventBatches\x12P\n" + + "\fnew_run_info\x18\x04 \x01(\v2..temporal.server.api.replication.v1.NewRunInfoR\n" + + "newRunInfo\x12\"\n" + + "\ris_first_sync\x18\x05 \x01(\bR\visFirstSync\x12>\n" + + "\x1cis_close_transfer_task_acked\x18\x06 \x01(\bR\x18isCloseTransferTaskAcked\x120\n" + + "\x14is_force_replication\x18\a \x01(\bR\x12isForceReplicationB\x12\n" + + "\x10state_attributes\"s\n" + + "\x16MigrationExecutionInfo\x12\x1f\n" + + "\vbusiness_id\x18\x01 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x02 \x01(\tR\x05runId\x12!\n" + + "\farchetype_id\x18\x03 \x01(\rR\varchetypeIdB5Z3go.temporal.io/server/api/replication/v1;repicationb\x06proto3" + +var ( + file_temporal_server_api_replication_v1_message_proto_rawDescOnce 
sync.Once + file_temporal_server_api_replication_v1_message_proto_rawDescData []byte +) + +func file_temporal_server_api_replication_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_api_replication_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_api_replication_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_replication_v1_message_proto_rawDesc), len(file_temporal_server_api_replication_v1_message_proto_rawDesc))) + }) + return file_temporal_server_api_replication_v1_message_proto_rawDescData +} + +var file_temporal_server_api_replication_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 23) +var file_temporal_server_api_replication_v1_message_proto_goTypes = []any{ + (*ReplicationTask)(nil), // 0: temporal.server.api.replication.v1.ReplicationTask + (*ReplicationToken)(nil), // 1: temporal.server.api.replication.v1.ReplicationToken + (*SyncShardStatus)(nil), // 2: temporal.server.api.replication.v1.SyncShardStatus + (*SyncReplicationState)(nil), // 3: temporal.server.api.replication.v1.SyncReplicationState + (*ReplicationState)(nil), // 4: temporal.server.api.replication.v1.ReplicationState + (*ReplicationMessages)(nil), // 5: temporal.server.api.replication.v1.ReplicationMessages + (*WorkflowReplicationMessages)(nil), // 6: temporal.server.api.replication.v1.WorkflowReplicationMessages + (*ReplicationTaskInfo)(nil), // 7: temporal.server.api.replication.v1.ReplicationTaskInfo + (*NamespaceTaskAttributes)(nil), // 8: temporal.server.api.replication.v1.NamespaceTaskAttributes + (*SyncShardStatusTaskAttributes)(nil), // 9: temporal.server.api.replication.v1.SyncShardStatusTaskAttributes + (*SyncActivityTaskAttributes)(nil), // 10: temporal.server.api.replication.v1.SyncActivityTaskAttributes + (*HistoryTaskAttributes)(nil), // 11: temporal.server.api.replication.v1.HistoryTaskAttributes + (*SyncWorkflowStateTaskAttributes)(nil), // 12: 
temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributes + (*TaskQueueUserDataAttributes)(nil), // 13: temporal.server.api.replication.v1.TaskQueueUserDataAttributes + (*SyncHSMAttributes)(nil), // 14: temporal.server.api.replication.v1.SyncHSMAttributes + (*BackfillHistoryTaskAttributes)(nil), // 15: temporal.server.api.replication.v1.BackfillHistoryTaskAttributes + (*NewRunInfo)(nil), // 16: temporal.server.api.replication.v1.NewRunInfo + (*SyncWorkflowStateMutationAttributes)(nil), // 17: temporal.server.api.replication.v1.SyncWorkflowStateMutationAttributes + (*SyncWorkflowStateSnapshotAttributes)(nil), // 18: temporal.server.api.replication.v1.SyncWorkflowStateSnapshotAttributes + (*VerifyVersionedTransitionTaskAttributes)(nil), // 19: temporal.server.api.replication.v1.VerifyVersionedTransitionTaskAttributes + (*SyncVersionedTransitionTaskAttributes)(nil), // 20: temporal.server.api.replication.v1.SyncVersionedTransitionTaskAttributes + (*VersionedTransitionArtifact)(nil), // 21: temporal.server.api.replication.v1.VersionedTransitionArtifact + (*MigrationExecutionInfo)(nil), // 22: temporal.server.api.replication.v1.MigrationExecutionInfo + (v1.ReplicationTaskType)(0), // 23: temporal.server.api.enums.v1.ReplicationTaskType + (*v11.DataBlob)(nil), // 24: temporal.api.common.v1.DataBlob + (*timestamppb.Timestamp)(nil), // 25: google.protobuf.Timestamp + (v1.TaskPriority)(0), // 26: temporal.server.api.enums.v1.TaskPriority + (*v12.VersionedTransition)(nil), // 27: temporal.server.api.persistence.v1.VersionedTransition + (*v12.ReplicationTaskInfo)(nil), // 28: temporal.server.api.persistence.v1.ReplicationTaskInfo + (v1.ReplicationFlowControlCommand)(0), // 29: temporal.server.api.enums.v1.ReplicationFlowControlCommand + (v1.TaskType)(0), // 30: temporal.server.api.enums.v1.TaskType + (v1.NamespaceOperation)(0), // 31: temporal.server.api.enums.v1.NamespaceOperation + (*v13.NamespaceInfo)(nil), // 32: temporal.api.namespace.v1.NamespaceInfo + 
(*v13.NamespaceConfig)(nil), // 33: temporal.api.namespace.v1.NamespaceConfig + (*v14.NamespaceReplicationConfig)(nil), // 34: temporal.api.replication.v1.NamespaceReplicationConfig + (*v14.FailoverStatus)(nil), // 35: temporal.api.replication.v1.FailoverStatus + (*v11.Payloads)(nil), // 36: temporal.api.common.v1.Payloads + (*v15.Failure)(nil), // 37: temporal.api.failure.v1.Failure + (*v16.VersionHistory)(nil), // 38: temporal.server.api.history.v1.VersionHistory + (*v17.BaseExecutionInfo)(nil), // 39: temporal.server.api.workflow.v1.BaseExecutionInfo + (*durationpb.Duration)(nil), // 40: google.protobuf.Duration + (*v16.VersionHistoryItem)(nil), // 41: temporal.server.api.history.v1.VersionHistoryItem + (*v12.WorkflowMutableState)(nil), // 42: temporal.server.api.persistence.v1.WorkflowMutableState + (*v12.TaskQueueUserData)(nil), // 43: temporal.server.api.persistence.v1.TaskQueueUserData + (*v12.StateMachineNode)(nil), // 44: temporal.server.api.persistence.v1.StateMachineNode + (*v12.WorkflowMutableStateMutation)(nil), // 45: temporal.server.api.persistence.v1.WorkflowMutableStateMutation +} +var file_temporal_server_api_replication_v1_message_proto_depIdxs = []int32{ + 23, // 0: temporal.server.api.replication.v1.ReplicationTask.task_type:type_name -> temporal.server.api.enums.v1.ReplicationTaskType + 8, // 1: temporal.server.api.replication.v1.ReplicationTask.namespace_task_attributes:type_name -> temporal.server.api.replication.v1.NamespaceTaskAttributes + 9, // 2: temporal.server.api.replication.v1.ReplicationTask.sync_shard_status_task_attributes:type_name -> temporal.server.api.replication.v1.SyncShardStatusTaskAttributes + 10, // 3: temporal.server.api.replication.v1.ReplicationTask.sync_activity_task_attributes:type_name -> temporal.server.api.replication.v1.SyncActivityTaskAttributes + 11, // 4: temporal.server.api.replication.v1.ReplicationTask.history_task_attributes:type_name -> temporal.server.api.replication.v1.HistoryTaskAttributes + 12, // 5: 
temporal.server.api.replication.v1.ReplicationTask.sync_workflow_state_task_attributes:type_name -> temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributes + 13, // 6: temporal.server.api.replication.v1.ReplicationTask.task_queue_user_data_attributes:type_name -> temporal.server.api.replication.v1.TaskQueueUserDataAttributes + 14, // 7: temporal.server.api.replication.v1.ReplicationTask.sync_hsm_attributes:type_name -> temporal.server.api.replication.v1.SyncHSMAttributes + 15, // 8: temporal.server.api.replication.v1.ReplicationTask.backfill_history_task_attributes:type_name -> temporal.server.api.replication.v1.BackfillHistoryTaskAttributes + 19, // 9: temporal.server.api.replication.v1.ReplicationTask.verify_versioned_transition_task_attributes:type_name -> temporal.server.api.replication.v1.VerifyVersionedTransitionTaskAttributes + 20, // 10: temporal.server.api.replication.v1.ReplicationTask.sync_versioned_transition_task_attributes:type_name -> temporal.server.api.replication.v1.SyncVersionedTransitionTaskAttributes + 24, // 11: temporal.server.api.replication.v1.ReplicationTask.data:type_name -> temporal.api.common.v1.DataBlob + 25, // 12: temporal.server.api.replication.v1.ReplicationTask.visibility_time:type_name -> google.protobuf.Timestamp + 26, // 13: temporal.server.api.replication.v1.ReplicationTask.priority:type_name -> temporal.server.api.enums.v1.TaskPriority + 27, // 14: temporal.server.api.replication.v1.ReplicationTask.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 28, // 15: temporal.server.api.replication.v1.ReplicationTask.raw_task_info:type_name -> temporal.server.api.persistence.v1.ReplicationTaskInfo + 25, // 16: temporal.server.api.replication.v1.ReplicationToken.last_processed_visibility_time:type_name -> google.protobuf.Timestamp + 25, // 17: temporal.server.api.replication.v1.SyncShardStatus.status_time:type_name -> google.protobuf.Timestamp + 25, // 18: 
temporal.server.api.replication.v1.SyncReplicationState.inclusive_low_watermark_time:type_name -> google.protobuf.Timestamp + 4, // 19: temporal.server.api.replication.v1.SyncReplicationState.high_priority_state:type_name -> temporal.server.api.replication.v1.ReplicationState + 4, // 20: temporal.server.api.replication.v1.SyncReplicationState.low_priority_state:type_name -> temporal.server.api.replication.v1.ReplicationState + 25, // 21: temporal.server.api.replication.v1.ReplicationState.inclusive_low_watermark_time:type_name -> google.protobuf.Timestamp + 29, // 22: temporal.server.api.replication.v1.ReplicationState.flow_control_command:type_name -> temporal.server.api.enums.v1.ReplicationFlowControlCommand + 0, // 23: temporal.server.api.replication.v1.ReplicationMessages.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask + 2, // 24: temporal.server.api.replication.v1.ReplicationMessages.sync_shard_status:type_name -> temporal.server.api.replication.v1.SyncShardStatus + 0, // 25: temporal.server.api.replication.v1.WorkflowReplicationMessages.replication_tasks:type_name -> temporal.server.api.replication.v1.ReplicationTask + 25, // 26: temporal.server.api.replication.v1.WorkflowReplicationMessages.exclusive_high_watermark_time:type_name -> google.protobuf.Timestamp + 26, // 27: temporal.server.api.replication.v1.WorkflowReplicationMessages.priority:type_name -> temporal.server.api.enums.v1.TaskPriority + 30, // 28: temporal.server.api.replication.v1.ReplicationTaskInfo.task_type:type_name -> temporal.server.api.enums.v1.TaskType + 26, // 29: temporal.server.api.replication.v1.ReplicationTaskInfo.priority:type_name -> temporal.server.api.enums.v1.TaskPriority + 31, // 30: temporal.server.api.replication.v1.NamespaceTaskAttributes.namespace_operation:type_name -> temporal.server.api.enums.v1.NamespaceOperation + 32, // 31: temporal.server.api.replication.v1.NamespaceTaskAttributes.info:type_name -> 
temporal.api.namespace.v1.NamespaceInfo + 33, // 32: temporal.server.api.replication.v1.NamespaceTaskAttributes.config:type_name -> temporal.api.namespace.v1.NamespaceConfig + 34, // 33: temporal.server.api.replication.v1.NamespaceTaskAttributes.replication_config:type_name -> temporal.api.replication.v1.NamespaceReplicationConfig + 35, // 34: temporal.server.api.replication.v1.NamespaceTaskAttributes.failover_history:type_name -> temporal.api.replication.v1.FailoverStatus + 25, // 35: temporal.server.api.replication.v1.SyncShardStatusTaskAttributes.status_time:type_name -> google.protobuf.Timestamp + 25, // 36: temporal.server.api.replication.v1.SyncActivityTaskAttributes.scheduled_time:type_name -> google.protobuf.Timestamp + 25, // 37: temporal.server.api.replication.v1.SyncActivityTaskAttributes.started_time:type_name -> google.protobuf.Timestamp + 25, // 38: temporal.server.api.replication.v1.SyncActivityTaskAttributes.last_heartbeat_time:type_name -> google.protobuf.Timestamp + 36, // 39: temporal.server.api.replication.v1.SyncActivityTaskAttributes.details:type_name -> temporal.api.common.v1.Payloads + 37, // 40: temporal.server.api.replication.v1.SyncActivityTaskAttributes.last_failure:type_name -> temporal.api.failure.v1.Failure + 38, // 41: temporal.server.api.replication.v1.SyncActivityTaskAttributes.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 39, // 42: temporal.server.api.replication.v1.SyncActivityTaskAttributes.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo + 25, // 43: temporal.server.api.replication.v1.SyncActivityTaskAttributes.first_scheduled_time:type_name -> google.protobuf.Timestamp + 25, // 44: temporal.server.api.replication.v1.SyncActivityTaskAttributes.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 40, // 45: temporal.server.api.replication.v1.SyncActivityTaskAttributes.retry_initial_interval:type_name -> google.protobuf.Duration + 40, // 46: 
temporal.server.api.replication.v1.SyncActivityTaskAttributes.retry_maximum_interval:type_name -> google.protobuf.Duration + 41, // 47: temporal.server.api.replication.v1.HistoryTaskAttributes.version_history_items:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 24, // 48: temporal.server.api.replication.v1.HistoryTaskAttributes.events:type_name -> temporal.api.common.v1.DataBlob + 24, // 49: temporal.server.api.replication.v1.HistoryTaskAttributes.new_run_events:type_name -> temporal.api.common.v1.DataBlob + 39, // 50: temporal.server.api.replication.v1.HistoryTaskAttributes.base_execution_info:type_name -> temporal.server.api.workflow.v1.BaseExecutionInfo + 24, // 51: temporal.server.api.replication.v1.HistoryTaskAttributes.events_batches:type_name -> temporal.api.common.v1.DataBlob + 42, // 52: temporal.server.api.replication.v1.SyncWorkflowStateTaskAttributes.workflow_state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 43, // 53: temporal.server.api.replication.v1.TaskQueueUserDataAttributes.user_data:type_name -> temporal.server.api.persistence.v1.TaskQueueUserData + 38, // 54: temporal.server.api.replication.v1.SyncHSMAttributes.version_history:type_name -> temporal.server.api.history.v1.VersionHistory + 44, // 55: temporal.server.api.replication.v1.SyncHSMAttributes.state_machine_node:type_name -> temporal.server.api.persistence.v1.StateMachineNode + 41, // 56: temporal.server.api.replication.v1.BackfillHistoryTaskAttributes.event_version_history:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 24, // 57: temporal.server.api.replication.v1.BackfillHistoryTaskAttributes.event_batches:type_name -> temporal.api.common.v1.DataBlob + 16, // 58: temporal.server.api.replication.v1.BackfillHistoryTaskAttributes.new_run_info:type_name -> temporal.server.api.replication.v1.NewRunInfo + 24, // 59: temporal.server.api.replication.v1.NewRunInfo.event_batch:type_name -> temporal.api.common.v1.DataBlob + 27, // 60: 
temporal.server.api.replication.v1.SyncWorkflowStateMutationAttributes.exclusive_start_versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 45, // 61: temporal.server.api.replication.v1.SyncWorkflowStateMutationAttributes.state_mutation:type_name -> temporal.server.api.persistence.v1.WorkflowMutableStateMutation + 42, // 62: temporal.server.api.replication.v1.SyncWorkflowStateSnapshotAttributes.state:type_name -> temporal.server.api.persistence.v1.WorkflowMutableState + 41, // 63: temporal.server.api.replication.v1.VerifyVersionedTransitionTaskAttributes.event_version_history:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 21, // 64: temporal.server.api.replication.v1.SyncVersionedTransitionTaskAttributes.versioned_transition_artifact:type_name -> temporal.server.api.replication.v1.VersionedTransitionArtifact + 17, // 65: temporal.server.api.replication.v1.VersionedTransitionArtifact.sync_workflow_state_mutation_attributes:type_name -> temporal.server.api.replication.v1.SyncWorkflowStateMutationAttributes + 18, // 66: temporal.server.api.replication.v1.VersionedTransitionArtifact.sync_workflow_state_snapshot_attributes:type_name -> temporal.server.api.replication.v1.SyncWorkflowStateSnapshotAttributes + 24, // 67: temporal.server.api.replication.v1.VersionedTransitionArtifact.event_batches:type_name -> temporal.api.common.v1.DataBlob + 16, // 68: temporal.server.api.replication.v1.VersionedTransitionArtifact.new_run_info:type_name -> temporal.server.api.replication.v1.NewRunInfo + 69, // [69:69] is the sub-list for method output_type + 69, // [69:69] is the sub-list for method input_type + 69, // [69:69] is the sub-list for extension type_name + 69, // [69:69] is the sub-list for extension extendee + 0, // [0:69] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_replication_v1_message_proto_init() } +func file_temporal_server_api_replication_v1_message_proto_init() { + if 
File_temporal_server_api_replication_v1_message_proto != nil { + return + } + file_temporal_server_api_replication_v1_message_proto_msgTypes[0].OneofWrappers = []any{ + (*ReplicationTask_NamespaceTaskAttributes)(nil), + (*ReplicationTask_SyncShardStatusTaskAttributes)(nil), + (*ReplicationTask_SyncActivityTaskAttributes)(nil), + (*ReplicationTask_HistoryTaskAttributes)(nil), + (*ReplicationTask_SyncWorkflowStateTaskAttributes)(nil), + (*ReplicationTask_TaskQueueUserDataAttributes)(nil), + (*ReplicationTask_SyncHsmAttributes)(nil), + (*ReplicationTask_BackfillHistoryTaskAttributes)(nil), + (*ReplicationTask_VerifyVersionedTransitionTaskAttributes)(nil), + (*ReplicationTask_SyncVersionedTransitionTaskAttributes)(nil), + } + file_temporal_server_api_replication_v1_message_proto_msgTypes[21].OneofWrappers = []any{ + (*VersionedTransitionArtifact_SyncWorkflowStateMutationAttributes)(nil), + (*VersionedTransitionArtifact_SyncWorkflowStateSnapshotAttributes)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_replication_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_replication_v1_message_proto_rawDesc), len(file_temporal_server_api_replication_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 13, + NumMessages: 23, NumExtensions: 0, NumServices: 0, }, @@ -1813,7 +2476,6 @@ func file_temporal_server_api_replication_v1_message_proto_init() { MessageInfos: file_temporal_server_api_replication_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_replication_v1_message_proto = out.File - file_temporal_server_api_replication_v1_message_proto_rawDesc = nil file_temporal_server_api_replication_v1_message_proto_goTypes = nil file_temporal_server_api_replication_v1_message_proto_depIdxs = nil } diff --git a/api/routing/v1/extension.go-helpers.pb.go b/api/routing/v1/extension.go-helpers.pb.go new 
file mode 100644 index 00000000000..d15af590420 --- /dev/null +++ b/api/routing/v1/extension.go-helpers.pb.go @@ -0,0 +1,43 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package routing + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type RoutingOptions to the protobuf v3 wire format +func (val *RoutingOptions) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RoutingOptions from the protobuf v3 wire format +func (val *RoutingOptions) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RoutingOptions) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RoutingOptions values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RoutingOptions) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RoutingOptions + switch t := that.(type) { + case *RoutingOptions: + that1 = t + case RoutingOptions: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/routing/v1/extension.pb.go b/api/routing/v1/extension.pb.go new file mode 100644 index 00000000000..0b8e977e2c1 --- /dev/null +++ b/api/routing/v1/extension.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/routing/v1/extension.proto + +package routing + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RoutingOptions struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Requests will be routed to a random shard. + Random bool `protobuf:"varint,1,opt,name=random,proto3" json:"random,omitempty"` + // Requests may specify how to obtain the namespace ID. Defaults to the "namespace_id" field. + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // Request will be routed by resolving the namespace ID and business ID to a given shard. + BusinessId string `protobuf:"bytes,3,opt,name=business_id,json=businessId,proto3" json:"business_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RoutingOptions) Reset() { + *x = RoutingOptions{} + mi := &file_temporal_server_api_routing_v1_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RoutingOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RoutingOptions) ProtoMessage() {} + +func (x *RoutingOptions) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_routing_v1_extension_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RoutingOptions.ProtoReflect.Descriptor instead. 
+func (*RoutingOptions) Descriptor() ([]byte, []int) { + return file_temporal_server_api_routing_v1_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *RoutingOptions) GetRandom() bool { + if x != nil { + return x.Random + } + return false +} + +func (x *RoutingOptions) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RoutingOptions) GetBusinessId() string { + if x != nil { + return x.BusinessId + } + return "" +} + +var file_temporal_server_api_routing_v1_extension_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*RoutingOptions)(nil), + Field: 50234, + Name: "temporal.server.api.routing.v1.routing", + Tag: "bytes,50234,opt,name=routing", + Filename: "temporal/server/api/routing/v1/extension.proto", + }, +} + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional temporal.server.api.routing.v1.RoutingOptions routing = 50234; + E_Routing = &file_temporal_server_api_routing_v1_extension_proto_extTypes[0] +) + +var File_temporal_server_api_routing_v1_extension_proto protoreflect.FileDescriptor + +const file_temporal_server_api_routing_v1_extension_proto_rawDesc = "" + + "\n" + + ".temporal/server/api/routing/v1/extension.proto\x12\x1etemporal.server.api.routing.v1\x1a google/protobuf/descriptor.proto\"l\n" + + "\x0eRoutingOptions\x12\x16\n" + + "\x06random\x18\x01 \x01(\bR\x06random\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vbusiness_id\x18\x03 \x01(\tR\n" + + "businessId:m\n" + + "\arouting\x12\x1e.google.protobuf.MethodOptions\x18\xba\x88\x03 \x01(\v2..temporal.server.api.routing.v1.RoutingOptionsR\arouting\x88\x01\x01B.Z,go.temporal.io/server/api/routing/v1;routingb\x06proto3" + +var ( + file_temporal_server_api_routing_v1_extension_proto_rawDescOnce sync.Once + file_temporal_server_api_routing_v1_extension_proto_rawDescData []byte +) + +func 
file_temporal_server_api_routing_v1_extension_proto_rawDescGZIP() []byte { + file_temporal_server_api_routing_v1_extension_proto_rawDescOnce.Do(func() { + file_temporal_server_api_routing_v1_extension_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_routing_v1_extension_proto_rawDesc), len(file_temporal_server_api_routing_v1_extension_proto_rawDesc))) + }) + return file_temporal_server_api_routing_v1_extension_proto_rawDescData +} + +var file_temporal_server_api_routing_v1_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_temporal_server_api_routing_v1_extension_proto_goTypes = []any{ + (*RoutingOptions)(nil), // 0: temporal.server.api.routing.v1.RoutingOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions +} +var file_temporal_server_api_routing_v1_extension_proto_depIdxs = []int32{ + 1, // 0: temporal.server.api.routing.v1.routing:extendee -> google.protobuf.MethodOptions + 0, // 1: temporal.server.api.routing.v1.routing:type_name -> temporal.server.api.routing.v1.RoutingOptions + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 1, // [1:2] is the sub-list for extension type_name + 0, // [0:1] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_routing_v1_extension_proto_init() } +func file_temporal_server_api_routing_v1_extension_proto_init() { + if File_temporal_server_api_routing_v1_extension_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_routing_v1_extension_proto_rawDesc), len(file_temporal_server_api_routing_v1_extension_proto_rawDesc)), + NumEnums: 0, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: 
file_temporal_server_api_routing_v1_extension_proto_goTypes, + DependencyIndexes: file_temporal_server_api_routing_v1_extension_proto_depIdxs, + MessageInfos: file_temporal_server_api_routing_v1_extension_proto_msgTypes, + ExtensionInfos: file_temporal_server_api_routing_v1_extension_proto_extTypes, + }.Build() + File_temporal_server_api_routing_v1_extension_proto = out.File + file_temporal_server_api_routing_v1_extension_proto_goTypes = nil + file_temporal_server_api_routing_v1_extension_proto_depIdxs = nil +} diff --git a/api/schedule/v1/message.go-helpers.pb.go b/api/schedule/v1/message.go-helpers.pb.go index 0dfd27ffc98..6f01b7b49a6 100644 --- a/api/schedule/v1/message.go-helpers.pb.go +++ b/api/schedule/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package schedule @@ -66,6 +42,43 @@ func (this *BufferedStart) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type CompletedResult to the protobuf v3 wire format +func (val *CompletedResult) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CompletedResult from the protobuf v3 wire format +func (val *CompletedResult) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CompletedResult) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CompletedResult values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CompletedResult) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CompletedResult + switch t := that.(type) { + case *CompletedResult: + that1 = t + case CompletedResult: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type InternalState to the protobuf v3 wire format func (val *InternalState) Marshal() ([]byte, error) { return proto.Marshal(val) diff --git a/api/schedule/v1/message.pb.go b/api/schedule/v1/message.pb.go index 5161d25b61c..513c6c1d61d 100644 --- a/api/schedule/v1/message.pb.go +++ b/api/schedule/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package schedule import ( reflect "reflect" sync "sync" + unsafe "unsafe" v12 "go.temporal.io/api/common/v1" v1 "go.temporal.io/api/enums/v1" @@ -50,26 +29,56 @@ const ( ) type BufferedStart struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Nominal (pre-jitter) and Actual (post-jitter) time of action NominalTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=nominal_time,json=nominalTime,proto3" json:"nominal_time,omitempty"` ActualTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=actual_time,json=actualTime,proto3" json:"actual_time,omitempty"` + // Desired time is usually nil, which should be interpreted as == actual time, but for starts + // that are blocked behind another action, it is set to the close time of the previous action + // for more meaningful metrics. + DesiredTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=desired_time,json=desiredTime,proto3" json:"desired_time,omitempty"` // Overridden overlap policy OverlapPolicy v1.ScheduleOverlapPolicy `protobuf:"varint,3,opt,name=overlap_policy,json=overlapPolicy,proto3,enum=temporal.api.enums.v1.ScheduleOverlapPolicy" json:"overlap_policy,omitempty"` // Trigger-immediately or backfill Manual bool `protobuf:"varint,4,opt,name=manual,proto3" json:"manual,omitempty"` + // An ID generated when the action is buffered for deduplication during + // execution. Only used by the CHASM scheduler (otherwise left empty). + RequestId string `protobuf:"bytes,6,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Initially 0. Once a BufferedStart is ready to execute (overlap policies + // are resolved), its attempt count is set to 1. If a BufferedStart fails + // execution, its attempt count here is incremented. Only used by the CHASM + // scheduler (otherwise left empty). 
+ Attempt int64 `protobuf:"varint,7,opt,name=attempt,proto3" json:"attempt,omitempty"` + // If a BufferedStart is rate limited or needs to backoff while retrying, + // this time will be set, and the start will be held in the buffer until + // the backoff time has passed. Only used by the CHASM scheduler (otherwise + // ignored). + BackoffTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=backoff_time,json=backoffTime,proto3" json:"backoff_time,omitempty"` + // The precomputed workflow ID that should be used (as-is) when executing + // this start. Only used by the CHASM scheduler (otherwise ignored). + WorkflowId string `protobuf:"bytes,9,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // The run ID of the started workflow. Populated when the workflow is + // successfully started. Only used by the CHASM scheduler. + RunId string `protobuf:"bytes,10,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // The actual time the workflow was started. Populated when the workflow is + // successfully started. Only used by the CHASM scheduler. + StartTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // Populated when the workflow execution completes. Presence indicates the + // action is complete and retained for history. Only used by the CHASM scheduler. + Completed *CompletedResult `protobuf:"bytes,12,opt,name=completed,proto3" json:"completed,omitempty"` + // True when a running BufferedStart is known to have a Nexus callback + // attached. False when a BufferedStart originated from a migrated V1 + // workflow. Only used by CHASM scheduler, for migration from V1. 
+ HasCallback bool `protobuf:"varint,13,opt,name=has_callback,json=hasCallback,proto3" json:"has_callback,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BufferedStart) Reset() { *x = BufferedStart{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BufferedStart) String() string { @@ -80,7 +89,7 @@ func (*BufferedStart) ProtoMessage() {} func (x *BufferedStart) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -109,6 +118,13 @@ func (x *BufferedStart) GetActualTime() *timestamppb.Timestamp { return nil } +func (x *BufferedStart) GetDesiredTime() *timestamppb.Timestamp { + if x != nil { + return x.DesiredTime + } + return nil +} + func (x *BufferedStart) GetOverlapPolicy() v1.ScheduleOverlapPolicy { if x != nil { return x.OverlapPolicy @@ -123,11 +139,120 @@ func (x *BufferedStart) GetManual() bool { return false } -type InternalState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +func (x *BufferedStart) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *BufferedStart) GetAttempt() int64 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *BufferedStart) GetBackoffTime() *timestamppb.Timestamp { + if x != nil { + return x.BackoffTime + } + return nil +} + +func (x *BufferedStart) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *BufferedStart) GetRunId() string { + if 
x != nil { + return x.RunId + } + return "" +} + +func (x *BufferedStart) GetStartTime() *timestamppb.Timestamp { + if x != nil { + return x.StartTime + } + return nil +} + +func (x *BufferedStart) GetCompleted() *CompletedResult { + if x != nil { + return x.Completed + } + return nil +} + +func (x *BufferedStart) GetHasCallback() bool { + if x != nil { + return x.HasCallback + } + return false +} + +// Result when a workflow execution has completed. +// Only used by the CHASM scheduler. +type CompletedResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The final status of the workflow execution. + Status v1.WorkflowExecutionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` + // The time the workflow closed. + CloseTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} +func (x *CompletedResult) Reset() { + *x = CompletedResult{} + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CompletedResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CompletedResult) ProtoMessage() {} + +func (x *CompletedResult) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CompletedResult.ProtoReflect.Descriptor instead. 
+func (*CompletedResult) Descriptor() ([]byte, []int) { + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *CompletedResult) GetStatus() v1.WorkflowExecutionStatus { + if x != nil { + return x.Status + } + return v1.WorkflowExecutionStatus(0) +} + +func (x *CompletedResult) GetCloseTime() *timestamppb.Timestamp { + if x != nil { + return x.CloseTime + } + return nil +} + +type InternalState struct { + state protoimpl.MessageState `protogen:"open.v1"` Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` ScheduleId string `protobuf:"bytes,8,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` @@ -138,17 +263,18 @@ type InternalState struct { LastCompletionResult *v12.Payloads `protobuf:"bytes,5,opt,name=last_completion_result,json=lastCompletionResult,proto3" json:"last_completion_result,omitempty"` ContinuedFailure *v13.Failure `protobuf:"bytes,6,opt,name=continued_failure,json=continuedFailure,proto3" json:"continued_failure,omitempty"` // conflict token is implemented as simple sequence number - ConflictToken int64 `protobuf:"varint,7,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` - NeedRefresh bool `protobuf:"varint,9,opt,name=need_refresh,json=needRefresh,proto3" json:"need_refresh,omitempty"` + ConflictToken int64 `protobuf:"varint,7,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + NeedRefresh bool `protobuf:"varint,9,opt,name=need_refresh,json=needRefresh,proto3" json:"need_refresh,omitempty"` + PendingMigration bool `protobuf:"varint,11,opt,name=pending_migration,json=pendingMigration,proto3" json:"pending_migration,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *InternalState) Reset() { *x = InternalState{} - if 
protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InternalState) String() string { @@ -158,8 +284,8 @@ func (x *InternalState) String() string { func (*InternalState) ProtoMessage() {} func (x *InternalState) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -171,7 +297,7 @@ func (x *InternalState) ProtoReflect() protoreflect.Message { // Deprecated: Use InternalState.ProtoReflect.Descriptor instead. 
func (*InternalState) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{1} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{2} } func (x *InternalState) GetNamespace() string { @@ -244,24 +370,28 @@ func (x *InternalState) GetNeedRefresh() bool { return false } +func (x *InternalState) GetPendingMigration() bool { + if x != nil { + return x.PendingMigration + } + return false +} + type StartScheduleArgs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Schedule *v11.Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` + Info *v11.ScheduleInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + InitialPatch *v11.SchedulePatch `protobuf:"bytes,3,opt,name=initial_patch,json=initialPatch,proto3" json:"initial_patch,omitempty"` + State *InternalState `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` unknownFields protoimpl.UnknownFields - - Schedule *v11.Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` - Info *v11.ScheduleInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - InitialPatch *v11.SchedulePatch `protobuf:"bytes,3,opt,name=initial_patch,json=initialPatch,proto3" json:"initial_patch,omitempty"` - State *InternalState `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StartScheduleArgs) Reset() { *x = StartScheduleArgs{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartScheduleArgs) String() string { @@ -271,8 +401,8 @@ func 
(x *StartScheduleArgs) String() string { func (*StartScheduleArgs) ProtoMessage() {} func (x *StartScheduleArgs) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[3] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -284,7 +414,7 @@ func (x *StartScheduleArgs) ProtoReflect() protoreflect.Message { // Deprecated: Use StartScheduleArgs.ProtoReflect.Descriptor instead. func (*StartScheduleArgs) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{2} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{3} } func (x *StartScheduleArgs) GetSchedule() *v11.Schedule { @@ -316,21 +446,19 @@ func (x *StartScheduleArgs) GetState() *InternalState { } type FullUpdateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schedule *v11.Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` - ConflictToken int64 `protobuf:"varint,2,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Schedule *v11.Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` + ConflictToken int64 `protobuf:"varint,2,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + SearchAttributes *v12.SearchAttributes `protobuf:"bytes,3,opt,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FullUpdateRequest) Reset() { *x = FullUpdateRequest{} - if protoimpl.UnsafeEnabled { - mi := 
&file_temporal_server_api_schedule_v1_message_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *FullUpdateRequest) String() string { @@ -340,8 +468,8 @@ func (x *FullUpdateRequest) String() string { func (*FullUpdateRequest) ProtoMessage() {} func (x *FullUpdateRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[4] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -353,7 +481,7 @@ func (x *FullUpdateRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FullUpdateRequest.ProtoReflect.Descriptor instead. func (*FullUpdateRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{3} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{4} } func (x *FullUpdateRequest) GetSchedule() *v11.Schedule { @@ -370,23 +498,27 @@ func (x *FullUpdateRequest) GetConflictToken() int64 { return 0 } +func (x *FullUpdateRequest) GetSearchAttributes() *v12.SearchAttributes { + if x != nil { + return x.SearchAttributes + } + return nil +} + type DescribeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Schedule *v11.Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` + Info *v11.ScheduleInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + ConflictToken int64 `protobuf:"varint,3,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` unknownFields 
protoimpl.UnknownFields - - Schedule *v11.Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty"` - Info *v11.ScheduleInfo `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - ConflictToken int64 `protobuf:"varint,3,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DescribeResponse) Reset() { *x = DescribeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DescribeResponse) String() string { @@ -396,8 +528,8 @@ func (x *DescribeResponse) String() string { func (*DescribeResponse) ProtoMessage() {} func (x *DescribeResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[5] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -409,7 +541,7 @@ func (x *DescribeResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DescribeResponse.ProtoReflect.Descriptor instead. 
func (*DescribeResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{4} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{5} } func (x *DescribeResponse) GetSchedule() *v11.Schedule { @@ -434,24 +566,21 @@ func (x *DescribeResponse) GetConflictToken() int64 { } type WatchWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Note: this will be sent to the activity with empty execution.run_id, and // the run id that we started in first_execution_run_id. Execution *v12.WorkflowExecution `protobuf:"bytes,3,opt,name=execution,proto3" json:"execution,omitempty"` FirstExecutionRunId string `protobuf:"bytes,4,opt,name=first_execution_run_id,json=firstExecutionRunId,proto3" json:"first_execution_run_id,omitempty"` LongPoll bool `protobuf:"varint,5,opt,name=long_poll,json=longPoll,proto3" json:"long_poll,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WatchWorkflowRequest) Reset() { *x = WatchWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WatchWorkflowRequest) String() string { @@ -461,8 +590,8 @@ func (x *WatchWorkflowRequest) String() string { func (*WatchWorkflowRequest) ProtoMessage() {} func (x *WatchWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[6] + if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -474,7 +603,7 @@ func (x *WatchWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WatchWorkflowRequest.ProtoReflect.Descriptor instead. func (*WatchWorkflowRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{5} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{6} } func (x *WatchWorkflowRequest) GetExecution() *v12.WorkflowExecution { @@ -499,25 +628,24 @@ func (x *WatchWorkflowRequest) GetLongPoll() bool { } type WatchWorkflowResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Status v1.WorkflowExecutionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=temporal.api.enums.v1.WorkflowExecutionStatus" json:"status,omitempty"` - // Types that are assignable to ResultFailure: + // Types that are valid to be assigned to ResultFailure: // // *WatchWorkflowResponse_Result // *WatchWorkflowResponse_Failure ResultFailure isWatchWorkflowResponse_ResultFailure `protobuf_oneof:"result_failure"` + // Timestamp of close event + CloseTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=close_time,json=closeTime,proto3" json:"close_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WatchWorkflowResponse) Reset() { *x = WatchWorkflowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WatchWorkflowResponse) String() string { @@ -527,8 +655,8 @@ func (x 
*WatchWorkflowResponse) String() string { func (*WatchWorkflowResponse) ProtoMessage() {} func (x *WatchWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[7] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -540,7 +668,7 @@ func (x *WatchWorkflowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WatchWorkflowResponse.ProtoReflect.Descriptor instead. func (*WatchWorkflowResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{6} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{7} } func (x *WatchWorkflowResponse) GetStatus() v1.WorkflowExecutionStatus { @@ -550,23 +678,34 @@ func (x *WatchWorkflowResponse) GetStatus() v1.WorkflowExecutionStatus { return v1.WorkflowExecutionStatus(0) } -func (m *WatchWorkflowResponse) GetResultFailure() isWatchWorkflowResponse_ResultFailure { - if m != nil { - return m.ResultFailure +func (x *WatchWorkflowResponse) GetResultFailure() isWatchWorkflowResponse_ResultFailure { + if x != nil { + return x.ResultFailure } return nil } func (x *WatchWorkflowResponse) GetResult() *v12.Payloads { - if x, ok := x.GetResultFailure().(*WatchWorkflowResponse_Result); ok { - return x.Result + if x != nil { + if x, ok := x.ResultFailure.(*WatchWorkflowResponse_Result); ok { + return x.Result + } } return nil } func (x *WatchWorkflowResponse) GetFailure() *v13.Failure { - if x, ok := x.GetResultFailure().(*WatchWorkflowResponse_Failure); ok { - return x.Failure + if x != nil { + if x, ok := x.ResultFailure.(*WatchWorkflowResponse_Failure); ok { + return x.Failure + } + } + return nil +} + +func (x *WatchWorkflowResponse) GetCloseTime() *timestamppb.Timestamp { + if x != nil 
{ + return x.CloseTime } return nil } @@ -588,21 +727,18 @@ func (*WatchWorkflowResponse_Result) isWatchWorkflowResponse_ResultFailure() {} func (*WatchWorkflowResponse_Failure) isWatchWorkflowResponse_ResultFailure() {} type StartWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Request *v14.StartWorkflowExecutionRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` CompletedRateLimitSleep bool `protobuf:"varint,6,opt,name=completed_rate_limit_sleep,json=completedRateLimitSleep,proto3" json:"completed_rate_limit_sleep,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StartWorkflowRequest) Reset() { *x = StartWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartWorkflowRequest) String() string { @@ -612,8 +748,8 @@ func (x *StartWorkflowRequest) String() string { func (*StartWorkflowRequest) ProtoMessage() {} func (x *StartWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[8] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -625,7 +761,7 @@ func (x *StartWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*StartWorkflowRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{7} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{8} } func (x *StartWorkflowRequest) GetRequest() *v14.StartWorkflowExecutionRequest { @@ -643,21 +779,18 @@ func (x *StartWorkflowRequest) GetCompletedRateLimitSleep() bool { } type StartWorkflowResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` RealStartTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=real_start_time,json=realStartTime,proto3" json:"real_start_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StartWorkflowResponse) Reset() { *x = StartWorkflowResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StartWorkflowResponse) String() string { @@ -667,8 +800,8 @@ func (x *StartWorkflowResponse) String() string { func (*StartWorkflowResponse) ProtoMessage() {} func (x *StartWorkflowResponse) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[9] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -680,7 +813,7 @@ func (x *StartWorkflowResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use 
StartWorkflowResponse.ProtoReflect.Descriptor instead. func (*StartWorkflowResponse) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{8} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{9} } func (x *StartWorkflowResponse) GetRunId() string { @@ -698,24 +831,21 @@ func (x *StartWorkflowResponse) GetRealStartTime() *timestamppb.Timestamp { } type CancelWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` // Note: run id in execution is first execution run id - Execution *v12.WorkflowExecution `protobuf:"bytes,5,opt,name=execution,proto3" json:"execution,omitempty"` - Reason string `protobuf:"bytes,6,opt,name=reason,proto3" json:"reason,omitempty"` + Execution *v12.WorkflowExecution `protobuf:"bytes,5,opt,name=execution,proto3" json:"execution,omitempty"` + Reason string `protobuf:"bytes,6,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CancelWorkflowRequest) Reset() { *x = CancelWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *CancelWorkflowRequest) 
String() string { @@ -725,8 +855,8 @@ func (x *CancelWorkflowRequest) String() string { func (*CancelWorkflowRequest) ProtoMessage() {} func (x *CancelWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[10] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -738,7 +868,7 @@ func (x *CancelWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelWorkflowRequest.ProtoReflect.Descriptor instead. func (*CancelWorkflowRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{9} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{10} } func (x *CancelWorkflowRequest) GetRequestId() string { @@ -770,24 +900,21 @@ func (x *CancelWorkflowRequest) GetReason() string { } type TerminateWorkflowRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` // Note: run id in execution is first execution run id - Execution *v12.WorkflowExecution `protobuf:"bytes,5,opt,name=execution,proto3" json:"execution,omitempty"` - Reason string `protobuf:"bytes,6,opt,name=reason,proto3" json:"reason,omitempty"` + Execution *v12.WorkflowExecution `protobuf:"bytes,5,opt,name=execution,proto3" 
json:"execution,omitempty"` + Reason string `protobuf:"bytes,6,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TerminateWorkflowRequest) Reset() { *x = TerminateWorkflowRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TerminateWorkflowRequest) String() string { @@ -797,8 +924,8 @@ func (x *TerminateWorkflowRequest) String() string { func (*TerminateWorkflowRequest) ProtoMessage() {} func (x *TerminateWorkflowRequest) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[11] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -810,7 +937,7 @@ func (x *TerminateWorkflowRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use TerminateWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*TerminateWorkflowRequest) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{10} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{11} } func (x *TerminateWorkflowRequest) GetRequestId() string { @@ -842,10 +969,7 @@ func (x *TerminateWorkflowRequest) GetReason() string { } type NextTimeCache struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // workflow logic version (invalidate when changed) Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // start time that the results were calculated from @@ -854,18 +978,18 @@ type NextTimeCache struct { // offset from start_time. next_times has one value for each time in the cache. // nominal_times may have up to the same number of values, but it may also be shorter (or // empty), if the corresponding nominal time is equal to the next time. 
- NextTimes []int64 `protobuf:"varint,3,rep,packed,name=next_times,json=nextTimes,proto3" json:"next_times,omitempty"` - NominalTimes []int64 `protobuf:"varint,4,rep,packed,name=nominal_times,json=nominalTimes,proto3" json:"nominal_times,omitempty"` - Completed bool `protobuf:"varint,5,opt,name=completed,proto3" json:"completed,omitempty"` + NextTimes []int64 `protobuf:"varint,3,rep,packed,name=next_times,json=nextTimes,proto3" json:"next_times,omitempty"` + NominalTimes []int64 `protobuf:"varint,4,rep,packed,name=nominal_times,json=nominalTimes,proto3" json:"nominal_times,omitempty"` + Completed bool `protobuf:"varint,5,opt,name=completed,proto3" json:"completed,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NextTimeCache) Reset() { *x = NextTimeCache{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NextTimeCache) String() string { @@ -875,8 +999,8 @@ func (x *NextTimeCache) String() string { func (*NextTimeCache) ProtoMessage() {} func (x *NextTimeCache) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_schedule_v1_message_proto_msgTypes[12] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -888,7 +1012,7 @@ func (x *NextTimeCache) ProtoReflect() protoreflect.Message { // Deprecated: Use NextTimeCache.ProtoReflect.Descriptor instead. 
func (*NextTimeCache) Descriptor() ([]byte, []int) { - return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{11} + return file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP(), []int{12} } func (x *NextTimeCache) GetVersion() int64 { @@ -928,276 +1052,175 @@ func (x *NextTimeCache) GetCompleted() bool { var File_temporal_server_api_schedule_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_schedule_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x24, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, - 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, - 0x25, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x61, 0x69, - 0x6c, 0x75, 0x72, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, - 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x77, 0x6f, - 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x26, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 
0x70, 0x69, 0x2f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x36, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, - 0x31, 0x2f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x88, 0x02, 0x0a, 0x0d, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x41, 0x0a, 0x0c, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, - 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x54, 0x69, - 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x61, 0x70, 0x5f, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x4f, 0x76, 0x65, 
0x72, 0x6c, 0x61, 0x70, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x61, 0x70, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1a, 0x0a, 0x06, 0x6d, 0x61, 0x6e, 0x75, 0x61, - 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6d, 0x61, 0x6e, 0x75, 0x61, 0x6c, 0x42, 0x02, - 0x68, 0x00, 0x22, 0x87, 0x05, 0x0a, 0x0d, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4e, 0x0a, 0x13, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5b, 0x0a, 0x0f, 0x62, 0x75, 0x66, 0x66, 0x65, 0x72, - 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 
0x75, 0x66, 0x66, 0x65, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x0e, 0x62, 0x75, 0x66, - 0x66, 0x65, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x72, 0x74, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5a, - 0x0a, 0x11, 0x6f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, - 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x52, 0x10, 0x6f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x42, 0x61, 0x63, 0x6b, 0x66, 0x69, 0x6c, - 0x6c, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x5a, 0x0a, 0x16, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x73, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x51, 0x0a, 0x11, - 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x64, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x64, - 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x63, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0d, 0x63, 0x6f, 
0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x66, 0x72, - 0x65, 0x73, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x52, 0x65, - 0x66, 0x72, 0x65, 0x73, 0x68, 0x42, 0x02, 0x68, 0x00, 0x22, 0xb3, 0x02, 0x0a, 0x11, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x42, 0x0a, - 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, - 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x49, 0x6e, 0x66, 0x6f, - 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x50, 0x0a, 0x0d, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x27, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x50, 0x61, 0x74, 0x63, 0x68, 0x52, 0x0c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, - 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 
0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0x82, 0x01, - 0x0a, 0x11, 0x46, 0x75, 0x6c, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x5f, 0x74, 0x6f, - 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, - 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x42, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, - 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, - 0x6c, 0x65, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, - 0x3e, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x74, - 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x02, 0x68, 0x00, 0x12, 0x29, 0x0a, 0x0e, 
0x63, - 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x42, 0x02, 0x68, 0x00, 0x22, 0xbd, 0x01, 0x0a, 0x14, 0x57, 0x61, 0x74, 0x63, 0x68, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x37, 0x0a, 0x16, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x66, 0x69, 0x72, 0x73, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1f, 0x0a, 0x09, 0x6c, 0x6f, - 0x6e, 0x67, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6c, 0x6f, - 0x6e, 0x67, 0x50, 0x6f, 0x6c, 0x6c, 0x42, 0x02, 0x68, 0x00, 0x22, 0xf7, 0x01, 0x0a, 0x15, 0x57, 0x61, - 0x74, 0x63, 0x68, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2e, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, - 0x73, 0x74, 0x61, 
0x74, 0x75, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3e, 0x0a, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x48, 0x00, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x40, 0x0a, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, - 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x48, 0x00, 0x52, 0x07, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x42, 0x10, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x66, - 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, - 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5c, 0x0a, - 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x77, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x3f, 0x0a, 0x1a, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x6c, - 0x65, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 
0x6d, 0x69, 0x74, 0x53, 0x6c, 0x65, 0x65, 0x70, - 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, - 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x7a, 0x0a, 0x15, 0x53, 0x74, 0x61, 0x72, 0x74, 0x57, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x19, 0x0a, 0x06, - 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x46, 0x0a, 0x0f, 0x72, 0x65, 0x61, 0x6c, 0x5f, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x72, 0x65, 0x61, 0x6c, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc3, 0x01, 0x0a, 0x15, 0x43, 0x61, - 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x1e, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, - 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1a, 0x0a, 
0x06, 0x72, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xc6, 0x01, 0x0a, 0x18, 0x54, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, - 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x21, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, - 0x00, 0x12, 0x1e, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1a, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x22, 0xda, 0x01, 0x0a, 0x0d, 0x4e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x43, - 0x61, 0x63, 0x68, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x3d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, - 0x74, 
0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x6e, - 0x65, 0x78, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, - 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, - 0x0d, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x03, 0x52, 0x0c, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_temporal_server_api_schedule_v1_message_proto_rawDesc = "" + + "\n" + + "-temporal/server/api/schedule/v1/message.proto\x12\x1ftemporal.server.api.schedule.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a$temporal/api/enums/v1/schedule.proto\x1a$temporal/api/enums/v1/workflow.proto\x1a%temporal/api/failure/v1/message.proto\x1a&temporal/api/schedule/v1/message.proto\x1a6temporal/api/workflowservice/v1/request_response.proto\"\x95\x05\n" + + "\rBufferedStart\x12=\n" + + "\fnominal_time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\vnominalTime\x12;\n" + + "\vactual_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "actualTime\x12=\n" + + "\fdesired_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\vdesiredTime\x12S\n" + + "\x0eoverlap_policy\x18\x03 
\x01(\x0e2,.temporal.api.enums.v1.ScheduleOverlapPolicyR\roverlapPolicy\x12\x16\n" + + "\x06manual\x18\x04 \x01(\bR\x06manual\x12\x1d\n" + + "\n" + + "request_id\x18\x06 \x01(\tR\trequestId\x12\x18\n" + + "\aattempt\x18\a \x01(\x03R\aattempt\x12=\n" + + "\fbackoff_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\vbackoffTime\x12\x1f\n" + + "\vworkflow_id\x18\t \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\n" + + " \x01(\tR\x05runId\x129\n" + + "\n" + + "start_time\x18\v \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x12N\n" + + "\tcompleted\x18\f \x01(\v20.temporal.server.api.schedule.v1.CompletedResultR\tcompleted\x12!\n" + + "\fhas_callback\x18\r \x01(\bR\vhasCallback\"\x94\x01\n" + + "\x0fCompletedResult\x12F\n" + + "\x06status\x18\x01 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x129\n" + + "\n" + + "close_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTime\"\x8c\x05\n" + + "\rInternalState\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vschedule_id\x18\b \x01(\tR\n" + + "scheduleId\x12J\n" + + "\x13last_processed_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x11lastProcessedTime\x12W\n" + + "\x0fbuffered_starts\x18\x04 \x03(\v2..temporal.server.api.schedule.v1.BufferedStartR\x0ebufferedStarts\x12V\n" + + "\x11ongoing_backfills\x18\n" + + " \x03(\v2).temporal.api.schedule.v1.BackfillRequestR\x10ongoingBackfills\x12V\n" + + "\x16last_completion_result\x18\x05 \x01(\v2 .temporal.api.common.v1.PayloadsR\x14lastCompletionResult\x12M\n" + + "\x11continued_failure\x18\x06 \x01(\v2 .temporal.api.failure.v1.FailureR\x10continuedFailure\x12%\n" + + "\x0econflict_token\x18\a \x01(\x03R\rconflictToken\x12!\n" + + "\fneed_refresh\x18\t \x01(\bR\vneedRefresh\x12+\n" + + "\x11pending_migration\x18\v \x01(\bR\x10pendingMigration\"\xa3\x02\n" + + "\x11StartScheduleArgs\x12>\n" + + "\bschedule\x18\x01 
\x01(\v2\".temporal.api.schedule.v1.ScheduleR\bschedule\x12:\n" + + "\x04info\x18\x02 \x01(\v2&.temporal.api.schedule.v1.ScheduleInfoR\x04info\x12L\n" + + "\rinitial_patch\x18\x03 \x01(\v2'.temporal.api.schedule.v1.SchedulePatchR\finitialPatch\x12D\n" + + "\x05state\x18\x04 \x01(\v2..temporal.server.api.schedule.v1.InternalStateR\x05state\"\xd1\x01\n" + + "\x11FullUpdateRequest\x12>\n" + + "\bschedule\x18\x01 \x01(\v2\".temporal.api.schedule.v1.ScheduleR\bschedule\x12%\n" + + "\x0econflict_token\x18\x02 \x01(\x03R\rconflictToken\x12U\n" + + "\x11search_attributes\x18\x03 \x01(\v2(.temporal.api.common.v1.SearchAttributesR\x10searchAttributes\"\xb5\x01\n" + + "\x10DescribeResponse\x12>\n" + + "\bschedule\x18\x01 \x01(\v2\".temporal.api.schedule.v1.ScheduleR\bschedule\x12:\n" + + "\x04info\x18\x02 \x01(\v2&.temporal.api.schedule.v1.ScheduleInfoR\x04info\x12%\n" + + "\x0econflict_token\x18\x03 \x01(\x03R\rconflictToken\"\xb1\x01\n" + + "\x14WatchWorkflowRequest\x12G\n" + + "\texecution\x18\x03 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x123\n" + + "\x16first_execution_run_id\x18\x04 \x01(\tR\x13firstExecutionRunId\x12\x1b\n" + + "\tlong_poll\x18\x05 \x01(\bR\blongPoll\"\xa6\x02\n" + + "\x15WatchWorkflowResponse\x12F\n" + + "\x06status\x18\x01 \x01(\x0e2..temporal.api.enums.v1.WorkflowExecutionStatusR\x06status\x12:\n" + + "\x06result\x18\x02 \x01(\v2 .temporal.api.common.v1.PayloadsH\x00R\x06result\x12<\n" + + "\afailure\x18\x03 \x01(\v2 .temporal.api.failure.v1.FailureH\x00R\afailure\x129\n" + + "\n" + + "close_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\tcloseTimeB\x10\n" + + "\x0eresult_failure\"\xbf\x01\n" + + "\x14StartWorkflowRequest\x12X\n" + + "\arequest\x18\x02 \x01(\v2>.temporal.api.workflowservice.v1.StartWorkflowExecutionRequestR\arequest\x12;\n" + + "\x1acompleted_rate_limit_sleep\x18\x06 \x01(\bR\x17completedRateLimitSleepJ\x04\b\x03\x10\x04J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06\"r\n" + + "\x15StartWorkflowResponse\x12\x15\n" 
+ + "\x06run_id\x18\x01 \x01(\tR\x05runId\x12B\n" + + "\x0freal_start_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\rrealStartTime\"\xb3\x01\n" + + "\x15CancelWorkflowRequest\x12\x1d\n" + + "\n" + + "request_id\x18\x03 \x01(\tR\trequestId\x12\x1a\n" + + "\bidentity\x18\x04 \x01(\tR\bidentity\x12G\n" + + "\texecution\x18\x05 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12\x16\n" + + "\x06reason\x18\x06 \x01(\tR\x06reason\"\xb6\x01\n" + + "\x18TerminateWorkflowRequest\x12\x1d\n" + + "\n" + + "request_id\x18\x03 \x01(\tR\trequestId\x12\x1a\n" + + "\bidentity\x18\x04 \x01(\tR\bidentity\x12G\n" + + "\texecution\x18\x05 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12\x16\n" + + "\x06reason\x18\x06 \x01(\tR\x06reason\"\xc6\x01\n" + + "\rNextTimeCache\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x129\n" + + "\n" + + "start_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\tstartTime\x12\x1d\n" + + "\n" + + "next_times\x18\x03 \x03(\x03R\tnextTimes\x12#\n" + + "\rnominal_times\x18\x04 \x03(\x03R\fnominalTimes\x12\x1c\n" + + "\tcompleted\x18\x05 \x01(\bR\tcompletedB0Z.go.temporal.io/server/api/schedule/v1;scheduleb\x06proto3" var ( file_temporal_server_api_schedule_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_schedule_v1_message_proto_rawDescData = file_temporal_server_api_schedule_v1_message_proto_rawDesc + file_temporal_server_api_schedule_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_schedule_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_schedule_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_schedule_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_schedule_v1_message_proto_rawDescData) + file_temporal_server_api_schedule_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_schedule_v1_message_proto_rawDesc), 
len(file_temporal_server_api_schedule_v1_message_proto_rawDesc))) }) return file_temporal_server_api_schedule_v1_message_proto_rawDescData } -var file_temporal_server_api_schedule_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 12) -var file_temporal_server_api_schedule_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_schedule_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_temporal_server_api_schedule_v1_message_proto_goTypes = []any{ (*BufferedStart)(nil), // 0: temporal.server.api.schedule.v1.BufferedStart - (*InternalState)(nil), // 1: temporal.server.api.schedule.v1.InternalState - (*StartScheduleArgs)(nil), // 2: temporal.server.api.schedule.v1.StartScheduleArgs - (*FullUpdateRequest)(nil), // 3: temporal.server.api.schedule.v1.FullUpdateRequest - (*DescribeResponse)(nil), // 4: temporal.server.api.schedule.v1.DescribeResponse - (*WatchWorkflowRequest)(nil), // 5: temporal.server.api.schedule.v1.WatchWorkflowRequest - (*WatchWorkflowResponse)(nil), // 6: temporal.server.api.schedule.v1.WatchWorkflowResponse - (*StartWorkflowRequest)(nil), // 7: temporal.server.api.schedule.v1.StartWorkflowRequest - (*StartWorkflowResponse)(nil), // 8: temporal.server.api.schedule.v1.StartWorkflowResponse - (*CancelWorkflowRequest)(nil), // 9: temporal.server.api.schedule.v1.CancelWorkflowRequest - (*TerminateWorkflowRequest)(nil), // 10: temporal.server.api.schedule.v1.TerminateWorkflowRequest - (*NextTimeCache)(nil), // 11: temporal.server.api.schedule.v1.NextTimeCache - (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp - (v1.ScheduleOverlapPolicy)(0), // 13: temporal.api.enums.v1.ScheduleOverlapPolicy - (*v11.BackfillRequest)(nil), // 14: temporal.api.schedule.v1.BackfillRequest - (*v12.Payloads)(nil), // 15: temporal.api.common.v1.Payloads - (*v13.Failure)(nil), // 16: temporal.api.failure.v1.Failure - (*v11.Schedule)(nil), // 17: temporal.api.schedule.v1.Schedule - (*v11.ScheduleInfo)(nil), // 18: 
temporal.api.schedule.v1.ScheduleInfo - (*v11.SchedulePatch)(nil), // 19: temporal.api.schedule.v1.SchedulePatch - (*v12.WorkflowExecution)(nil), // 20: temporal.api.common.v1.WorkflowExecution - (v1.WorkflowExecutionStatus)(0), // 21: temporal.api.enums.v1.WorkflowExecutionStatus - (*v14.StartWorkflowExecutionRequest)(nil), // 22: temporal.api.workflowservice.v1.StartWorkflowExecutionRequest + (*CompletedResult)(nil), // 1: temporal.server.api.schedule.v1.CompletedResult + (*InternalState)(nil), // 2: temporal.server.api.schedule.v1.InternalState + (*StartScheduleArgs)(nil), // 3: temporal.server.api.schedule.v1.StartScheduleArgs + (*FullUpdateRequest)(nil), // 4: temporal.server.api.schedule.v1.FullUpdateRequest + (*DescribeResponse)(nil), // 5: temporal.server.api.schedule.v1.DescribeResponse + (*WatchWorkflowRequest)(nil), // 6: temporal.server.api.schedule.v1.WatchWorkflowRequest + (*WatchWorkflowResponse)(nil), // 7: temporal.server.api.schedule.v1.WatchWorkflowResponse + (*StartWorkflowRequest)(nil), // 8: temporal.server.api.schedule.v1.StartWorkflowRequest + (*StartWorkflowResponse)(nil), // 9: temporal.server.api.schedule.v1.StartWorkflowResponse + (*CancelWorkflowRequest)(nil), // 10: temporal.server.api.schedule.v1.CancelWorkflowRequest + (*TerminateWorkflowRequest)(nil), // 11: temporal.server.api.schedule.v1.TerminateWorkflowRequest + (*NextTimeCache)(nil), // 12: temporal.server.api.schedule.v1.NextTimeCache + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (v1.ScheduleOverlapPolicy)(0), // 14: temporal.api.enums.v1.ScheduleOverlapPolicy + (v1.WorkflowExecutionStatus)(0), // 15: temporal.api.enums.v1.WorkflowExecutionStatus + (*v11.BackfillRequest)(nil), // 16: temporal.api.schedule.v1.BackfillRequest + (*v12.Payloads)(nil), // 17: temporal.api.common.v1.Payloads + (*v13.Failure)(nil), // 18: temporal.api.failure.v1.Failure + (*v11.Schedule)(nil), // 19: temporal.api.schedule.v1.Schedule + (*v11.ScheduleInfo)(nil), // 20: 
temporal.api.schedule.v1.ScheduleInfo + (*v11.SchedulePatch)(nil), // 21: temporal.api.schedule.v1.SchedulePatch + (*v12.SearchAttributes)(nil), // 22: temporal.api.common.v1.SearchAttributes + (*v12.WorkflowExecution)(nil), // 23: temporal.api.common.v1.WorkflowExecution + (*v14.StartWorkflowExecutionRequest)(nil), // 24: temporal.api.workflowservice.v1.StartWorkflowExecutionRequest } var file_temporal_server_api_schedule_v1_message_proto_depIdxs = []int32{ - 12, // 0: temporal.server.api.schedule.v1.BufferedStart.nominal_time:type_name -> google.protobuf.Timestamp - 12, // 1: temporal.server.api.schedule.v1.BufferedStart.actual_time:type_name -> google.protobuf.Timestamp - 13, // 2: temporal.server.api.schedule.v1.BufferedStart.overlap_policy:type_name -> temporal.api.enums.v1.ScheduleOverlapPolicy - 12, // 3: temporal.server.api.schedule.v1.InternalState.last_processed_time:type_name -> google.protobuf.Timestamp - 0, // 4: temporal.server.api.schedule.v1.InternalState.buffered_starts:type_name -> temporal.server.api.schedule.v1.BufferedStart - 14, // 5: temporal.server.api.schedule.v1.InternalState.ongoing_backfills:type_name -> temporal.api.schedule.v1.BackfillRequest - 15, // 6: temporal.server.api.schedule.v1.InternalState.last_completion_result:type_name -> temporal.api.common.v1.Payloads - 16, // 7: temporal.server.api.schedule.v1.InternalState.continued_failure:type_name -> temporal.api.failure.v1.Failure - 17, // 8: temporal.server.api.schedule.v1.StartScheduleArgs.schedule:type_name -> temporal.api.schedule.v1.Schedule - 18, // 9: temporal.server.api.schedule.v1.StartScheduleArgs.info:type_name -> temporal.api.schedule.v1.ScheduleInfo - 19, // 10: temporal.server.api.schedule.v1.StartScheduleArgs.initial_patch:type_name -> temporal.api.schedule.v1.SchedulePatch - 1, // 11: temporal.server.api.schedule.v1.StartScheduleArgs.state:type_name -> temporal.server.api.schedule.v1.InternalState - 17, // 12: 
temporal.server.api.schedule.v1.FullUpdateRequest.schedule:type_name -> temporal.api.schedule.v1.Schedule - 17, // 13: temporal.server.api.schedule.v1.DescribeResponse.schedule:type_name -> temporal.api.schedule.v1.Schedule - 18, // 14: temporal.server.api.schedule.v1.DescribeResponse.info:type_name -> temporal.api.schedule.v1.ScheduleInfo - 20, // 15: temporal.server.api.schedule.v1.WatchWorkflowRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 21, // 16: temporal.server.api.schedule.v1.WatchWorkflowResponse.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus - 15, // 17: temporal.server.api.schedule.v1.WatchWorkflowResponse.result:type_name -> temporal.api.common.v1.Payloads - 16, // 18: temporal.server.api.schedule.v1.WatchWorkflowResponse.failure:type_name -> temporal.api.failure.v1.Failure - 22, // 19: temporal.server.api.schedule.v1.StartWorkflowRequest.request:type_name -> temporal.api.workflowservice.v1.StartWorkflowExecutionRequest - 12, // 20: temporal.server.api.schedule.v1.StartWorkflowResponse.real_start_time:type_name -> google.protobuf.Timestamp - 20, // 21: temporal.server.api.schedule.v1.CancelWorkflowRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 20, // 22: temporal.server.api.schedule.v1.TerminateWorkflowRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 12, // 23: temporal.server.api.schedule.v1.NextTimeCache.start_time:type_name -> google.protobuf.Timestamp - 24, // [24:24] is the sub-list for method output_type - 24, // [24:24] is the sub-list for method input_type - 24, // [24:24] is the sub-list for extension type_name - 24, // [24:24] is the sub-list for extension extendee - 0, // [0:24] is the sub-list for field type_name + 13, // 0: temporal.server.api.schedule.v1.BufferedStart.nominal_time:type_name -> google.protobuf.Timestamp + 13, // 1: temporal.server.api.schedule.v1.BufferedStart.actual_time:type_name -> google.protobuf.Timestamp + 13, // 2: 
temporal.server.api.schedule.v1.BufferedStart.desired_time:type_name -> google.protobuf.Timestamp + 14, // 3: temporal.server.api.schedule.v1.BufferedStart.overlap_policy:type_name -> temporal.api.enums.v1.ScheduleOverlapPolicy + 13, // 4: temporal.server.api.schedule.v1.BufferedStart.backoff_time:type_name -> google.protobuf.Timestamp + 13, // 5: temporal.server.api.schedule.v1.BufferedStart.start_time:type_name -> google.protobuf.Timestamp + 1, // 6: temporal.server.api.schedule.v1.BufferedStart.completed:type_name -> temporal.server.api.schedule.v1.CompletedResult + 15, // 7: temporal.server.api.schedule.v1.CompletedResult.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus + 13, // 8: temporal.server.api.schedule.v1.CompletedResult.close_time:type_name -> google.protobuf.Timestamp + 13, // 9: temporal.server.api.schedule.v1.InternalState.last_processed_time:type_name -> google.protobuf.Timestamp + 0, // 10: temporal.server.api.schedule.v1.InternalState.buffered_starts:type_name -> temporal.server.api.schedule.v1.BufferedStart + 16, // 11: temporal.server.api.schedule.v1.InternalState.ongoing_backfills:type_name -> temporal.api.schedule.v1.BackfillRequest + 17, // 12: temporal.server.api.schedule.v1.InternalState.last_completion_result:type_name -> temporal.api.common.v1.Payloads + 18, // 13: temporal.server.api.schedule.v1.InternalState.continued_failure:type_name -> temporal.api.failure.v1.Failure + 19, // 14: temporal.server.api.schedule.v1.StartScheduleArgs.schedule:type_name -> temporal.api.schedule.v1.Schedule + 20, // 15: temporal.server.api.schedule.v1.StartScheduleArgs.info:type_name -> temporal.api.schedule.v1.ScheduleInfo + 21, // 16: temporal.server.api.schedule.v1.StartScheduleArgs.initial_patch:type_name -> temporal.api.schedule.v1.SchedulePatch + 2, // 17: temporal.server.api.schedule.v1.StartScheduleArgs.state:type_name -> temporal.server.api.schedule.v1.InternalState + 19, // 18: 
temporal.server.api.schedule.v1.FullUpdateRequest.schedule:type_name -> temporal.api.schedule.v1.Schedule + 22, // 19: temporal.server.api.schedule.v1.FullUpdateRequest.search_attributes:type_name -> temporal.api.common.v1.SearchAttributes + 19, // 20: temporal.server.api.schedule.v1.DescribeResponse.schedule:type_name -> temporal.api.schedule.v1.Schedule + 20, // 21: temporal.server.api.schedule.v1.DescribeResponse.info:type_name -> temporal.api.schedule.v1.ScheduleInfo + 23, // 22: temporal.server.api.schedule.v1.WatchWorkflowRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 15, // 23: temporal.server.api.schedule.v1.WatchWorkflowResponse.status:type_name -> temporal.api.enums.v1.WorkflowExecutionStatus + 17, // 24: temporal.server.api.schedule.v1.WatchWorkflowResponse.result:type_name -> temporal.api.common.v1.Payloads + 18, // 25: temporal.server.api.schedule.v1.WatchWorkflowResponse.failure:type_name -> temporal.api.failure.v1.Failure + 13, // 26: temporal.server.api.schedule.v1.WatchWorkflowResponse.close_time:type_name -> google.protobuf.Timestamp + 24, // 27: temporal.server.api.schedule.v1.StartWorkflowRequest.request:type_name -> temporal.api.workflowservice.v1.StartWorkflowExecutionRequest + 13, // 28: temporal.server.api.schedule.v1.StartWorkflowResponse.real_start_time:type_name -> google.protobuf.Timestamp + 23, // 29: temporal.server.api.schedule.v1.CancelWorkflowRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 23, // 30: temporal.server.api.schedule.v1.TerminateWorkflowRequest.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 13, // 31: temporal.server.api.schedule.v1.NextTimeCache.start_time:type_name -> google.protobuf.Timestamp + 32, // [32:32] is the sub-list for method output_type + 32, // [32:32] is the sub-list for method input_type + 32, // [32:32] is the sub-list for extension type_name + 32, // [32:32] is the sub-list for extension extendee + 0, // [0:32] is the sub-list for 
field type_name } func init() { file_temporal_server_api_schedule_v1_message_proto_init() } @@ -1205,153 +1228,7 @@ func file_temporal_server_api_schedule_v1_message_proto_init() { if File_temporal_server_api_schedule_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_schedule_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BufferedStart); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InternalState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartScheduleArgs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FullUpdateRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DescribeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WatchWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - 
} - file_temporal_server_api_schedule_v1_message_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WatchWorkflowResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartWorkflowResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TerminateWorkflowRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NextTimeCache); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_temporal_server_api_schedule_v1_message_proto_msgTypes[6].OneofWrappers = []interface{}{ + file_temporal_server_api_schedule_v1_message_proto_msgTypes[7].OneofWrappers = []any{ (*WatchWorkflowResponse_Result)(nil), 
(*WatchWorkflowResponse_Failure)(nil), } @@ -1359,9 +1236,9 @@ func file_temporal_server_api_schedule_v1_message_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_schedule_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_schedule_v1_message_proto_rawDesc), len(file_temporal_server_api_schedule_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 12, + NumMessages: 13, NumExtensions: 0, NumServices: 0, }, @@ -1370,7 +1247,6 @@ func file_temporal_server_api_schedule_v1_message_proto_init() { MessageInfos: file_temporal_server_api_schedule_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_schedule_v1_message_proto = out.File - file_temporal_server_api_schedule_v1_message_proto_rawDesc = nil file_temporal_server_api_schedule_v1_message_proto_goTypes = nil file_temporal_server_api_schedule_v1_message_proto_depIdxs = nil } diff --git a/api/taskqueue/v1/message.go-helpers.pb.go b/api/taskqueue/v1/message.go-helpers.pb.go index 35d71d7cc4a..191ec828f23 100644 --- a/api/taskqueue/v1/message.go-helpers.pb.go +++ b/api/taskqueue/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package taskqueue @@ -65,3 +41,410 @@ func (this *TaskVersionDirective) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type FairLevel to the protobuf v3 wire format +func (val *FairLevel) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type FairLevel from the protobuf v3 wire format +func (val *FairLevel) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *FairLevel) Size() int { + return proto.Size(val) +} + +// Equal returns whether two FairLevel values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *FairLevel) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *FairLevel + switch t := that.(type) { + case *FairLevel: + that1 = t + case FairLevel: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InternalTaskQueueStatus to the protobuf v3 wire format +func (val *InternalTaskQueueStatus) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InternalTaskQueueStatus from the protobuf v3 wire format +func (val *InternalTaskQueueStatus) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InternalTaskQueueStatus) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InternalTaskQueueStatus values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InternalTaskQueueStatus) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InternalTaskQueueStatus + switch t := that.(type) { + case *InternalTaskQueueStatus: + that1 = t + case InternalTaskQueueStatus: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TaskQueueVersionInfoInternal to the protobuf v3 wire format +func (val *TaskQueueVersionInfoInternal) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TaskQueueVersionInfoInternal from the protobuf v3 wire format +func (val *TaskQueueVersionInfoInternal) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TaskQueueVersionInfoInternal) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TaskQueueVersionInfoInternal values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TaskQueueVersionInfoInternal) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TaskQueueVersionInfoInternal + switch t := that.(type) { + case *TaskQueueVersionInfoInternal: + that1 = t + case TaskQueueVersionInfoInternal: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PhysicalTaskQueueInfo to the protobuf v3 wire format +func (val *PhysicalTaskQueueInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PhysicalTaskQueueInfo from the protobuf v3 wire format +func (val *PhysicalTaskQueueInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PhysicalTaskQueueInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PhysicalTaskQueueInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PhysicalTaskQueueInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PhysicalTaskQueueInfo + switch t := that.(type) { + case *PhysicalTaskQueueInfo: + that1 = t + case PhysicalTaskQueueInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TaskQueuePartition to the protobuf v3 wire format +func (val *TaskQueuePartition) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TaskQueuePartition from the protobuf v3 wire format +func (val *TaskQueuePartition) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TaskQueuePartition) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TaskQueuePartition values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TaskQueuePartition) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TaskQueuePartition + switch t := that.(type) { + case *TaskQueuePartition: + that1 = t + case TaskQueuePartition: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkerCommandsPartitionId to the protobuf v3 wire format +func (val *WorkerCommandsPartitionId) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkerCommandsPartitionId from the protobuf v3 wire format +func (val *WorkerCommandsPartitionId) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkerCommandsPartitionId) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkerCommandsPartitionId values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkerCommandsPartitionId) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkerCommandsPartitionId + switch t := that.(type) { + case *WorkerCommandsPartitionId: + that1 = t + case WorkerCommandsPartitionId: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type BuildIdRedirectInfo to the protobuf v3 wire format +func (val *BuildIdRedirectInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BuildIdRedirectInfo from the protobuf v3 wire format +func (val *BuildIdRedirectInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BuildIdRedirectInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BuildIdRedirectInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BuildIdRedirectInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BuildIdRedirectInfo + switch t := that.(type) { + case *BuildIdRedirectInfo: + that1 = t + case BuildIdRedirectInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TaskForwardInfo to the protobuf v3 wire format +func (val *TaskForwardInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TaskForwardInfo from the protobuf v3 wire format +func (val *TaskForwardInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TaskForwardInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TaskForwardInfo values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TaskForwardInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TaskForwardInfo + switch t := that.(type) { + case *TaskForwardInfo: + that1 = t + case TaskForwardInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type EphemeralData to the protobuf v3 wire format +func (val *EphemeralData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type EphemeralData from the protobuf v3 wire format +func (val *EphemeralData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *EphemeralData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two EphemeralData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *EphemeralData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *EphemeralData + switch t := that.(type) { + case *EphemeralData: + that1 = t + case EphemeralData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type VersionedEphemeralData to the protobuf v3 wire format +func (val *VersionedEphemeralData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type VersionedEphemeralData from the protobuf v3 wire format +func (val *VersionedEphemeralData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *VersionedEphemeralData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two VersionedEphemeralData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *VersionedEphemeralData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *VersionedEphemeralData + switch t := that.(type) { + case *VersionedEphemeralData: + that1 = t + case VersionedEphemeralData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ClientPartitionCounts to the protobuf v3 wire format +func (val *ClientPartitionCounts) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ClientPartitionCounts from the protobuf v3 wire format +func (val *ClientPartitionCounts) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ClientPartitionCounts) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ClientPartitionCounts values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ClientPartitionCounts) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ClientPartitionCounts + switch t := that.(type) { + case *ClientPartitionCounts: + that1 = t + case ClientPartitionCounts: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/taskqueue/v1/message.pb.go b/api/taskqueue/v1/message.pb.go index 20cb25ffbd0..8a44ca56f15 100644 --- a/api/taskqueue/v1/message.pb.go +++ b/api/taskqueue/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. // plugins: // protoc-gen-go @@ -31,10 +9,17 @@ package taskqueue import ( reflect "reflect" sync "sync" + unsafe "unsafe" + v11 "go.temporal.io/api/deployment/v1" + v1 "go.temporal.io/api/enums/v1" + v13 "go.temporal.io/api/taskqueue/v1" + v12 "go.temporal.io/server/api/deployment/v1" + v14 "go.temporal.io/server/api/enums/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -46,27 +31,47 @@ const ( // TaskVersionDirective controls how matching should direct a task. 
type TaskVersionDirective struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Default (if value is not present) is "unversioned": + state protoimpl.MessageState `protogen:"open.v1"` + // Default (if build_id is not present) is "unversioned": // Use the unversioned task queue, even if the task queue has versioning data. + // Absent value means the task is the non-starting task of an unversioned execution so it should remain unversioned. + // Deprecated. Use deployment_version. // - // Types that are assignable to Value: + // Types that are valid to be assigned to BuildId: // - // *TaskVersionDirective_UseDefault - // *TaskVersionDirective_BuildId - Value isTaskVersionDirective_Value `protobuf_oneof:"value"` + // *TaskVersionDirective_UseAssignmentRules + // *TaskVersionDirective_AssignedBuildId + BuildId isTaskVersionDirective_BuildId `protobuf_oneof:"build_id"` + // Workflow's effective behavior when the task is scheduled. + Behavior v1.VersioningBehavior `protobuf:"varint,3,opt,name=behavior,proto3,enum=temporal.api.enums.v1.VersioningBehavior" json:"behavior,omitempty"` + // Workflow's effective deployment when the task is scheduled. + // Deprecated. Use deployment_version. + Deployment *v11.Deployment `protobuf:"bytes,4,opt,name=deployment,proto3" json:"deployment,omitempty"` + // Workflow's effective deployment version when the task is scheduled. + DeploymentVersion *v12.WorkerDeploymentVersion `protobuf:"bytes,5,opt,name=deployment_version,json=deploymentVersion,proto3" json:"deployment_version,omitempty"` + // Counter copied from the workflow execution's WorkflowExecutionVersioningInfo + // during enqueue time. 
+ RevisionNumber int64 `protobuf:"varint,6,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // If behavior is AutoUpgrade and use_ramping_version is true, then this task should use the + // Ramping Version of its Task Queue regardless of workflow_id and ramp_percentage. + // If there is no Ramping Version at the time of task dispatch, the Current Version will be used instead. + // + // If use_ramping_version is false, the Target Version is chosen with the default formula: + // + // if calcRampThreshold(workflow_id) <= ramp_percentage: + // target=ramping_version + // else: + // target=current_version + UseRampingVersion bool `protobuf:"varint,7,opt,name=use_ramping_version,json=useRampingVersion,proto3" json:"use_ramping_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TaskVersionDirective) Reset() { *x = TaskVersionDirective{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TaskVersionDirective) String() string { @@ -77,7 +82,7 @@ func (*TaskVersionDirective) ProtoMessage() {} func (x *TaskVersionDirective) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -92,96 +97,1110 @@ func (*TaskVersionDirective) Descriptor() ([]byte, []int) { return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{0} } -func (m *TaskVersionDirective) GetValue() isTaskVersionDirective_Value { - if m != nil { - return m.Value +func (x 
*TaskVersionDirective) GetBuildId() isTaskVersionDirective_BuildId { + if x != nil { + return x.BuildId } return nil } -func (x *TaskVersionDirective) GetUseDefault() *emptypb.Empty { - if x, ok := x.GetValue().(*TaskVersionDirective_UseDefault); ok { - return x.UseDefault +func (x *TaskVersionDirective) GetUseAssignmentRules() *emptypb.Empty { + if x != nil { + if x, ok := x.BuildId.(*TaskVersionDirective_UseAssignmentRules); ok { + return x.UseAssignmentRules + } } return nil } -func (x *TaskVersionDirective) GetBuildId() string { - if x, ok := x.GetValue().(*TaskVersionDirective_BuildId); ok { - return x.BuildId +func (x *TaskVersionDirective) GetAssignedBuildId() string { + if x != nil { + if x, ok := x.BuildId.(*TaskVersionDirective_AssignedBuildId); ok { + return x.AssignedBuildId + } } return "" } -type isTaskVersionDirective_Value interface { - isTaskVersionDirective_Value() +func (x *TaskVersionDirective) GetBehavior() v1.VersioningBehavior { + if x != nil { + return x.Behavior + } + return v1.VersioningBehavior(0) } -type TaskVersionDirective_UseDefault struct { - // If use_default is present, the task should be assigned the default - // version for the task queue. This will typically be set for the first - // workflow task in a workflow. - UseDefault *emptypb.Empty `protobuf:"bytes,1,opt,name=use_default,json=useDefault,proto3,oneof"` +func (x *TaskVersionDirective) GetDeployment() *v11.Deployment { + if x != nil { + return x.Deployment + } + return nil +} + +func (x *TaskVersionDirective) GetDeploymentVersion() *v12.WorkerDeploymentVersion { + if x != nil { + return x.DeploymentVersion + } + return nil +} + +func (x *TaskVersionDirective) GetRevisionNumber() int64 { + if x != nil { + return x.RevisionNumber + } + return 0 } -type TaskVersionDirective_BuildId struct { - // If build_id is present, use the default version in the compatible set - // containing this build id. 
- BuildId string `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3,oneof"` +func (x *TaskVersionDirective) GetUseRampingVersion() bool { + if x != nil { + return x.UseRampingVersion + } + return false } -func (*TaskVersionDirective_UseDefault) isTaskVersionDirective_Value() {} +type isTaskVersionDirective_BuildId interface { + isTaskVersionDirective_BuildId() +} -func (*TaskVersionDirective_BuildId) isTaskVersionDirective_Value() {} +type TaskVersionDirective_UseAssignmentRules struct { + // If use_assignment_rules is present, matching should use the assignment rules + // to determine the build ID. + // WV1: the task should be assigned the default version for the task queue. [cleanup-old-wv] + UseAssignmentRules *emptypb.Empty `protobuf:"bytes,1,opt,name=use_assignment_rules,json=useAssignmentRules,proto3,oneof"` +} -var File_temporal_server_api_taskqueue_v1_message_proto protoreflect.FileDescriptor +type TaskVersionDirective_AssignedBuildId struct { + // This means the task is already assigned to `build_id` + // WV1: If assigned_build_id is present, use the default version in the compatible set + // containing this build ID. 
[cleanup-old-wv] + AssignedBuildId string `protobuf:"bytes,2,opt,name=assigned_build_id,json=assignedBuildId,proto3,oneof"` +} + +func (*TaskVersionDirective_UseAssignmentRules) isTaskVersionDirective_BuildId() {} + +func (*TaskVersionDirective_AssignedBuildId) isTaskVersionDirective_BuildId() {} + +type FairLevel struct { + state protoimpl.MessageState `protogen:"open.v1"` + TaskPass int64 `protobuf:"varint,1,opt,name=task_pass,json=taskPass,proto3" json:"task_pass,omitempty"` + TaskId int64 `protobuf:"varint,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *FairLevel) Reset() { + *x = FairLevel{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *FairLevel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FairLevel) ProtoMessage() {} + +func (x *FairLevel) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FairLevel.ProtoReflect.Descriptor instead. 
+func (*FairLevel) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *FairLevel) GetTaskPass() int64 { + if x != nil { + return x.TaskPass + } + return 0 +} + +func (x *FairLevel) GetTaskId() int64 { + if x != nil { + return x.TaskId + } + return 0 +} + +type InternalTaskQueueStatus struct { + state protoimpl.MessageState `protogen:"open.v1"` + ReadLevel int64 `protobuf:"varint,1,opt,name=read_level,json=readLevel,proto3" json:"read_level,omitempty"` + FairReadLevel *FairLevel `protobuf:"bytes,7,opt,name=fair_read_level,json=fairReadLevel,proto3" json:"fair_read_level,omitempty"` + AckLevel int64 `protobuf:"varint,2,opt,name=ack_level,json=ackLevel,proto3" json:"ack_level,omitempty"` + FairAckLevel *FairLevel `protobuf:"bytes,8,opt,name=fair_ack_level,json=fairAckLevel,proto3" json:"fair_ack_level,omitempty"` + TaskIdBlock *v13.TaskIdBlock `protobuf:"bytes,3,opt,name=task_id_block,json=taskIdBlock,proto3" json:"task_id_block,omitempty"` + LoadedTasks int64 `protobuf:"varint,4,opt,name=loaded_tasks,json=loadedTasks,proto3" json:"loaded_tasks,omitempty"` + ApproximateBacklogCount int64 `protobuf:"varint,5,opt,name=approximate_backlog_count,json=approximateBacklogCount,proto3" json:"approximate_backlog_count,omitempty"` + MaxReadLevel int64 `protobuf:"varint,6,opt,name=max_read_level,json=maxReadLevel,proto3" json:"max_read_level,omitempty"` + FairMaxReadLevel *FairLevel `protobuf:"bytes,9,opt,name=fair_max_read_level,json=fairMaxReadLevel,proto3" json:"fair_max_read_level,omitempty"` + // Draining means that this status is from a queue that is being drained to + // migrate from v1 to v2 tasks persistence (or backwards). + Draining bool `protobuf:"varint,10,opt,name=draining,proto3" json:"draining,omitempty"` + // BacklogDrained means this queue has an empty backlog at the time this status + // was generated. This is inherently racy — new tasks may arrive after this + // check. 
Consumers must use version-based validation (see scaleManager) to + // ensure correctness. + BacklogDrained bool `protobuf:"varint,11,opt,name=backlog_drained,json=backlogDrained,proto3" json:"backlog_drained,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InternalTaskQueueStatus) Reset() { + *x = InternalTaskQueueStatus{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InternalTaskQueueStatus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InternalTaskQueueStatus) ProtoMessage() {} + +func (x *InternalTaskQueueStatus) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InternalTaskQueueStatus.ProtoReflect.Descriptor instead. 
+func (*InternalTaskQueueStatus) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{2} +} + +func (x *InternalTaskQueueStatus) GetReadLevel() int64 { + if x != nil { + return x.ReadLevel + } + return 0 +} + +func (x *InternalTaskQueueStatus) GetFairReadLevel() *FairLevel { + if x != nil { + return x.FairReadLevel + } + return nil +} + +func (x *InternalTaskQueueStatus) GetAckLevel() int64 { + if x != nil { + return x.AckLevel + } + return 0 +} + +func (x *InternalTaskQueueStatus) GetFairAckLevel() *FairLevel { + if x != nil { + return x.FairAckLevel + } + return nil +} + +func (x *InternalTaskQueueStatus) GetTaskIdBlock() *v13.TaskIdBlock { + if x != nil { + return x.TaskIdBlock + } + return nil +} + +func (x *InternalTaskQueueStatus) GetLoadedTasks() int64 { + if x != nil { + return x.LoadedTasks + } + return 0 +} + +func (x *InternalTaskQueueStatus) GetApproximateBacklogCount() int64 { + if x != nil { + return x.ApproximateBacklogCount + } + return 0 +} + +func (x *InternalTaskQueueStatus) GetMaxReadLevel() int64 { + if x != nil { + return x.MaxReadLevel + } + return 0 +} + +func (x *InternalTaskQueueStatus) GetFairMaxReadLevel() *FairLevel { + if x != nil { + return x.FairMaxReadLevel + } + return nil +} + +func (x *InternalTaskQueueStatus) GetDraining() bool { + if x != nil { + return x.Draining + } + return false +} + +func (x *InternalTaskQueueStatus) GetBacklogDrained() bool { + if x != nil { + return x.BacklogDrained + } + return false +} + +type TaskQueueVersionInfoInternal struct { + state protoimpl.MessageState `protogen:"open.v1"` + PhysicalTaskQueueInfo *PhysicalTaskQueueInfo `protobuf:"bytes,2,opt,name=physical_task_queue_info,json=physicalTaskQueueInfo,proto3" json:"physical_task_queue_info,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} -var file_temporal_server_api_taskqueue_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2e, 0x74, 0x65, 0x6d, 0x70, 
0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, 0x76, - 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2e, 0x76, 0x31, 0x1a, - 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7f, 0x0a, 0x14, 0x54, - 0x61, 0x73, 0x6b, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, - 0x69, 0x76, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, - 0x00, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x1f, 0x0a, 0x08, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x6f, 0x2e, - 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x2f, - 0x76, 0x31, 0x3b, 0x74, 0x61, 0x73, 0x6b, 0x71, 0x75, 0x65, 0x75, 0x65, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, +func (x *TaskQueueVersionInfoInternal) Reset() { + *x = TaskQueueVersionInfoInternal{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[3] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } +func (x *TaskQueueVersionInfoInternal) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskQueueVersionInfoInternal) ProtoMessage() {} + +func (x *TaskQueueVersionInfoInternal) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskQueueVersionInfoInternal.ProtoReflect.Descriptor instead. +func (*TaskQueueVersionInfoInternal) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{3} +} + +func (x *TaskQueueVersionInfoInternal) GetPhysicalTaskQueueInfo() *PhysicalTaskQueueInfo { + if x != nil { + return x.PhysicalTaskQueueInfo + } + return nil +} + +type PhysicalTaskQueueInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Unversioned workers (with `useVersioning=false`) are reported in unversioned result even if they set a Build ID. + Pollers []*v13.PollerInfo `protobuf:"bytes,1,rep,name=pollers,proto3" json:"pollers,omitempty"` + InternalTaskQueueStatus []*InternalTaskQueueStatus `protobuf:"bytes,3,rep,name=internal_task_queue_status,json=internalTaskQueueStatus,proto3" json:"internal_task_queue_status,omitempty"` + TaskQueueStats *v13.TaskQueueStats `protobuf:"bytes,2,opt,name=task_queue_stats,json=taskQueueStats,proto3" json:"task_queue_stats,omitempty"` + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "by" is used to clarify the keys. 
--) + TaskQueueStatsByPriorityKey map[int32]*v13.TaskQueueStats `protobuf:"bytes,4,rep,name=task_queue_stats_by_priority_key,json=taskQueueStatsByPriorityKey,proto3" json:"task_queue_stats_by_priority_key,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PhysicalTaskQueueInfo) Reset() { + *x = PhysicalTaskQueueInfo{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PhysicalTaskQueueInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhysicalTaskQueueInfo) ProtoMessage() {} + +func (x *PhysicalTaskQueueInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhysicalTaskQueueInfo.ProtoReflect.Descriptor instead. +func (*PhysicalTaskQueueInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{4} +} + +func (x *PhysicalTaskQueueInfo) GetPollers() []*v13.PollerInfo { + if x != nil { + return x.Pollers + } + return nil +} + +func (x *PhysicalTaskQueueInfo) GetInternalTaskQueueStatus() []*InternalTaskQueueStatus { + if x != nil { + return x.InternalTaskQueueStatus + } + return nil +} + +func (x *PhysicalTaskQueueInfo) GetTaskQueueStats() *v13.TaskQueueStats { + if x != nil { + return x.TaskQueueStats + } + return nil +} + +func (x *PhysicalTaskQueueInfo) GetTaskQueueStatsByPriorityKey() map[int32]*v13.TaskQueueStats { + if x != nil { + return x.TaskQueueStatsByPriorityKey + } + return nil +} + +// Internal representation of a task queue partition, used for server-to-server RPCs. 
+// This is the internal equivalent of temporal.api.taskqueue.v1.TaskQueue. +type TaskQueuePartition struct { + state protoimpl.MessageState `protogen:"open.v1"` + // This is the user-facing name for this task queue + TaskQueue string `protobuf:"bytes,1,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskQueueType v1.TaskQueueType `protobuf:"varint,2,opt,name=task_queue_type,json=taskQueueType,proto3,enum=temporal.api.enums.v1.TaskQueueType" json:"task_queue_type,omitempty"` + // Absent means normal root partition (normal_partition_id=0) + // + // Types that are valid to be assigned to PartitionId: + // + // *TaskQueuePartition_NormalPartitionId + // *TaskQueuePartition_StickyName + // *TaskQueuePartition_WorkerCommands + PartitionId isTaskQueuePartition_PartitionId `protobuf_oneof:"partition_id"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TaskQueuePartition) Reset() { + *x = TaskQueuePartition{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TaskQueuePartition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskQueuePartition) ProtoMessage() {} + +func (x *TaskQueuePartition) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskQueuePartition.ProtoReflect.Descriptor instead. 
+func (*TaskQueuePartition) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{5} +} + +func (x *TaskQueuePartition) GetTaskQueue() string { + if x != nil { + return x.TaskQueue + } + return "" +} + +func (x *TaskQueuePartition) GetTaskQueueType() v1.TaskQueueType { + if x != nil { + return x.TaskQueueType + } + return v1.TaskQueueType(0) +} + +func (x *TaskQueuePartition) GetPartitionId() isTaskQueuePartition_PartitionId { + if x != nil { + return x.PartitionId + } + return nil +} + +func (x *TaskQueuePartition) GetNormalPartitionId() int32 { + if x != nil { + if x, ok := x.PartitionId.(*TaskQueuePartition_NormalPartitionId); ok { + return x.NormalPartitionId + } + } + return 0 +} + +func (x *TaskQueuePartition) GetStickyName() string { + if x != nil { + if x, ok := x.PartitionId.(*TaskQueuePartition_StickyName); ok { + return x.StickyName + } + } + return "" +} + +func (x *TaskQueuePartition) GetWorkerCommands() *WorkerCommandsPartitionId { + if x != nil { + if x, ok := x.PartitionId.(*TaskQueuePartition_WorkerCommands); ok { + return x.WorkerCommands + } + } + return nil +} + +type isTaskQueuePartition_PartitionId interface { + isTaskQueuePartition_PartitionId() +} + +type TaskQueuePartition_NormalPartitionId struct { + NormalPartitionId int32 `protobuf:"varint,3,opt,name=normal_partition_id,json=normalPartitionId,proto3,oneof"` +} + +type TaskQueuePartition_StickyName struct { + StickyName string `protobuf:"bytes,4,opt,name=sticky_name,json=stickyName,proto3,oneof"` +} + +type TaskQueuePartition_WorkerCommands struct { + WorkerCommands *WorkerCommandsPartitionId `protobuf:"bytes,5,opt,name=worker_commands,json=workerCommands,proto3,oneof"` +} + +func (*TaskQueuePartition_NormalPartitionId) isTaskQueuePartition_PartitionId() {} + +func (*TaskQueuePartition_StickyName) isTaskQueuePartition_PartitionId() {} + +func (*TaskQueuePartition_WorkerCommands) isTaskQueuePartition_PartitionId() {} + +type 
WorkerCommandsPartitionId struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkerCommandsPartitionId) Reset() { + *x = WorkerCommandsPartitionId{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkerCommandsPartitionId) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkerCommandsPartitionId) ProtoMessage() {} + +func (x *WorkerCommandsPartitionId) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkerCommandsPartitionId.ProtoReflect.Descriptor instead. +func (*WorkerCommandsPartitionId) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{6} +} + +// Information about redirect intention sent by Matching to History in Record*TaskStarted calls. +// Deprecated. +type BuildIdRedirectInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // build ID asked by History in the directive or the one calculated based on the assignment rules. + // this is the source of the redirect rule chain applied. 
(the target of the redirect rule chain is + // the poller's build ID reported in WorkerVersionCapabilities) + AssignedBuildId string `protobuf:"bytes,1,opt,name=assigned_build_id,json=assignedBuildId,proto3" json:"assigned_build_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BuildIdRedirectInfo) Reset() { + *x = BuildIdRedirectInfo{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BuildIdRedirectInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BuildIdRedirectInfo) ProtoMessage() {} + +func (x *BuildIdRedirectInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BuildIdRedirectInfo.ProtoReflect.Descriptor instead. +func (*BuildIdRedirectInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{7} +} + +func (x *BuildIdRedirectInfo) GetAssignedBuildId() string { + if x != nil { + return x.AssignedBuildId + } + return "" +} + +// Information about task forwarding from one partition to its parent. +type TaskForwardInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + // RPC name of the partition forwarded the task. + // In case of multiple hops, this is the source partition of the last hop. 
+ SourcePartition string `protobuf:"bytes,1,opt,name=source_partition,json=sourcePartition,proto3" json:"source_partition,omitempty"` + TaskSource v14.TaskSource `protobuf:"varint,2,opt,name=task_source,json=taskSource,proto3,enum=temporal.server.api.enums.v1.TaskSource" json:"task_source,omitempty"` + // The partition where the task was initially forwarded from. + // Unlike source_partition which gets overwritten at each hop, origin_partition + // persists across all forwarding hops. + OriginPartition string `protobuf:"bytes,6,opt,name=origin_partition,json=originPartition,proto3" json:"origin_partition,omitempty"` + // For tasks that are forwarded, we should keep the original creation time that comes from the + // source partition. Used for dispatch latency metrics. + CreateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + // Redirect info is not present for Query and Nexus tasks. Versioning decisions for activity/workflow + // tasks are made at the source partition and sent to the parent partition in this message so that parent partition + // does not have to make versioning decision again. For Query/Nexus tasks, this works differently as the child's + // versioning decision is ignored and the parent partition makes a fresh decision. + // Deprecated. [cleanup-old-wv] + RedirectInfo *BuildIdRedirectInfo `protobuf:"bytes,3,opt,name=redirect_info,json=redirectInfo,proto3" json:"redirect_info,omitempty"` + // Build ID that should be used to dispatch the task to. Ignored in Query and Nexus tasks. + // Deprecated. [cleanup-old-wv] + DispatchBuildId string `protobuf:"bytes,4,opt,name=dispatch_build_id,json=dispatchBuildId,proto3" json:"dispatch_build_id,omitempty"` + // Only used for old versioning. [cleanup-old-wv] + // Deprecated. 
[cleanup-old-wv] + DispatchVersionSet string `protobuf:"bytes,5,opt,name=dispatch_version_set,json=dispatchVersionSet,proto3" json:"dispatch_version_set,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TaskForwardInfo) Reset() { + *x = TaskForwardInfo{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TaskForwardInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskForwardInfo) ProtoMessage() {} + +func (x *TaskForwardInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskForwardInfo.ProtoReflect.Descriptor instead. +func (*TaskForwardInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{8} +} + +func (x *TaskForwardInfo) GetSourcePartition() string { + if x != nil { + return x.SourcePartition + } + return "" +} + +func (x *TaskForwardInfo) GetTaskSource() v14.TaskSource { + if x != nil { + return x.TaskSource + } + return v14.TaskSource(0) +} + +func (x *TaskForwardInfo) GetOriginPartition() string { + if x != nil { + return x.OriginPartition + } + return "" +} + +func (x *TaskForwardInfo) GetCreateTime() *timestamppb.Timestamp { + if x != nil { + return x.CreateTime + } + return nil +} + +func (x *TaskForwardInfo) GetRedirectInfo() *BuildIdRedirectInfo { + if x != nil { + return x.RedirectInfo + } + return nil +} + +func (x *TaskForwardInfo) GetDispatchBuildId() string { + if x != nil { + return x.DispatchBuildId + } + return "" +} + +func (x *TaskForwardInfo) GetDispatchVersionSet() string { + if x != nil { + return x.DispatchVersionSet + 
} + return "" +} + +// EphemeralData is data that we want to propagate among task queue partitions, but is not persisted. +// Ephemeral data is propagated alongside "task queue user data", but while user data applies to a +// task queue family (all queues with the same name, across types), ephemeral data applies only to +// one type at a time. +type EphemeralData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Partition []*EphemeralData_ByPartition `protobuf:"bytes,1,rep,name=partition,proto3" json:"partition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EphemeralData) Reset() { + *x = EphemeralData{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EphemeralData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EphemeralData) ProtoMessage() {} + +func (x *EphemeralData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EphemeralData.ProtoReflect.Descriptor instead. 
+func (*EphemeralData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{9} +} + +func (x *EphemeralData) GetPartition() []*EphemeralData_ByPartition { + if x != nil { + return x.Partition + } + return nil +} + +type VersionedEphemeralData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Data *EphemeralData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *VersionedEphemeralData) Reset() { + *x = VersionedEphemeralData{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *VersionedEphemeralData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VersionedEphemeralData) ProtoMessage() {} + +func (x *VersionedEphemeralData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VersionedEphemeralData.ProtoReflect.Descriptor instead. +func (*VersionedEphemeralData) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{10} +} + +func (x *VersionedEphemeralData) GetData() *EphemeralData { + if x != nil { + return x.Data + } + return nil +} + +func (x *VersionedEphemeralData) GetVersion() int64 { + if x != nil { + return x.Version + } + return 0 +} + +// ClientPartitionCounts is propagated from the matching service to clients in grpc headers/trailers. 
+type ClientPartitionCounts struct { + state protoimpl.MessageState `protogen:"open.v1"` + Read int32 `protobuf:"varint,1,opt,name=read,proto3" json:"read,omitempty"` + Write int32 `protobuf:"varint,2,opt,name=write,proto3" json:"write,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ClientPartitionCounts) Reset() { + *x = ClientPartitionCounts{} + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ClientPartitionCounts) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientPartitionCounts) ProtoMessage() {} + +func (x *ClientPartitionCounts) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientPartitionCounts.ProtoReflect.Descriptor instead. +func (*ClientPartitionCounts) Descriptor() ([]byte, []int) { + return file_temporal_server_api_taskqueue_v1_message_proto_rawDescGZIP(), []int{11} +} + +func (x *ClientPartitionCounts) GetRead() int32 { + if x != nil { + return x.Read + } + return 0 +} + +func (x *ClientPartitionCounts) GetWrite() int32 { + if x != nil { + return x.Write + } + return 0 +} + +type EphemeralData_ByVersion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Key for this data. Data for the unversioned queue has no version field present. + // All following fields are data associated with this versioned queue. + Version *v12.WorkerDeploymentVersion `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // This is a bit field of priority levels that have "significant" backlog (defined by + // the server configuration). 
Priority key k corresponds to 1< google.protobuf.Empty - 1, // [1:1] is the sub-list for method output_type - 1, // [1:1] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 15, // 0: temporal.server.api.taskqueue.v1.TaskVersionDirective.use_assignment_rules:type_name -> google.protobuf.Empty + 16, // 1: temporal.server.api.taskqueue.v1.TaskVersionDirective.behavior:type_name -> temporal.api.enums.v1.VersioningBehavior + 17, // 2: temporal.server.api.taskqueue.v1.TaskVersionDirective.deployment:type_name -> temporal.api.deployment.v1.Deployment + 18, // 3: temporal.server.api.taskqueue.v1.TaskVersionDirective.deployment_version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 1, // 4: temporal.server.api.taskqueue.v1.InternalTaskQueueStatus.fair_read_level:type_name -> temporal.server.api.taskqueue.v1.FairLevel + 1, // 5: temporal.server.api.taskqueue.v1.InternalTaskQueueStatus.fair_ack_level:type_name -> temporal.server.api.taskqueue.v1.FairLevel + 19, // 6: temporal.server.api.taskqueue.v1.InternalTaskQueueStatus.task_id_block:type_name -> temporal.api.taskqueue.v1.TaskIdBlock + 1, // 7: temporal.server.api.taskqueue.v1.InternalTaskQueueStatus.fair_max_read_level:type_name -> temporal.server.api.taskqueue.v1.FairLevel + 4, // 8: temporal.server.api.taskqueue.v1.TaskQueueVersionInfoInternal.physical_task_queue_info:type_name -> temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo + 20, // 9: temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo.pollers:type_name -> temporal.api.taskqueue.v1.PollerInfo + 2, // 10: temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo.internal_task_queue_status:type_name -> temporal.server.api.taskqueue.v1.InternalTaskQueueStatus + 21, // 11: temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo.task_queue_stats:type_name -> 
temporal.api.taskqueue.v1.TaskQueueStats + 12, // 12: temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo.task_queue_stats_by_priority_key:type_name -> temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo.TaskQueueStatsByPriorityKeyEntry + 22, // 13: temporal.server.api.taskqueue.v1.TaskQueuePartition.task_queue_type:type_name -> temporal.api.enums.v1.TaskQueueType + 6, // 14: temporal.server.api.taskqueue.v1.TaskQueuePartition.worker_commands:type_name -> temporal.server.api.taskqueue.v1.WorkerCommandsPartitionId + 23, // 15: temporal.server.api.taskqueue.v1.TaskForwardInfo.task_source:type_name -> temporal.server.api.enums.v1.TaskSource + 24, // 16: temporal.server.api.taskqueue.v1.TaskForwardInfo.create_time:type_name -> google.protobuf.Timestamp + 7, // 17: temporal.server.api.taskqueue.v1.TaskForwardInfo.redirect_info:type_name -> temporal.server.api.taskqueue.v1.BuildIdRedirectInfo + 14, // 18: temporal.server.api.taskqueue.v1.EphemeralData.partition:type_name -> temporal.server.api.taskqueue.v1.EphemeralData.ByPartition + 9, // 19: temporal.server.api.taskqueue.v1.VersionedEphemeralData.data:type_name -> temporal.server.api.taskqueue.v1.EphemeralData + 21, // 20: temporal.server.api.taskqueue.v1.PhysicalTaskQueueInfo.TaskQueueStatsByPriorityKeyEntry.value:type_name -> temporal.api.taskqueue.v1.TaskQueueStats + 18, // 21: temporal.server.api.taskqueue.v1.EphemeralData.ByVersion.version:type_name -> temporal.server.api.deployment.v1.WorkerDeploymentVersion + 13, // 22: temporal.server.api.taskqueue.v1.EphemeralData.ByPartition.version:type_name -> temporal.server.api.taskqueue.v1.EphemeralData.ByVersion + 23, // [23:23] is the sub-list for method output_type + 23, // [23:23] is the sub-list for method input_type + 23, // [23:23] is the sub-list for extension type_name + 23, // [23:23] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for field type_name } func init() { file_temporal_server_api_taskqueue_v1_message_proto_init() } @@ 
-189,31 +1208,22 @@ func file_temporal_server_api_taskqueue_v1_message_proto_init() { if File_temporal_server_api_taskqueue_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TaskVersionDirective); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } + file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[0].OneofWrappers = []any{ + (*TaskVersionDirective_UseAssignmentRules)(nil), + (*TaskVersionDirective_AssignedBuildId)(nil), } - file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*TaskVersionDirective_UseDefault)(nil), - (*TaskVersionDirective_BuildId)(nil), + file_temporal_server_api_taskqueue_v1_message_proto_msgTypes[5].OneofWrappers = []any{ + (*TaskQueuePartition_NormalPartitionId)(nil), + (*TaskQueuePartition_StickyName)(nil), + (*TaskQueuePartition_WorkerCommands)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_taskqueue_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_taskqueue_v1_message_proto_rawDesc), len(file_temporal_server_api_taskqueue_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 1, + NumMessages: 15, NumExtensions: 0, NumServices: 0, }, @@ -222,7 +1232,6 @@ func file_temporal_server_api_taskqueue_v1_message_proto_init() { MessageInfos: file_temporal_server_api_taskqueue_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_taskqueue_v1_message_proto = out.File - file_temporal_server_api_taskqueue_v1_message_proto_rawDesc = nil file_temporal_server_api_taskqueue_v1_message_proto_goTypes = nil file_temporal_server_api_taskqueue_v1_message_proto_depIdxs = nil } diff --git 
a/api/testservice/v1/request_response.go-helpers.pb.go b/api/testservice/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..fe4c6b97dee --- /dev/null +++ b/api/testservice/v1/request_response.go-helpers.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package testservice + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type SendHelloRequest to the protobuf v3 wire format +func (val *SendHelloRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SendHelloRequest from the protobuf v3 wire format +func (val *SendHelloRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SendHelloRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SendHelloRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SendHelloRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SendHelloRequest + switch t := that.(type) { + case *SendHelloRequest: + that1 = t + case SendHelloRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SendHelloResponse to the protobuf v3 wire format +func (val *SendHelloResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SendHelloResponse from the protobuf v3 wire format +func (val *SendHelloResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SendHelloResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SendHelloResponse values are equivalent by 
recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SendHelloResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SendHelloResponse + switch t := that.(type) { + case *SendHelloResponse: + that1 = t + case SendHelloResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/testservice/v1/request_response.pb.go b/api/testservice/v1/request_response.pb.go new file mode 100644 index 00000000000..40b8599bff8 --- /dev/null +++ b/api/testservice/v1/request_response.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/testservice/v1/request_response.proto + +package testservice + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SendHelloRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendHelloRequest) Reset() { + *x = SendHelloRequest{} + mi := &file_temporal_server_api_testservice_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendHelloRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendHelloRequest) ProtoMessage() {} + +func (x *SendHelloRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_testservice_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendHelloRequest.ProtoReflect.Descriptor instead. 
+func (*SendHelloRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_testservice_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *SendHelloRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type SendHelloResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SendHelloResponse) Reset() { + *x = SendHelloResponse{} + mi := &file_temporal_server_api_testservice_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SendHelloResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendHelloResponse) ProtoMessage() {} + +func (x *SendHelloResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_testservice_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendHelloResponse.ProtoReflect.Descriptor instead. 
+func (*SendHelloResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_testservice_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *SendHelloResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +var File_temporal_server_api_testservice_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_api_testservice_v1_request_response_proto_rawDesc = "" + + "\n" + + "9temporal/server/api/testservice/v1/request_response.proto\x12\"temporal.server.api.testservice.v1\"&\n" + + "\x10SendHelloRequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"-\n" + + "\x11SendHelloResponse\x12\x18\n" + + "\amessage\x18\x01 \x01(\tR\amessageB6Z4go.temporal.io/server/api/testservice/v1;testserviceb\x06proto3" + +var ( + file_temporal_server_api_testservice_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_api_testservice_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_api_testservice_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_api_testservice_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_api_testservice_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_testservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_testservice_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_api_testservice_v1_request_response_proto_rawDescData +} + +var file_temporal_server_api_testservice_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_temporal_server_api_testservice_v1_request_response_proto_goTypes = []any{ + (*SendHelloRequest)(nil), // 0: temporal.server.api.testservice.v1.SendHelloRequest + (*SendHelloResponse)(nil), // 1: temporal.server.api.testservice.v1.SendHelloResponse +} +var file_temporal_server_api_testservice_v1_request_response_proto_depIdxs = 
[]int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_testservice_v1_request_response_proto_init() } +func file_temporal_server_api_testservice_v1_request_response_proto_init() { + if File_temporal_server_api_testservice_v1_request_response_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_testservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_testservice_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_testservice_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_api_testservice_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_api_testservice_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_api_testservice_v1_request_response_proto = out.File + file_temporal_server_api_testservice_v1_request_response_proto_goTypes = nil + file_temporal_server_api_testservice_v1_request_response_proto_depIdxs = nil +} diff --git a/api/testservice/v1/service.pb.go b/api/testservice/v1/service.pb.go new file mode 100644 index 00000000000..b81431ead0b --- /dev/null +++ b/api/testservice/v1/service.pb.go @@ -0,0 +1,68 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/testservice/v1/service.proto + +package testservice + +import ( + reflect "reflect" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_api_testservice_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_api_testservice_v1_service_proto_rawDesc = "" + + "\n" + + "0temporal/server/api/testservice/v1/service.proto\x12\"temporal.server.api.testservice.v1\x1a9temporal/server/api/testservice/v1/request_response.proto2\x89\x01\n" + + "\vTestService\x12z\n" + + "\tSendHello\x124.temporal.server.api.testservice.v1.SendHelloRequest\x1a5.temporal.server.api.testservice.v1.SendHelloResponse\"\x00B6Z4go.temporal.io/server/api/testservice/v1;testserviceb\x06proto3" + +var file_temporal_server_api_testservice_v1_service_proto_goTypes = []any{ + (*SendHelloRequest)(nil), // 0: temporal.server.api.testservice.v1.SendHelloRequest + (*SendHelloResponse)(nil), // 1: temporal.server.api.testservice.v1.SendHelloResponse +} +var file_temporal_server_api_testservice_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.api.testservice.v1.TestService.SendHello:input_type -> temporal.server.api.testservice.v1.SendHelloRequest + 1, // 1: temporal.server.api.testservice.v1.TestService.SendHello:output_type -> temporal.server.api.testservice.v1.SendHelloResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field 
type_name +} + +func init() { file_temporal_server_api_testservice_v1_service_proto_init() } +func file_temporal_server_api_testservice_v1_service_proto_init() { + if File_temporal_server_api_testservice_v1_service_proto != nil { + return + } + file_temporal_server_api_testservice_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_testservice_v1_service_proto_rawDesc), len(file_temporal_server_api_testservice_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_temporal_server_api_testservice_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_api_testservice_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_api_testservice_v1_service_proto = out.File + file_temporal_server_api_testservice_v1_service_proto_goTypes = nil + file_temporal_server_api_testservice_v1_service_proto_depIdxs = nil +} diff --git a/api/testservice/v1/service_grpc.pb.go b/api/testservice/v1/service_grpc.pb.go new file mode 100644 index 00000000000..8223f3ece84 --- /dev/null +++ b/api/testservice/v1/service_grpc.pb.go @@ -0,0 +1,112 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/api/testservice/v1/service.proto + +package testservice + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + TestService_SendHello_FullMethodName = "/temporal.server.api.testservice.v1.TestService/SendHello" +) + +// TestServiceClient is the client API for TestService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TestServiceClient interface { + // Sends a greeting + SendHello(ctx context.Context, in *SendHelloRequest, opts ...grpc.CallOption) (*SendHelloResponse, error) +} + +type testServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) SendHello(ctx context.Context, in *SendHelloRequest, opts ...grpc.CallOption) (*SendHelloResponse, error) { + out := new(SendHelloResponse) + err := c.cc.Invoke(ctx, TestService_SendHello_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TestServiceServer is the server API for TestService service. +// All implementations must embed UnimplementedTestServiceServer +// for forward compatibility +type TestServiceServer interface { + // Sends a greeting + SendHello(context.Context, *SendHelloRequest) (*SendHelloResponse, error) + mustEmbedUnimplementedTestServiceServer() +} + +// UnimplementedTestServiceServer must be embedded to have forward compatible implementations. +type UnimplementedTestServiceServer struct { +} + +func (UnimplementedTestServiceServer) SendHello(context.Context, *SendHelloRequest) (*SendHelloResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendHello not implemented") +} +func (UnimplementedTestServiceServer) mustEmbedUnimplementedTestServiceServer() {} + +// UnsafeTestServiceServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to TestServiceServer will +// result in compilation errors. +type UnsafeTestServiceServer interface { + mustEmbedUnimplementedTestServiceServer() +} + +func RegisterTestServiceServer(s grpc.ServiceRegistrar, srv TestServiceServer) { + s.RegisterService(&TestService_ServiceDesc, srv) +} + +func _TestService_SendHello_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendHelloRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).SendHello(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TestService_SendHello_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).SendHello(ctx, req.(*SendHelloRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// TestService_ServiceDesc is the grpc.ServiceDesc for TestService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TestService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.api.testservice.v1.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendHello", + Handler: _TestService_SendHello_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/api/testservice/v1/service.proto", +} diff --git a/api/testservicemock/v1/service.pb.mock.go b/api/testservicemock/v1/service.pb.mock.go new file mode 100644 index 00000000000..57015772c93 --- /dev/null +++ b/api/testservicemock/v1/service.pb.mock.go @@ -0,0 +1,10 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: api/testservice/v1/service.pb.go +// +// Generated by this command: +// +// mockgen -package testservicemock -source api/testservice/v1/service.pb.go -destination api/testservicemock/v1/service.pb.mock.go +// + +// Package testservicemock is a generated GoMock package. +package testservicemock diff --git a/api/testservicemock/v1/service_grpc.pb.mock.go b/api/testservicemock/v1/service_grpc.pb.mock.go new file mode 100644 index 00000000000..84b3c4a148b --- /dev/null +++ b/api/testservicemock/v1/service_grpc.pb.mock.go @@ -0,0 +1,150 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: api/testservice/v1/service_grpc.pb.go +// +// Generated by this command: +// +// mockgen -package testservicemock -source api/testservice/v1/service_grpc.pb.go -destination api/testservicemock/v1/service_grpc.pb.mock.go +// + +// Package testservicemock is a generated GoMock package. +package testservicemock + +import ( + context "context" + reflect "reflect" + + testservice "go.temporal.io/server/api/testservice/v1" + gomock "go.uber.org/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockTestServiceClient is a mock of TestServiceClient interface. +type MockTestServiceClient struct { + ctrl *gomock.Controller + recorder *MockTestServiceClientMockRecorder + isgomock struct{} +} + +// MockTestServiceClientMockRecorder is the mock recorder for MockTestServiceClient. +type MockTestServiceClientMockRecorder struct { + mock *MockTestServiceClient +} + +// NewMockTestServiceClient creates a new mock instance. +func NewMockTestServiceClient(ctrl *gomock.Controller) *MockTestServiceClient { + mock := &MockTestServiceClient{ctrl: ctrl} + mock.recorder = &MockTestServiceClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTestServiceClient) EXPECT() *MockTestServiceClientMockRecorder { + return m.recorder +} + +// SendHello mocks base method. 
+func (m *MockTestServiceClient) SendHello(ctx context.Context, in *testservice.SendHelloRequest, opts ...grpc.CallOption) (*testservice.SendHelloResponse, error) { + m.ctrl.T.Helper() + varargs := []any{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SendHello", varargs...) + ret0, _ := ret[0].(*testservice.SendHelloResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendHello indicates an expected call of SendHello. +func (mr *MockTestServiceClientMockRecorder) SendHello(ctx, in any, opts ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHello", reflect.TypeOf((*MockTestServiceClient)(nil).SendHello), varargs...) +} + +// MockTestServiceServer is a mock of TestServiceServer interface. +type MockTestServiceServer struct { + ctrl *gomock.Controller + recorder *MockTestServiceServerMockRecorder + isgomock struct{} +} + +// MockTestServiceServerMockRecorder is the mock recorder for MockTestServiceServer. +type MockTestServiceServerMockRecorder struct { + mock *MockTestServiceServer +} + +// NewMockTestServiceServer creates a new mock instance. +func NewMockTestServiceServer(ctrl *gomock.Controller) *MockTestServiceServer { + mock := &MockTestServiceServer{ctrl: ctrl} + mock.recorder = &MockTestServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTestServiceServer) EXPECT() *MockTestServiceServerMockRecorder { + return m.recorder +} + +// SendHello mocks base method. 
+func (m *MockTestServiceServer) SendHello(arg0 context.Context, arg1 *testservice.SendHelloRequest) (*testservice.SendHelloResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendHello", arg0, arg1) + ret0, _ := ret[0].(*testservice.SendHelloResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendHello indicates an expected call of SendHello. +func (mr *MockTestServiceServerMockRecorder) SendHello(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendHello", reflect.TypeOf((*MockTestServiceServer)(nil).SendHello), arg0, arg1) +} + +// mustEmbedUnimplementedTestServiceServer mocks base method. +func (m *MockTestServiceServer) mustEmbedUnimplementedTestServiceServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedTestServiceServer") +} + +// mustEmbedUnimplementedTestServiceServer indicates an expected call of mustEmbedUnimplementedTestServiceServer. +func (mr *MockTestServiceServerMockRecorder) mustEmbedUnimplementedTestServiceServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedTestServiceServer", reflect.TypeOf((*MockTestServiceServer)(nil).mustEmbedUnimplementedTestServiceServer)) +} + +// MockUnsafeTestServiceServer is a mock of UnsafeTestServiceServer interface. +type MockUnsafeTestServiceServer struct { + ctrl *gomock.Controller + recorder *MockUnsafeTestServiceServerMockRecorder + isgomock struct{} +} + +// MockUnsafeTestServiceServerMockRecorder is the mock recorder for MockUnsafeTestServiceServer. +type MockUnsafeTestServiceServerMockRecorder struct { + mock *MockUnsafeTestServiceServer +} + +// NewMockUnsafeTestServiceServer creates a new mock instance. 
+func NewMockUnsafeTestServiceServer(ctrl *gomock.Controller) *MockUnsafeTestServiceServer { + mock := &MockUnsafeTestServiceServer{ctrl: ctrl} + mock.recorder = &MockUnsafeTestServiceServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsafeTestServiceServer) EXPECT() *MockUnsafeTestServiceServerMockRecorder { + return m.recorder +} + +// mustEmbedUnimplementedTestServiceServer mocks base method. +func (m *MockUnsafeTestServiceServer) mustEmbedUnimplementedTestServiceServer() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedTestServiceServer") +} + +// mustEmbedUnimplementedTestServiceServer indicates an expected call of mustEmbedUnimplementedTestServiceServer. +func (mr *MockUnsafeTestServiceServerMockRecorder) mustEmbedUnimplementedTestServiceServer() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedTestServiceServer", reflect.TypeOf((*MockUnsafeTestServiceServer)(nil).mustEmbedUnimplementedTestServiceServer)) +} diff --git a/api/token/v1/message.go-helpers.pb.go b/api/token/v1/message.go-helpers.pb.go index 49b3f8428f9..235b9499a68 100644 --- a/api/token/v1/message.go-helpers.pb.go +++ b/api/token/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. package token @@ -213,3 +189,77 @@ func (this *NexusTask) Equal(that interface{}) bool { return proto.Equal(this, that1) } + +// Marshal an object of type HistoryEventRef to the protobuf v3 wire format +func (val *HistoryEventRef) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type HistoryEventRef from the protobuf v3 wire format +func (val *HistoryEventRef) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *HistoryEventRef) Size() int { + return proto.Size(val) +} + +// Equal returns whether two HistoryEventRef values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *HistoryEventRef) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *HistoryEventRef + switch t := that.(type) { + case *HistoryEventRef: + that1 = t + case HistoryEventRef: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusOperationCompletion to the protobuf v3 wire format +func (val *NexusOperationCompletion) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusOperationCompletion from the protobuf v3 wire format +func (val *NexusOperationCompletion) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusOperationCompletion) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusOperationCompletion values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusOperationCompletion) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusOperationCompletion + switch t := that.(type) { + case *NexusOperationCompletion: + that1 = t + case NexusOperationCompletion: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/token/v1/message.pb.go b/api/token/v1/message.pb.go index ad61c17da33..0dbc4627b5b 100644 --- a/api/token/v1/message.pb.go +++ b/api/token/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,9 +9,11 @@ package token import ( reflect "reflect" sync "sync" + unsafe "unsafe" - v11 "go.temporal.io/server/api/clock/v1" + v12 "go.temporal.io/server/api/clock/v1" v1 "go.temporal.io/server/api/history/v1" + v11 "go.temporal.io/server/api/persistence/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" timestamppb "google.golang.org/protobuf/types/known/timestamppb" @@ -47,27 +27,24 @@ const ( ) type HistoryContinuation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - FirstEventId int64 `protobuf:"varint,2,opt,name=first_event_id,json=firstEventId,proto3" json:"first_event_id,omitempty"` - NextEventId int64 `protobuf:"varint,3,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` - IsWorkflowRunning bool `protobuf:"varint,5,opt,name=is_workflow_running,json=isWorkflowRunning,proto3" json:"is_workflow_running,omitempty"` - PersistenceToken []byte `protobuf:"bytes,6,opt,name=persistence_token,json=persistenceToken,proto3" json:"persistence_token,omitempty"` - TransientWorkflowTask *v1.TransientWorkflowTaskInfo `protobuf:"bytes,7,opt,name=transient_workflow_task,json=transientWorkflowTask,proto3" json:"transient_workflow_task,omitempty"` - BranchToken []byte `protobuf:"bytes,8,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` - VersionHistoryItem *v1.VersionHistoryItem `protobuf:"bytes,10,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + FirstEventId int64 `protobuf:"varint,2,opt,name=first_event_id,json=firstEventId,proto3" 
json:"first_event_id,omitempty"` + NextEventId int64 `protobuf:"varint,3,opt,name=next_event_id,json=nextEventId,proto3" json:"next_event_id,omitempty"` + IsWorkflowRunning bool `protobuf:"varint,5,opt,name=is_workflow_running,json=isWorkflowRunning,proto3" json:"is_workflow_running,omitempty"` + PersistenceToken []byte `protobuf:"bytes,6,opt,name=persistence_token,json=persistenceToken,proto3" json:"persistence_token,omitempty"` + BranchToken []byte `protobuf:"bytes,8,opt,name=branch_token,json=branchToken,proto3" json:"branch_token,omitempty"` + VersionHistoryItem *v1.VersionHistoryItem `protobuf:"bytes,10,opt,name=version_history_item,json=versionHistoryItem,proto3" json:"version_history_item,omitempty"` + VersionedTransition *v11.VersionedTransition `protobuf:"bytes,11,opt,name=versioned_transition,json=versionedTransition,proto3" json:"versioned_transition,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HistoryContinuation) Reset() { *x = HistoryContinuation{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *HistoryContinuation) String() string { @@ -78,7 +55,7 @@ func (*HistoryContinuation) ProtoMessage() {} func (x *HistoryContinuation) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -128,50 +105,47 @@ func (x *HistoryContinuation) GetPersistenceToken() []byte { return nil } -func (x *HistoryContinuation) GetTransientWorkflowTask() *v1.TransientWorkflowTaskInfo { +func (x 
*HistoryContinuation) GetBranchToken() []byte { if x != nil { - return x.TransientWorkflowTask + return x.BranchToken } return nil } -func (x *HistoryContinuation) GetBranchToken() []byte { +func (x *HistoryContinuation) GetVersionHistoryItem() *v1.VersionHistoryItem { if x != nil { - return x.BranchToken + return x.VersionHistoryItem } return nil } -func (x *HistoryContinuation) GetVersionHistoryItem() *v1.VersionHistoryItem { +func (x *HistoryContinuation) GetVersionedTransition() *v11.VersionedTransition { if x != nil { - return x.VersionHistoryItem + return x.VersionedTransition } return nil } type RawHistoryContinuation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,10,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` - RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - StartEventId int64 `protobuf:"varint,4,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` - StartEventVersion int64 `protobuf:"varint,5,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` - EndEventId int64 `protobuf:"varint,6,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` - EndEventVersion int64 `protobuf:"varint,7,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` - PersistenceToken []byte `protobuf:"bytes,8,opt,name=persistence_token,json=persistenceToken,proto3" json:"persistence_token,omitempty"` - VersionHistories *v1.VersionHistories `protobuf:"bytes,9,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string 
`protobuf:"bytes,10,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + StartEventId int64 `protobuf:"varint,4,opt,name=start_event_id,json=startEventId,proto3" json:"start_event_id,omitempty"` + StartEventVersion int64 `protobuf:"varint,5,opt,name=start_event_version,json=startEventVersion,proto3" json:"start_event_version,omitempty"` + EndEventId int64 `protobuf:"varint,6,opt,name=end_event_id,json=endEventId,proto3" json:"end_event_id,omitempty"` + EndEventVersion int64 `protobuf:"varint,7,opt,name=end_event_version,json=endEventVersion,proto3" json:"end_event_version,omitempty"` + PersistenceToken []byte `protobuf:"bytes,8,opt,name=persistence_token,json=persistenceToken,proto3" json:"persistence_token,omitempty"` + VersionHistories *v1.VersionHistories `protobuf:"bytes,9,opt,name=version_histories,json=versionHistories,proto3" json:"version_histories,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RawHistoryContinuation) Reset() { *x = RawHistoryContinuation{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RawHistoryContinuation) String() string { @@ -182,7 +156,7 @@ func (*RawHistoryContinuation) ProtoMessage() {} func (x *RawHistoryContinuation) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() 
== nil { ms.StoreMessageInfo(mi) @@ -261,10 +235,7 @@ func (x *RawHistoryContinuation) GetVersionHistories() *v1.VersionHistories { } type Task struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` @@ -273,19 +244,22 @@ type Task struct { ActivityId string `protobuf:"bytes,6,opt,name=activity_id,json=activityId,proto3" json:"activity_id,omitempty"` WorkflowType string `protobuf:"bytes,7,opt,name=workflow_type,json=workflowType,proto3" json:"workflow_type,omitempty"` ActivityType string `protobuf:"bytes,8,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` - Clock *v11.VectorClock `protobuf:"bytes,9,opt,name=clock,proto3" json:"clock,omitempty"` + Clock *v12.VectorClock `protobuf:"bytes,9,opt,name=clock,proto3" json:"clock,omitempty"` StartedEventId int64 `protobuf:"varint,10,opt,name=started_event_id,json=startedEventId,proto3" json:"started_event_id,omitempty"` Version int64 `protobuf:"varint,11,opt,name=version,proto3" json:"version,omitempty"` StartedTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + StartVersion int64 `protobuf:"varint,13,opt,name=start_version,json=startVersion,proto3" json:"start_version,omitempty"` + // Reference to the associated Chasm component, if provided. 
+ ComponentRef []byte `protobuf:"bytes,14,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Task) Reset() { *x = Task{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Task) String() string { @@ -296,7 +270,7 @@ func (*Task) ProtoMessage() {} func (x *Task) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -367,7 +341,7 @@ func (x *Task) GetActivityType() string { return "" } -func (x *Task) GetClock() *v11.VectorClock { +func (x *Task) GetClock() *v12.VectorClock { if x != nil { return x.Clock } @@ -395,23 +369,34 @@ func (x *Task) GetStartedTime() *timestamppb.Timestamp { return nil } +func (x *Task) GetStartVersion() int64 { + if x != nil { + return x.StartVersion + } + return 0 +} + +func (x *Task) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + type QueryTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string 
`protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *QueryTask) Reset() { *x = QueryTask{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *QueryTask) String() string { @@ -422,7 +407,7 @@ func (*QueryTask) ProtoMessage() {} func (x *QueryTask) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -459,22 +444,19 @@ func (x *QueryTask) GetTaskId() string { } type NexusTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - TaskQueue string `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` - TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" 
json:"task_id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *NexusTask) Reset() { *x = NexusTask{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *NexusTask) String() string { @@ -485,7 +467,7 @@ func (*NexusTask) ProtoMessage() {} func (x *NexusTask) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -521,162 +503,264 @@ func (x *NexusTask) GetTaskId() string { return "" } -var File_temporal_server_api_token_v1_message_proto protoreflect.FileDescriptor +// A reference for loading a history event. +type HistoryEventRef struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Event ID. + EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + // Event batch ID - the first event ID in the batch the event was stored in. 
+ EventBatchId int64 `protobuf:"varint,2,opt,name=event_batch_id,json=eventBatchId,proto3" json:"event_batch_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HistoryEventRef) Reset() { + *x = HistoryEventRef{} + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} -var file_temporal_server_api_token_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, - 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x6f, 0x6b, - 0x65, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf5, 0x03, 0x0a, 0x13, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, - 0x06, 0x72, 0x75, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, - 0x49, 0x64, 0x42, 0x02, 
0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x66, 0x69, - 0x72, 0x73, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x26, 0x0a, - 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x69, 0x73, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, - 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x69, - 0x73, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x11, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x65, 0x72, - 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x75, 0x0a, 0x17, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, - 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, - 0x73, 0x6b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x15, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74, - 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x25, - 0x0a, 0x0c, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0c, 0x52, 
0x0b, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, - 0x02, 0x68, 0x00, 0x12, 0x68, 0x0a, 0x14, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, 0x65, 0x6d, 0x52, - 0x12, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x49, 0x74, - 0x65, 0x6d, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xcd, 0x03, 0x0a, 0x16, 0x52, - 0x61, 0x77, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, - 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x32, 0x0a, 0x13, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 
0x28, 0x03, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x24, 0x0a, 0x0c, 0x65, 0x6e, - 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2e, - 0x0a, 0x11, 0x65, 0x6e, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x11, 0x70, 0x65, - 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x10, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x12, 0x61, 0x0a, 0x11, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x30, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x2e, 0x76, 0x31, - 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73, - 0x52, 0x10, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x69, 0x65, - 0x73, 0x42, 0x02, 0x68, 0x00, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x88, 0x04, 0x0a, 0x04, 0x54, 0x61, - 0x73, 0x6b, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x5f, 0x69, 
0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, - 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, 0x6e, 0x5f, - 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x30, 0x0a, 0x12, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x5f, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x07, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x42, 0x02, 0x68, 0x00, 0x12, 0x23, 0x0a, 0x0b, - 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, - 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, - 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x27, 0x0a, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2c, 0x0a, 0x10, 0x73, 0x74, - 0x61, 0x72, 0x74, 
0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, - 0x68, 0x00, 0x12, 0x41, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x52, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x22, 0x72, 0x0a, 0x09, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x0a, 0x0c, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x21, 0x0a, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x1b, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x72, 0x0a, 0x09, 0x4e, - 0x65, 0x78, 0x75, 0x73, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, - 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x21, 0x0a, 0x0a, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x74, 0x61, 0x73, 
0x6b, 0x51, 0x75, 0x65, 0x75, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x1b, 0x0a, 0x07, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, - 0x73, 0x6b, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x74, 0x65, 0x6d, - 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, - 0x70, 0x69, 0x2f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +func (x *HistoryEventRef) String() string { + return protoimpl.X.MessageStringOf(x) } +func (*HistoryEventRef) ProtoMessage() {} + +func (x *HistoryEventRef) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HistoryEventRef.ProtoReflect.Descriptor instead. +func (*HistoryEventRef) Descriptor() ([]byte, []int) { + return file_temporal_server_api_token_v1_message_proto_rawDescGZIP(), []int{5} +} + +func (x *HistoryEventRef) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *HistoryEventRef) GetEventBatchId() int64 { + if x != nil { + return x.EventBatchId + } + return 0 +} + +// A completion token for a Nexus operation started from a workflow. +type NexusOperationCompletion struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Namespace UUID. (Deprecated) + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // Workflow ID. (Deprecated) + WorkflowId string `protobuf:"bytes,2,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // Run ID at the time this token was generated. 
(Deprecated) + RunId string `protobuf:"bytes,3,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + // Reference including the path to the backing Operation state machine and a version + transition count for + // staleness checks. (Deprecated) + Ref *v11.StateMachineRef `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"` + // Request ID embedded in the NexusOperationScheduledEvent. + // Allows completing a started operation after a workflow has been reset. + RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Reference to the CHASM component to be informed of the completion. + ComponentRef []byte `protobuf:"bytes,6,opt,name=component_ref,json=componentRef,proto3" json:"component_ref,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusOperationCompletion) Reset() { + *x = NexusOperationCompletion{} + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusOperationCompletion) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusOperationCompletion) ProtoMessage() {} + +func (x *NexusOperationCompletion) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_token_v1_message_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusOperationCompletion.ProtoReflect.Descriptor instead. 
+func (*NexusOperationCompletion) Descriptor() ([]byte, []int) { + return file_temporal_server_api_token_v1_message_proto_rawDescGZIP(), []int{6} +} + +func (x *NexusOperationCompletion) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *NexusOperationCompletion) GetWorkflowId() string { + if x != nil { + return x.WorkflowId + } + return "" +} + +func (x *NexusOperationCompletion) GetRunId() string { + if x != nil { + return x.RunId + } + return "" +} + +func (x *NexusOperationCompletion) GetRef() *v11.StateMachineRef { + if x != nil { + return x.Ref + } + return nil +} + +func (x *NexusOperationCompletion) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *NexusOperationCompletion) GetComponentRef() []byte { + if x != nil { + return x.ComponentRef + } + return nil +} + +var File_temporal_server_api_token_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_api_token_v1_message_proto_rawDesc = "" + + "\n" + + "*temporal/server/api/token/v1/message.proto\x12\x1ctemporal.server.api.token.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a*temporal/server/api/clock/v1/message.proto\x1a,temporal/server/api/history/v1/message.proto\x1a,temporal/server/api/persistence/v1/hsm.proto\"\xd4\x03\n" + + "\x13HistoryContinuation\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\x12$\n" + + "\x0efirst_event_id\x18\x02 \x01(\x03R\ffirstEventId\x12\"\n" + + "\rnext_event_id\x18\x03 \x01(\x03R\vnextEventId\x12.\n" + + "\x13is_workflow_running\x18\x05 \x01(\bR\x11isWorkflowRunning\x12+\n" + + "\x11persistence_token\x18\x06 \x01(\fR\x10persistenceToken\x12!\n" + + "\fbranch_token\x18\b \x01(\fR\vbranchToken\x12d\n" + + "\x14version_history_item\x18\n" + + " \x01(\v22.temporal.server.api.history.v1.VersionHistoryItemR\x12versionHistoryItem\x12j\n" + + "\x14versioned_transition\x18\v 
\x01(\v27.temporal.server.api.persistence.v1.VersionedTransitionR\x13versionedTransitionJ\x04\b\a\x10\bJ\x04\b\t\x10\n" + + "\"\xa9\x03\n" + + "\x16RawHistoryContinuation\x12!\n" + + "\fnamespace_id\x18\n" + + " \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12$\n" + + "\x0estart_event_id\x18\x04 \x01(\x03R\fstartEventId\x12.\n" + + "\x13start_event_version\x18\x05 \x01(\x03R\x11startEventVersion\x12 \n" + + "\fend_event_id\x18\x06 \x01(\x03R\n" + + "endEventId\x12*\n" + + "\x11end_event_version\x18\a \x01(\x03R\x0fendEventVersion\x12+\n" + + "\x11persistence_token\x18\b \x01(\fR\x10persistenceToken\x12]\n" + + "\x11version_histories\x18\t \x01(\v20.temporal.server.api.history.v1.VersionHistoriesR\x10versionHistoriesJ\x04\b\x01\x10\x02\"\xa2\x04\n" + + "\x04Task\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12,\n" + + "\x12scheduled_event_id\x18\x04 \x01(\x03R\x10scheduledEventId\x12\x18\n" + + "\aattempt\x18\x05 \x01(\x05R\aattempt\x12\x1f\n" + + "\vactivity_id\x18\x06 \x01(\tR\n" + + "activityId\x12#\n" + + "\rworkflow_type\x18\a \x01(\tR\fworkflowType\x12#\n" + + "\ractivity_type\x18\b \x01(\tR\factivityType\x12?\n" + + "\x05clock\x18\t \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12(\n" + + "\x10started_event_id\x18\n" + + " \x01(\x03R\x0estartedEventId\x12\x18\n" + + "\aversion\x18\v \x01(\x03R\aversion\x12=\n" + + "\fstarted_time\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12#\n" + + "\rstart_version\x18\r \x01(\x03R\fstartVersion\x12#\n" + + "\rcomponent_ref\x18\x0e \x01(\fR\fcomponentRef\"f\n" + + "\tQueryTask\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12\x17\n" + + "\atask_id\x18\x03 \x01(\tR\x06taskId\"f\n" + + 
"\tNexusTask\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1d\n" + + "\n" + + "task_queue\x18\x02 \x01(\tR\ttaskQueue\x12\x17\n" + + "\atask_id\x18\x03 \x01(\tR\x06taskId\"R\n" + + "\x0fHistoryEventRef\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventId\x12$\n" + + "\x0eevent_batch_id\x18\x02 \x01(\x03R\feventBatchId\"\x80\x02\n" + + "\x18NexusOperationCompletion\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vworkflow_id\x18\x02 \x01(\tR\n" + + "workflowId\x12\x15\n" + + "\x06run_id\x18\x03 \x01(\tR\x05runId\x12E\n" + + "\x03ref\x18\x04 \x01(\v23.temporal.server.api.persistence.v1.StateMachineRefR\x03ref\x12\x1d\n" + + "\n" + + "request_id\x18\x05 \x01(\tR\trequestId\x12#\n" + + "\rcomponent_ref\x18\x06 \x01(\fR\fcomponentRefB*Z(go.temporal.io/server/api/token/v1;tokenb\x06proto3" + var ( file_temporal_server_api_token_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_token_v1_message_proto_rawDescData = file_temporal_server_api_token_v1_message_proto_rawDesc + file_temporal_server_api_token_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_token_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_token_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_token_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_token_v1_message_proto_rawDescData) + file_temporal_server_api_token_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_token_v1_message_proto_rawDesc), len(file_temporal_server_api_token_v1_message_proto_rawDesc))) }) return file_temporal_server_api_token_v1_message_proto_rawDescData } -var file_temporal_server_api_token_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 5) -var file_temporal_server_api_token_v1_message_proto_goTypes = []interface{}{ - (*HistoryContinuation)(nil), // 0: temporal.server.api.token.v1.HistoryContinuation - 
(*RawHistoryContinuation)(nil), // 1: temporal.server.api.token.v1.RawHistoryContinuation - (*Task)(nil), // 2: temporal.server.api.token.v1.Task - (*QueryTask)(nil), // 3: temporal.server.api.token.v1.QueryTask - (*NexusTask)(nil), // 4: temporal.server.api.token.v1.NexusTask - (*v1.TransientWorkflowTaskInfo)(nil), // 5: temporal.server.api.history.v1.TransientWorkflowTaskInfo - (*v1.VersionHistoryItem)(nil), // 6: temporal.server.api.history.v1.VersionHistoryItem - (*v1.VersionHistories)(nil), // 7: temporal.server.api.history.v1.VersionHistories - (*v11.VectorClock)(nil), // 8: temporal.server.api.clock.v1.VectorClock - (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp +var file_temporal_server_api_token_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_temporal_server_api_token_v1_message_proto_goTypes = []any{ + (*HistoryContinuation)(nil), // 0: temporal.server.api.token.v1.HistoryContinuation + (*RawHistoryContinuation)(nil), // 1: temporal.server.api.token.v1.RawHistoryContinuation + (*Task)(nil), // 2: temporal.server.api.token.v1.Task + (*QueryTask)(nil), // 3: temporal.server.api.token.v1.QueryTask + (*NexusTask)(nil), // 4: temporal.server.api.token.v1.NexusTask + (*HistoryEventRef)(nil), // 5: temporal.server.api.token.v1.HistoryEventRef + (*NexusOperationCompletion)(nil), // 6: temporal.server.api.token.v1.NexusOperationCompletion + (*v1.VersionHistoryItem)(nil), // 7: temporal.server.api.history.v1.VersionHistoryItem + (*v11.VersionedTransition)(nil), // 8: temporal.server.api.persistence.v1.VersionedTransition + (*v1.VersionHistories)(nil), // 9: temporal.server.api.history.v1.VersionHistories + (*v12.VectorClock)(nil), // 10: temporal.server.api.clock.v1.VectorClock + (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp + (*v11.StateMachineRef)(nil), // 12: temporal.server.api.persistence.v1.StateMachineRef } var file_temporal_server_api_token_v1_message_proto_depIdxs = []int32{ - 5, // 0: 
temporal.server.api.token.v1.HistoryContinuation.transient_workflow_task:type_name -> temporal.server.api.history.v1.TransientWorkflowTaskInfo - 6, // 1: temporal.server.api.token.v1.HistoryContinuation.version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem - 7, // 2: temporal.server.api.token.v1.RawHistoryContinuation.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories - 8, // 3: temporal.server.api.token.v1.Task.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 9, // 4: temporal.server.api.token.v1.Task.started_time:type_name -> google.protobuf.Timestamp - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 7, // 0: temporal.server.api.token.v1.HistoryContinuation.version_history_item:type_name -> temporal.server.api.history.v1.VersionHistoryItem + 8, // 1: temporal.server.api.token.v1.HistoryContinuation.versioned_transition:type_name -> temporal.server.api.persistence.v1.VersionedTransition + 9, // 2: temporal.server.api.token.v1.RawHistoryContinuation.version_histories:type_name -> temporal.server.api.history.v1.VersionHistories + 10, // 3: temporal.server.api.token.v1.Task.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 11, // 4: temporal.server.api.token.v1.Task.started_time:type_name -> google.protobuf.Timestamp + 12, // 5: temporal.server.api.token.v1.NexusOperationCompletion.ref:type_name -> temporal.server.api.persistence.v1.StateMachineRef + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_temporal_server_api_token_v1_message_proto_init() 
} @@ -684,75 +768,13 @@ func file_temporal_server_api_token_v1_message_proto_init() { if File_temporal_server_api_token_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_temporal_server_api_token_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HistoryContinuation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_token_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RawHistoryContinuation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_token_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Task); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_token_v1_message_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryTask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_token_v1_message_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NexusTask); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_token_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_token_v1_message_proto_rawDesc), len(file_temporal_server_api_token_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 5, + NumMessages: 
7, NumExtensions: 0, NumServices: 0, }, @@ -761,7 +783,6 @@ func file_temporal_server_api_token_v1_message_proto_init() { MessageInfos: file_temporal_server_api_token_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_token_v1_message_proto = out.File - file_temporal_server_api_token_v1_message_proto_rawDesc = nil file_temporal_server_api_token_v1_message_proto_goTypes = nil file_temporal_server_api_token_v1_message_proto_depIdxs = nil } diff --git a/api/update/v1/message.go-helpers.pb.go b/api/update/v1/message.go-helpers.pb.go deleted file mode 100644 index 5855e611f75..00000000000 --- a/api/update/v1/message.go-helpers.pb.go +++ /dev/null @@ -1,141 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
-package update - -import ( - "google.golang.org/protobuf/proto" -) - -// Marshal an object of type AcceptanceInfo to the protobuf v3 wire format -func (val *AcceptanceInfo) Marshal() ([]byte, error) { - return proto.Marshal(val) -} - -// Unmarshal an object of type AcceptanceInfo from the protobuf v3 wire format -func (val *AcceptanceInfo) Unmarshal(buf []byte) error { - return proto.Unmarshal(buf, val) -} - -// Size returns the size of the object, in bytes, once serialized -func (val *AcceptanceInfo) Size() int { - return proto.Size(val) -} - -// Equal returns whether two AcceptanceInfo values are equivalent by recursively -// comparing the message's fields. -// For more information see the documentation for -// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *AcceptanceInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - var that1 *AcceptanceInfo - switch t := that.(type) { - case *AcceptanceInfo: - that1 = t - case AcceptanceInfo: - that1 = &t - default: - return false - } - - return proto.Equal(this, that1) -} - -// Marshal an object of type CompletionInfo to the protobuf v3 wire format -func (val *CompletionInfo) Marshal() ([]byte, error) { - return proto.Marshal(val) -} - -// Unmarshal an object of type CompletionInfo from the protobuf v3 wire format -func (val *CompletionInfo) Unmarshal(buf []byte) error { - return proto.Unmarshal(buf, val) -} - -// Size returns the size of the object, in bytes, once serialized -func (val *CompletionInfo) Size() int { - return proto.Size(val) -} - -// Equal returns whether two CompletionInfo values are equivalent by recursively -// comparing the message's fields. 
-// For more information see the documentation for -// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *CompletionInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - var that1 *CompletionInfo - switch t := that.(type) { - case *CompletionInfo: - that1 = t - case CompletionInfo: - that1 = &t - default: - return false - } - - return proto.Equal(this, that1) -} - -// Marshal an object of type UpdateInfo to the protobuf v3 wire format -func (val *UpdateInfo) Marshal() ([]byte, error) { - return proto.Marshal(val) -} - -// Unmarshal an object of type UpdateInfo from the protobuf v3 wire format -func (val *UpdateInfo) Unmarshal(buf []byte) error { - return proto.Unmarshal(buf, val) -} - -// Size returns the size of the object, in bytes, once serialized -func (val *UpdateInfo) Size() int { - return proto.Size(val) -} - -// Equal returns whether two UpdateInfo values are equivalent by recursively -// comparing the message's fields. -// For more information see the documentation for -// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal -func (this *UpdateInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - var that1 *UpdateInfo - switch t := that.(type) { - case *UpdateInfo: - that1 = t - case UpdateInfo: - that1 = &t - default: - return false - } - - return proto.Equal(this, that1) -} diff --git a/api/update/v1/message.pb.go b/api/update/v1/message.pb.go deleted file mode 100644 index ef6397a3e44..00000000000 --- a/api/update/v1/message.pb.go +++ /dev/null @@ -1,363 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// plugins: -// protoc-gen-go -// protoc -// source: temporal/server/api/update/v1/message.proto - -package update - -import ( - reflect "reflect" - sync "sync" - - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// AcceptanceInfo contains information about an accepted update -type AcceptanceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // the event ID of the WorkflowExecutionUpdateAcceptedEvent - EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` -} - -func (x *AcceptanceInfo) Reset() { - *x = AcceptanceInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_update_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AcceptanceInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AcceptanceInfo) ProtoMessage() {} - -func (x *AcceptanceInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_update_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AcceptanceInfo.ProtoReflect.Descriptor instead. 
-func (*AcceptanceInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_update_v1_message_proto_rawDescGZIP(), []int{0} -} - -func (x *AcceptanceInfo) GetEventId() int64 { - if x != nil { - return x.EventId - } - return 0 -} - -// CompletionInfo contains information about a completed update -type CompletionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // the event ID of the WorkflowExecutionUpdateCompletedEvent - EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` - // the ID of the event batch containing the event_id above - EventBatchId int64 `protobuf:"varint,2,opt,name=event_batch_id,json=eventBatchId,proto3" json:"event_batch_id,omitempty"` -} - -func (x *CompletionInfo) Reset() { - *x = CompletionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_update_v1_message_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CompletionInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CompletionInfo) ProtoMessage() {} - -func (x *CompletionInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_update_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CompletionInfo.ProtoReflect.Descriptor instead. 
-func (*CompletionInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_update_v1_message_proto_rawDescGZIP(), []int{1} -} - -func (x *CompletionInfo) GetEventId() int64 { - if x != nil { - return x.EventId - } - return 0 -} - -func (x *CompletionInfo) GetEventBatchId() int64 { - if x != nil { - return x.EventBatchId - } - return 0 -} - -// UpdateInfo is the persistent state of a single update -type UpdateInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Value: - // - // *UpdateInfo_Acceptance - // *UpdateInfo_Completion - Value isUpdateInfo_Value `protobuf_oneof:"value"` -} - -func (x *UpdateInfo) Reset() { - *x = UpdateInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_update_v1_message_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *UpdateInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateInfo) ProtoMessage() {} - -func (x *UpdateInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_update_v1_message_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateInfo.ProtoReflect.Descriptor instead. 
-func (*UpdateInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_update_v1_message_proto_rawDescGZIP(), []int{2} -} - -func (m *UpdateInfo) GetValue() isUpdateInfo_Value { - if m != nil { - return m.Value - } - return nil -} - -func (x *UpdateInfo) GetAcceptance() *AcceptanceInfo { - if x, ok := x.GetValue().(*UpdateInfo_Acceptance); ok { - return x.Acceptance - } - return nil -} - -func (x *UpdateInfo) GetCompletion() *CompletionInfo { - if x, ok := x.GetValue().(*UpdateInfo_Completion); ok { - return x.Completion - } - return nil -} - -type isUpdateInfo_Value interface { - isUpdateInfo_Value() -} - -type UpdateInfo_Acceptance struct { - // update has been accepted and this is the acceptance metadata - Acceptance *AcceptanceInfo `protobuf:"bytes,1,opt,name=acceptance,proto3,oneof"` -} - -type UpdateInfo_Completion struct { - // update has been completed and this is the completion metadata - Completion *CompletionInfo `protobuf:"bytes,2,opt,name=completion,proto3,oneof"` -} - -func (*UpdateInfo_Acceptance) isUpdateInfo_Value() {} - -func (*UpdateInfo_Completion) isUpdateInfo_Value() {} - -var File_temporal_server_api_update_v1_message_proto protoreflect.FileDescriptor - -var file_temporal_server_api_update_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2b, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x74, 0x65, 0x6d, 0x70, - 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x31, 0x22, 0x2f, 0x0a, 0x0e, 0x41, 0x63, 0x63, 0x65, - 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x08, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x76, 0x65, 
0x6e, - 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0x59, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, - 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x65, 0x76, 0x65, 0x6e, 0x74, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x28, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x62, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x65, - 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x74, 0x63, 0x68, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x22, 0xbf, - 0x01, 0x0a, 0x0a, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x53, 0x0a, 0x0a, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x53, - 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2d, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, - 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x42, - 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, 0x70, 0x64, 
0x61, 0x74, 0x65, 0x2f, 0x76, 0x31, 0x3b, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_temporal_server_api_update_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_update_v1_message_proto_rawDescData = file_temporal_server_api_update_v1_message_proto_rawDesc -) - -func file_temporal_server_api_update_v1_message_proto_rawDescGZIP() []byte { - file_temporal_server_api_update_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_update_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_update_v1_message_proto_rawDescData) - }) - return file_temporal_server_api_update_v1_message_proto_rawDescData -} - -var file_temporal_server_api_update_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 3) -var file_temporal_server_api_update_v1_message_proto_goTypes = []interface{}{ - (*AcceptanceInfo)(nil), // 0: temporal.server.api.update.v1.AcceptanceInfo - (*CompletionInfo)(nil), // 1: temporal.server.api.update.v1.CompletionInfo - (*UpdateInfo)(nil), // 2: temporal.server.api.update.v1.UpdateInfo -} -var file_temporal_server_api_update_v1_message_proto_depIdxs = []int32{ - 0, // 0: temporal.server.api.update.v1.UpdateInfo.acceptance:type_name -> temporal.server.api.update.v1.AcceptanceInfo - 1, // 1: temporal.server.api.update.v1.UpdateInfo.completion:type_name -> temporal.server.api.update.v1.CompletionInfo - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name -} - -func init() { file_temporal_server_api_update_v1_message_proto_init() } -func file_temporal_server_api_update_v1_message_proto_init() { - if File_temporal_server_api_update_v1_message_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_temporal_server_api_update_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptanceInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_update_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CompletionInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_update_v1_message_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_temporal_server_api_update_v1_message_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*UpdateInfo_Acceptance)(nil), - (*UpdateInfo_Completion)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_update_v1_message_proto_rawDesc, - NumEnums: 0, - NumMessages: 3, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_temporal_server_api_update_v1_message_proto_goTypes, - DependencyIndexes: file_temporal_server_api_update_v1_message_proto_depIdxs, - MessageInfos: file_temporal_server_api_update_v1_message_proto_msgTypes, - }.Build() - File_temporal_server_api_update_v1_message_proto = out.File - file_temporal_server_api_update_v1_message_proto_rawDesc = nil - file_temporal_server_api_update_v1_message_proto_goTypes = nil - file_temporal_server_api_update_v1_message_proto_depIdxs = nil -} diff --git a/api/visibilityservice/v1/request_response.go-helpers.pb.go b/api/visibilityservice/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..3373ed06574 --- /dev/null +++ 
b/api/visibilityservice/v1/request_response.go-helpers.pb.go @@ -0,0 +1,154 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package visibilityservice + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ListChasmExecutionsRequest to the protobuf v3 wire format +func (val *ListChasmExecutionsRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ListChasmExecutionsRequest from the protobuf v3 wire format +func (val *ListChasmExecutionsRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ListChasmExecutionsRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ListChasmExecutionsRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ListChasmExecutionsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ListChasmExecutionsRequest + switch t := that.(type) { + case *ListChasmExecutionsRequest: + that1 = t + case ListChasmExecutionsRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ListChasmExecutionsResponse to the protobuf v3 wire format +func (val *ListChasmExecutionsResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ListChasmExecutionsResponse from the protobuf v3 wire format +func (val *ListChasmExecutionsResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ListChasmExecutionsResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ListChasmExecutionsResponse values are equivalent by recursively +// 
comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ListChasmExecutionsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ListChasmExecutionsResponse + switch t := that.(type) { + case *ListChasmExecutionsResponse: + that1 = t + case ListChasmExecutionsResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CountChasmExecutionsRequest to the protobuf v3 wire format +func (val *CountChasmExecutionsRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CountChasmExecutionsRequest from the protobuf v3 wire format +func (val *CountChasmExecutionsRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CountChasmExecutionsRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CountChasmExecutionsRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CountChasmExecutionsRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CountChasmExecutionsRequest + switch t := that.(type) { + case *CountChasmExecutionsRequest: + that1 = t + case CountChasmExecutionsRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CountChasmExecutionsResponse to the protobuf v3 wire format +func (val *CountChasmExecutionsResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CountChasmExecutionsResponse from the protobuf v3 wire format +func (val *CountChasmExecutionsResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CountChasmExecutionsResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CountChasmExecutionsResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CountChasmExecutionsResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CountChasmExecutionsResponse + switch t := that.(type) { + case *CountChasmExecutionsResponse: + that1 = t + case CountChasmExecutionsResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/api/visibilityservice/v1/request_response.pb.go b/api/visibilityservice/v1/request_response.pb.go new file mode 100644 index 00000000000..550dbd50ca0 --- /dev/null +++ b/api/visibilityservice/v1/request_response.pb.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/api/visibilityservice/v1/request_response.proto + +package visibilityservice + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v11 "go.temporal.io/api/common/v1" + v1 "go.temporal.io/server/api/chasm/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ListChasmExecutionsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,1,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Query string `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + // Maximum number of executions per page + PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // Token to continue reading next page of executions. + // Pass in empty slice for first page. 
+ NextPageToken []byte `protobuf:"bytes,6,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListChasmExecutionsRequest) Reset() { + *x = ListChasmExecutionsRequest{} + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListChasmExecutionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListChasmExecutionsRequest) ProtoMessage() {} + +func (x *ListChasmExecutionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListChasmExecutionsRequest.ProtoReflect.Descriptor instead. 
+func (*ListChasmExecutionsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *ListChasmExecutionsRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +func (x *ListChasmExecutionsRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ListChasmExecutionsRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *ListChasmExecutionsRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *ListChasmExecutionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListChasmExecutionsRequest) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +type ListChasmExecutionsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Executions []*v1.VisibilityExecutionInfo `protobuf:"bytes,1,rep,name=executions,proto3" json:"executions,omitempty"` + NextPageToken []byte `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListChasmExecutionsResponse) Reset() { + *x = ListChasmExecutionsResponse{} + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListChasmExecutionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListChasmExecutionsResponse) ProtoMessage() {} + +func (x *ListChasmExecutionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListChasmExecutionsResponse.ProtoReflect.Descriptor instead. +func (*ListChasmExecutionsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *ListChasmExecutionsResponse) GetExecutions() []*v1.VisibilityExecutionInfo { + if x != nil { + return x.Executions + } + return nil +} + +func (x *ListChasmExecutionsResponse) GetNextPageToken() []byte { + if x != nil { + return x.NextPageToken + } + return nil +} + +type CountChasmExecutionsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // (-- api-linter: core::0141::forbidden-types=disabled --) + ArchetypeId uint32 `protobuf:"varint,1,opt,name=archetype_id,json=archetypeId,proto3" json:"archetype_id,omitempty"` + NamespaceId string `protobuf:"bytes,2,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Query string `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CountChasmExecutionsRequest) Reset() { + *x = CountChasmExecutionsRequest{} + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CountChasmExecutionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountChasmExecutionsRequest) ProtoMessage() {} + +func (x *CountChasmExecutionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountChasmExecutionsRequest.ProtoReflect.Descriptor instead. +func (*CountChasmExecutionsRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *CountChasmExecutionsRequest) GetArchetypeId() uint32 { + if x != nil { + return x.ArchetypeId + } + return 0 +} + +func (x *CountChasmExecutionsRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CountChasmExecutionsRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *CountChasmExecutionsRequest) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +type CountChasmExecutionsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // `groups` contains the groups if the request is grouping by a field. + // The list might not be complete, and the counts of each group are approximations. 
+ Groups []*CountChasmExecutionsResponse_AggregationGroup `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CountChasmExecutionsResponse) Reset() { + *x = CountChasmExecutionsResponse{} + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CountChasmExecutionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountChasmExecutionsResponse) ProtoMessage() {} + +func (x *CountChasmExecutionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountChasmExecutionsResponse.ProtoReflect.Descriptor instead. 
+func (*CountChasmExecutionsResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescGZIP(), []int{3} +} + +func (x *CountChasmExecutionsResponse) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *CountChasmExecutionsResponse) GetGroups() []*CountChasmExecutionsResponse_AggregationGroup { + if x != nil { + return x.Groups + } + return nil +} + +type CountChasmExecutionsResponse_AggregationGroup struct { + state protoimpl.MessageState `protogen:"open.v1"` + GroupValues []*v11.Payload `protobuf:"bytes,1,rep,name=group_values,json=groupValues,proto3" json:"group_values,omitempty"` + Count int64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CountChasmExecutionsResponse_AggregationGroup) Reset() { + *x = CountChasmExecutionsResponse_AggregationGroup{} + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CountChasmExecutionsResponse_AggregationGroup) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountChasmExecutionsResponse_AggregationGroup) ProtoMessage() {} + +func (x *CountChasmExecutionsResponse_AggregationGroup) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountChasmExecutionsResponse_AggregationGroup.ProtoReflect.Descriptor instead. 
+func (*CountChasmExecutionsResponse_AggregationGroup) Descriptor() ([]byte, []int) { + return file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *CountChasmExecutionsResponse_AggregationGroup) GetGroupValues() []*v11.Payload { + if x != nil { + return x.GroupValues + } + return nil +} + +func (x *CountChasmExecutionsResponse_AggregationGroup) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +var File_temporal_server_api_visibilityservice_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDesc = "" + + "\n" + + "?temporal/server/api/visibilityservice/v1/request_response.proto\x12(temporal.server.api.visibilityservice.v1\x1a$temporal/api/common/v1/message.proto\x1a*temporal/server/api/chasm/v1/message.proto\"\xdb\x01\n" + + "\x1aListChasmExecutionsRequest\x12!\n" + + "\farchetype_id\x18\x01 \x01(\rR\varchetypeId\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1c\n" + + "\tnamespace\x18\x03 \x01(\tR\tnamespace\x12\x14\n" + + "\x05query\x18\x04 \x01(\tR\x05query\x12\x1b\n" + + "\tpage_size\x18\x05 \x01(\x05R\bpageSize\x12&\n" + + "\x0fnext_page_token\x18\x06 \x01(\fR\rnextPageToken\"\x9c\x01\n" + + "\x1bListChasmExecutionsResponse\x12U\n" + + "\n" + + "executions\x18\x01 \x03(\v25.temporal.server.api.chasm.v1.VisibilityExecutionInfoR\n" + + "executions\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\fR\rnextPageToken\"\x97\x01\n" + + "\x1bCountChasmExecutionsRequest\x12!\n" + + "\farchetype_id\x18\x01 \x01(\rR\varchetypeId\x12!\n" + + "\fnamespace_id\x18\x02 \x01(\tR\vnamespaceId\x12\x1c\n" + + "\tnamespace\x18\x03 \x01(\tR\tnamespace\x12\x14\n" + + "\x05query\x18\x04 \x01(\tR\x05query\"\x93\x02\n" + + "\x1cCountChasmExecutionsResponse\x12\x14\n" + + "\x05count\x18\x01 \x01(\x03R\x05count\x12o\n" + + "\x06groups\x18\x02 
\x03(\v2W.temporal.server.api.visibilityservice.v1.CountChasmExecutionsResponse.AggregationGroupR\x06groups\x1al\n" + + "\x10AggregationGroup\x12B\n" + + "\fgroup_values\x18\x01 \x03(\v2\x1f.temporal.api.common.v1.PayloadR\vgroupValues\x12\x14\n" + + "\x05count\x18\x02 \x01(\x03R\x05countBBZ@go.temporal.io/server/api/visibilityservice/v1;visibilityserviceb\x06proto3" + +var ( + file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDescData +} + +var file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_temporal_server_api_visibilityservice_v1_request_response_proto_goTypes = []any{ + (*ListChasmExecutionsRequest)(nil), // 0: temporal.server.api.visibilityservice.v1.ListChasmExecutionsRequest + (*ListChasmExecutionsResponse)(nil), // 1: temporal.server.api.visibilityservice.v1.ListChasmExecutionsResponse + (*CountChasmExecutionsRequest)(nil), // 2: temporal.server.api.visibilityservice.v1.CountChasmExecutionsRequest + (*CountChasmExecutionsResponse)(nil), // 3: temporal.server.api.visibilityservice.v1.CountChasmExecutionsResponse + (*CountChasmExecutionsResponse_AggregationGroup)(nil), // 4: temporal.server.api.visibilityservice.v1.CountChasmExecutionsResponse.AggregationGroup + 
(*v1.VisibilityExecutionInfo)(nil), // 5: temporal.server.api.chasm.v1.VisibilityExecutionInfo + (*v11.Payload)(nil), // 6: temporal.api.common.v1.Payload +} +var file_temporal_server_api_visibilityservice_v1_request_response_proto_depIdxs = []int32{ + 5, // 0: temporal.server.api.visibilityservice.v1.ListChasmExecutionsResponse.executions:type_name -> temporal.server.api.chasm.v1.VisibilityExecutionInfo + 4, // 1: temporal.server.api.visibilityservice.v1.CountChasmExecutionsResponse.groups:type_name -> temporal.server.api.visibilityservice.v1.CountChasmExecutionsResponse.AggregationGroup + 6, // 2: temporal.server.api.visibilityservice.v1.CountChasmExecutionsResponse.AggregationGroup.group_values:type_name -> temporal.api.common.v1.Payload + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_temporal_server_api_visibilityservice_v1_request_response_proto_init() } +func file_temporal_server_api_visibilityservice_v1_request_response_proto_init() { + if File_temporal_server_api_visibilityservice_v1_request_response_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDesc), len(file_temporal_server_api_visibilityservice_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_api_visibilityservice_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_api_visibilityservice_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_api_visibilityservice_v1_request_response_proto_msgTypes, + 
}.Build() + File_temporal_server_api_visibilityservice_v1_request_response_proto = out.File + file_temporal_server_api_visibilityservice_v1_request_response_proto_goTypes = nil + file_temporal_server_api_visibilityservice_v1_request_response_proto_depIdxs = nil +} diff --git a/api/workflow/v1/message.go-helpers.pb.go b/api/workflow/v1/message.go-helpers.pb.go index 05e49882929..45567b2be6a 100644 --- a/api/workflow/v1/message.go-helpers.pb.go +++ b/api/workflow/v1/message.go-helpers.pb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
package workflow @@ -66,6 +42,43 @@ func (this *ParentExecutionInfo) Equal(that interface{}) bool { return proto.Equal(this, that1) } +// Marshal an object of type RootExecutionInfo to the protobuf v3 wire format +func (val *RootExecutionInfo) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RootExecutionInfo from the protobuf v3 wire format +func (val *RootExecutionInfo) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RootExecutionInfo) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RootExecutionInfo values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RootExecutionInfo) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RootExecutionInfo + switch t := that.(type) { + case *RootExecutionInfo: + that1 = t + case RootExecutionInfo: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + // Marshal an object of type BaseExecutionInfo to the protobuf v3 wire format func (val *BaseExecutionInfo) Marshal() ([]byte, error) { return proto.Marshal(val) diff --git a/api/workflow/v1/message.pb.go b/api/workflow/v1/message.pb.go index fe199975444..bf4977256d4 100644 --- a/api/workflow/v1/message.pb.go +++ b/api/workflow/v1/message.pb.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Code generated by protoc-gen-go. DO NOT EDIT. 
// plugins: // protoc-gen-go @@ -31,6 +9,7 @@ package workflow import ( reflect "reflect" sync "sync" + unsafe "unsafe" v1 "go.temporal.io/api/common/v1" v11 "go.temporal.io/server/api/clock/v1" @@ -46,25 +25,29 @@ const ( ) type ParentExecutionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Execution *v1.WorkflowExecution `protobuf:"bytes,3,opt,name=execution,proto3" json:"execution,omitempty"` - InitiatedId int64 `protobuf:"varint,4,opt,name=initiated_id,json=initiatedId,proto3" json:"initiated_id,omitempty"` - Clock *v11.VectorClock `protobuf:"bytes,5,opt,name=clock,proto3" json:"clock,omitempty"` - InitiatedVersion int64 `protobuf:"varint,6,opt,name=initiated_version,json=initiatedVersion,proto3" json:"initiated_version,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Execution *v1.WorkflowExecution `protobuf:"bytes,3,opt,name=execution,proto3" json:"execution,omitempty"` + InitiatedId int64 `protobuf:"varint,4,opt,name=initiated_id,json=initiatedId,proto3" json:"initiated_id,omitempty"` + Clock *v11.VectorClock `protobuf:"bytes,5,opt,name=clock,proto3" json:"clock,omitempty"` + InitiatedVersion int64 `protobuf:"varint,6,opt,name=initiated_version,json=initiatedVersion,proto3" json:"initiated_version,omitempty"` + // When present, child workflow starts as Pinned to this Worker Deployment Version. 
+ // Set only if the parent execution is effectively Pinned to a Worker Deployment Version when it + // first starts the child workflow, and the child workflow is starting on a Task Queue belonging + // to the same Worker Deployment Version. + // Not set in the subsequent execution if the child workflow continues-as-new. + // Deprecated. Replaced with `inherited_pinned_version` in WorkflowExecutionStartedEventAttributes. + PinnedWorkerDeploymentVersion string `protobuf:"bytes,7,opt,name=pinned_worker_deployment_version,json=pinnedWorkerDeploymentVersion,proto3" json:"pinned_worker_deployment_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ParentExecutionInfo) Reset() { *x = ParentExecutionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ParentExecutionInfo) String() string { @@ -75,7 +58,7 @@ func (*ParentExecutionInfo) ProtoMessage() {} func (x *ParentExecutionInfo) ProtoReflect() protoreflect.Message { mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -132,23 +115,71 @@ func (x *ParentExecutionInfo) GetInitiatedVersion() int64 { return 0 } -type BaseExecutionInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +func (x *ParentExecutionInfo) GetPinnedWorkerDeploymentVersion() string { + if x != nil { + return x.PinnedWorkerDeploymentVersion + } + return "" +} + +type RootExecutionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Execution *v1.WorkflowExecution 
`protobuf:"bytes,1,opt,name=execution,proto3" json:"execution,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` - LowestCommonAncestorEventId int64 `protobuf:"varint,2,opt,name=lowest_common_ancestor_event_id,json=lowestCommonAncestorEventId,proto3" json:"lowest_common_ancestor_event_id,omitempty"` - LowestCommonAncestorEventVersion int64 `protobuf:"varint,3,opt,name=lowest_common_ancestor_event_version,json=lowestCommonAncestorEventVersion,proto3" json:"lowest_common_ancestor_event_version,omitempty"` +func (x *RootExecutionInfo) Reset() { + *x = RootExecutionInfo{} + mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *BaseExecutionInfo) Reset() { - *x = BaseExecutionInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[1] +func (x *RootExecutionInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RootExecutionInfo) ProtoMessage() {} + +func (x *RootExecutionInfo) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[1] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RootExecutionInfo.ProtoReflect.Descriptor instead. 
+func (*RootExecutionInfo) Descriptor() ([]byte, []int) { + return file_temporal_server_api_workflow_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *RootExecutionInfo) GetExecution() *v1.WorkflowExecution { + if x != nil { + return x.Execution } + return nil +} + +type BaseExecutionInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + RunId string `protobuf:"bytes,1,opt,name=run_id,json=runId,proto3" json:"run_id,omitempty"` + LowestCommonAncestorEventId int64 `protobuf:"varint,2,opt,name=lowest_common_ancestor_event_id,json=lowestCommonAncestorEventId,proto3" json:"lowest_common_ancestor_event_id,omitempty"` + LowestCommonAncestorEventVersion int64 `protobuf:"varint,3,opt,name=lowest_common_ancestor_event_version,json=lowestCommonAncestorEventVersion,proto3" json:"lowest_common_ancestor_event_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BaseExecutionInfo) Reset() { + *x = BaseExecutionInfo{} + mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *BaseExecutionInfo) String() string { @@ -158,8 +189,8 @@ func (x *BaseExecutionInfo) String() string { func (*BaseExecutionInfo) ProtoMessage() {} func (x *BaseExecutionInfo) ProtoReflect() protoreflect.Message { - mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + mi := &file_temporal_server_api_workflow_v1_message_proto_msgTypes[2] + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -171,7 +202,7 @@ func (x *BaseExecutionInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use BaseExecutionInfo.ProtoReflect.Descriptor instead. 
func (*BaseExecutionInfo) Descriptor() ([]byte, []int) { - return file_temporal_server_api_workflow_v1_message_proto_rawDescGZIP(), []int{1} + return file_temporal_server_api_workflow_v1_message_proto_rawDescGZIP(), []int{2} } func (x *BaseExecutionInfo) GetRunId() string { @@ -197,82 +228,53 @@ func (x *BaseExecutionInfo) GetLowestCommonAncestorEventVersion() int64 { var File_temporal_server_api_workflow_v1_message_proto protoreflect.FileDescriptor -var file_temporal_server_api_workflow_v1_message_proto_rawDesc = []byte{ - 0x0a, 0x2d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x76, 0x31, 0x2f, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x76, 0x31, 0x1a, 0x24, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x2a, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2f, 0x76, 0x31, 0x2f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc8, 0x02, 0x0a, 0x13, 0x50, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x25, 0x0a, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x20, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x42, 0x02, 0x68, 0x00, 0x12, 0x4b, 0x0a, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, - 0x72, 0x61, 0x6c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x09, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, - 0x12, 0x25, 0x0a, 0x0c, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x49, - 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x43, 0x0a, 0x05, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x63, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x76, - 0x31, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x63, 0x6c, - 0x6f, 0x63, 0x6b, 0x42, 0x02, 0x68, 0x00, 0x12, 0x2f, 0x0a, 0x11, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x74, 0x65, 0x64, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x02, 0x68, 0x00, 0x22, 0xcc, 0x01, 0x0a, 0x11, 0x42, 0x61, 0x73, 0x65, 0x45, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x19, 0x0a, 0x06, 0x72, 0x75, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x72, 0x75, 0x6e, 0x49, 0x64, - 0x42, 0x02, 0x68, 0x00, 0x12, 0x48, 0x0a, 0x1f, 0x6c, 0x6f, 0x77, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 
0x72, 0x5f, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1b, 0x6c, 0x6f, 0x77, - 0x65, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x42, 0x02, 0x68, 0x00, 0x12, 0x52, 0x0a, 0x24, 0x6c, 0x6f, - 0x77, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x74, 0x6f, 0x72, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x20, 0x6c, 0x6f, 0x77, 0x65, 0x73, 0x74, 0x43, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x68, 0x00, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x6f, - 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x6c, 0x2e, 0x69, 0x6f, 0x2f, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x76, - 0x31, 0x3b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_temporal_server_api_workflow_v1_message_proto_rawDesc = "" + + "\n" + + "-temporal/server/api/workflow/v1/message.proto\x12\x1ftemporal.server.api.workflow.v1\x1a$temporal/api/common/v1/message.proto\x1a*temporal/server/api/clock/v1/message.proto\"\xff\x02\n" + + "\x13ParentExecutionInfo\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\tR\tnamespace\x12G\n" + + "\texecution\x18\x03 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\x12!\n" + + "\finitiated_id\x18\x04 \x01(\x03R\vinitiatedId\x12?\n" + + "\x05clock\x18\x05 \x01(\v2).temporal.server.api.clock.v1.VectorClockR\x05clock\x12+\n" + + "\x11initiated_version\x18\x06 \x01(\x03R\x10initiatedVersion\x12G\n" + + " pinned_worker_deployment_version\x18\a 
\x01(\tR\x1dpinnedWorkerDeploymentVersionJ\x04\b\b\x10\t\"\\\n" + + "\x11RootExecutionInfo\x12G\n" + + "\texecution\x18\x01 \x01(\v2).temporal.api.common.v1.WorkflowExecutionR\texecution\"\xc0\x01\n" + + "\x11BaseExecutionInfo\x12\x15\n" + + "\x06run_id\x18\x01 \x01(\tR\x05runId\x12D\n" + + "\x1flowest_common_ancestor_event_id\x18\x02 \x01(\x03R\x1blowestCommonAncestorEventId\x12N\n" + + "$lowest_common_ancestor_event_version\x18\x03 \x01(\x03R lowestCommonAncestorEventVersionB0Z.go.temporal.io/server/api/workflow/v1;workflowb\x06proto3" var ( file_temporal_server_api_workflow_v1_message_proto_rawDescOnce sync.Once - file_temporal_server_api_workflow_v1_message_proto_rawDescData = file_temporal_server_api_workflow_v1_message_proto_rawDesc + file_temporal_server_api_workflow_v1_message_proto_rawDescData []byte ) func file_temporal_server_api_workflow_v1_message_proto_rawDescGZIP() []byte { file_temporal_server_api_workflow_v1_message_proto_rawDescOnce.Do(func() { - file_temporal_server_api_workflow_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_temporal_server_api_workflow_v1_message_proto_rawDescData) + file_temporal_server_api_workflow_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_api_workflow_v1_message_proto_rawDesc), len(file_temporal_server_api_workflow_v1_message_proto_rawDesc))) }) return file_temporal_server_api_workflow_v1_message_proto_rawDescData } -var file_temporal_server_api_workflow_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_temporal_server_api_workflow_v1_message_proto_goTypes = []interface{}{ +var file_temporal_server_api_workflow_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_temporal_server_api_workflow_v1_message_proto_goTypes = []any{ (*ParentExecutionInfo)(nil), // 0: temporal.server.api.workflow.v1.ParentExecutionInfo - (*BaseExecutionInfo)(nil), // 1: temporal.server.api.workflow.v1.BaseExecutionInfo - 
(*v1.WorkflowExecution)(nil), // 2: temporal.api.common.v1.WorkflowExecution - (*v11.VectorClock)(nil), // 3: temporal.server.api.clock.v1.VectorClock + (*RootExecutionInfo)(nil), // 1: temporal.server.api.workflow.v1.RootExecutionInfo + (*BaseExecutionInfo)(nil), // 2: temporal.server.api.workflow.v1.BaseExecutionInfo + (*v1.WorkflowExecution)(nil), // 3: temporal.api.common.v1.WorkflowExecution + (*v11.VectorClock)(nil), // 4: temporal.server.api.clock.v1.VectorClock } var file_temporal_server_api_workflow_v1_message_proto_depIdxs = []int32{ - 2, // 0: temporal.server.api.workflow.v1.ParentExecutionInfo.execution:type_name -> temporal.api.common.v1.WorkflowExecution - 3, // 1: temporal.server.api.workflow.v1.ParentExecutionInfo.clock:type_name -> temporal.server.api.clock.v1.VectorClock - 2, // [2:2] is the sub-list for method output_type - 2, // [2:2] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 3, // 0: temporal.server.api.workflow.v1.ParentExecutionInfo.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 4, // 1: temporal.server.api.workflow.v1.ParentExecutionInfo.clock:type_name -> temporal.server.api.clock.v1.VectorClock + 3, // 2: temporal.server.api.workflow.v1.RootExecutionInfo.execution:type_name -> temporal.api.common.v1.WorkflowExecution + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func init() { file_temporal_server_api_workflow_v1_message_proto_init() } @@ -280,39 +282,13 @@ func file_temporal_server_api_workflow_v1_message_proto_init() { if File_temporal_server_api_workflow_v1_message_proto != nil { return } - if !protoimpl.UnsafeEnabled { - 
file_temporal_server_api_workflow_v1_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParentExecutionInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_temporal_server_api_workflow_v1_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BaseExecutionInfo); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_temporal_server_api_workflow_v1_message_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_api_workflow_v1_message_proto_rawDesc), len(file_temporal_server_api_workflow_v1_message_proto_rawDesc)), NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 0, }, @@ -321,7 +297,6 @@ func file_temporal_server_api_workflow_v1_message_proto_init() { MessageInfos: file_temporal_server_api_workflow_v1_message_proto_msgTypes, }.Build() File_temporal_server_api_workflow_v1_message_proto = out.File - file_temporal_server_api_workflow_v1_message_proto_rawDesc = nil file_temporal_server_api_workflow_v1_message_proto_goTypes = nil file_temporal_server_api_workflow_v1_message_proto_depIdxs = nil } diff --git a/build/go.mod b/build/go.mod deleted file mode 100644 index 1e3a5a25ec6..00000000000 --- a/build/go.mod +++ /dev/null @@ -1,10 +0,0 @@ -module build - -go 1.18 - -require ( - go.temporal.io/api v1.29.0 - google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 -) - -require google.golang.org/protobuf v1.33.0 // indirect diff --git a/build/go.sum b/build/go.sum deleted file mode 100644 index cbff0cadf03..00000000000 --- a/build/go.sum +++ /dev/null @@ -1,12 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -go.temporal.io/api v1.29.0 h1:bLviMlvD1SYqKPRGfRAv1/uV+8cNYmuhKIhTgLSs8OY= -go.temporal.io/api v1.29.0/go.mod h1:sAtVCXkwNaCtHVMP6B/FlK8PcEnaDjJ+KHCwS/ufscI= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/build/tools.go b/build/tools.go deleted file mode 100644 index eac6a2a13e6..00000000000 --- a/build/tools.go +++ /dev/null @@ -1,31 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package build - -import ( - _ "go.temporal.io/api/cmd/protoc-gen-go-helpers" - _ "go.temporal.io/api/cmd/protogen" - _ "google.golang.org/grpc/cmd/protoc-gen-go-grpc" -) diff --git a/chasm/archetype.go b/chasm/archetype.go new file mode 100644 index 00000000000..c1dbfdbc5db --- /dev/null +++ b/chasm/archetype.go @@ -0,0 +1,17 @@ +package chasm + +// Archetype is the fully qualified name of the root component of a CHASM execution. +type Archetype = string + +// ArchetypeID is CHASM framework's internal ID for an Archetype. +type ArchetypeID = uint32 + +const ( + // UnspecifiedArchetypeID is a reserved special ArchetypeID value indicating that the + // ArchetypeID is not specified. + // This typically happens when: + // 1. The chasm tree is not yet initialized with a root component, + // 2. If it's a field in a persisted record, it means the record is persisted before archetypeID + // was introduced (basically Workflow). 
+ UnspecifiedArchetypeID ArchetypeID = 0 +) diff --git a/chasm/callback.go b/chasm/callback.go new file mode 100644 index 00000000000..46392fb4998 --- /dev/null +++ b/chasm/callback.go @@ -0,0 +1,10 @@ +package chasm + +const ( + CallbackLibraryName = "callback" + CallbackComponentName = "callback" +) + +var ( + CallbackComponentID = GenerateTypeID(FullyQualifiedName(CallbackLibraryName, CallbackComponentName)) +) diff --git a/chasm/chasmtest/task_helpers.go b/chasm/chasmtest/task_helpers.go new file mode 100644 index 00000000000..a8e6d3c6ec9 --- /dev/null +++ b/chasm/chasmtest/task_helpers.go @@ -0,0 +1,106 @@ +package chasmtest + +import ( + "context" + "fmt" + + "go.temporal.io/server/chasm" +) + +// ExecutePureTask validates and executes a pure task atomically via [Engine.UpdateComponent]. +// It returns taskDropped set to true if [chasm.PureTaskHandler.Validate] returns (false, nil), +// indicating the task is no longer relevant and was not executed. +// +// The component ref is resolved automatically — no separate [Engine.ReadComponent] call to +// obtain a ref is needed. Pass the component pointer directly. +// +// This helper ensures that Validate is always exercised alongside Execute, matching the real +// engine's behavior. Use [chasm.MockMutableContext] directly when you need to inspect the +// typed task payloads added to the context during execution. 
+func ExecutePureTask[C chasm.Component, T any]( + ctx context.Context, + e *Engine, + component C, + handler chasm.PureTaskHandler[C, T], + attrs chasm.TaskAttributes, + task T, +) (taskDropped bool, err error) { + ref, err := e.refForComponent(component) + if err != nil { + return false, err + } + + engineCtx := chasm.NewEngineContext(ctx, e) + _, err = e.UpdateComponent( + engineCtx, + ref, + func(mutableCtx chasm.MutableContext, c chasm.Component) error { + typedC, ok := c.(C) + if !ok { + return fmt.Errorf("component type mismatch: got %T", c) + } + var valid bool + valid, err = handler.Validate(mutableCtx, typedC, attrs, task) + if err != nil { + return err + } + if !valid { + taskDropped = true + return nil + } + return handler.Execute(mutableCtx, typedC, attrs, task) + }, + ) + return taskDropped, err +} + +// ExecuteSideEffectTask validates and executes a side effect task. +// Validation runs via [Engine.ReadComponent] in read only mode, and if valid, +// [chasm.SideEffectTaskHandler.Execute] is called with an engine context so that +// [chasm.UpdateComponent] and [chasm.ReadComponent] inside the handler route through +// the test engine. +// +// It returns taskDropped set to true if [chasm.SideEffectTaskHandler.Validate] returns (false, nil), +// indicating the task is no longer relevant and was not executed. +// +// The component ref is resolved automatically — no separate [Engine.ReadComponent] call to +// obtain a ref is needed. Pass the component pointer directly. +// +// Use [chasm.MockMutableContext] directly when you need to inspect typed task payloads added +// during execution, since the real engine serializes them into history layer tasks. 
+func ExecuteSideEffectTask[C chasm.Component, T any]( + ctx context.Context, + e *Engine, + component C, + handler chasm.SideEffectTaskHandler[C, T], + attrs chasm.TaskAttributes, + task T, +) (taskDropped bool, err error) { + ref, err := e.refForComponent(component) + if err != nil { + return false, err + } + + engineCtx := chasm.NewEngineContext(ctx, e) + + var valid bool + if err = e.ReadComponent( + engineCtx, + ref, + func(chasmCtx chasm.Context, c chasm.Component) error { + typedC, ok := c.(C) + if !ok { + return fmt.Errorf("component type mismatch: got %T", c) + } + valid, err = handler.Validate(chasmCtx, typedC, attrs, task) + return err + }, + ); err != nil { + return false, err + } + if !valid { + return true, nil + } + + return false, handler.Execute(engineCtx, ref, attrs, task) +} diff --git a/chasm/chasmtest/test_engine.go b/chasm/chasmtest/test_engine.go new file mode 100644 index 00000000000..3c6b122969e --- /dev/null +++ b/chasm/chasmtest/test_engine.go @@ -0,0 +1,718 @@ +package chasmtest + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/testing/testlogger" + "go.temporal.io/server/service/history/tasks" +) + +type ( + EngineOption func(*Engine) + + // Engine is a lightweight in memory CHASM engine for unit tests. It implements + // [chasm.Engine] and supports the full set of conflict and reuse policies, as + // well as blocking [PollComponent] with [NotifyExecution], matching the behavior + // of the production engine as closely as possible without persistence or shard logic. 
+ Engine struct { + t *testing.T + registry *chasm.Registry + logger log.Logger + metrics metrics.Handler + timeSource clock.TimeSource + // currentExecutions maps (namespaceID, businessID) to the latest run (running or closed). + currentExecutions map[businessKey]*execution + // allExecutions maps (namespaceID, businessID, runID) to any run, for lookups by specific RunID. + allExecutions map[runKey]*execution + notifier *executionNotifier + } + + execution struct { + key chasm.ExecutionKey + node *chasm.Node + backend *chasm.MockNodeBackend + root chasm.RootComponent + requestID string + } + + businessKey struct { + namespaceID string + businessID string + } + + runKey struct { + namespaceID string + businessID string + runID string + } +) + +// WithTimeSource overrides the engine's default time source. +// The default is a [clock.EventTimeSource] initialized to [time.Now] at engine creation, +// which gives deterministic, frozen time suitable for most unit tests. +// Pass a *clock.EventTimeSource when tests need to advance time explicitly; +// the caller holds the reference and calls ts.Update(...) directly. 
+func WithTimeSource(ts clock.TimeSource) EngineOption { + return func(e *Engine) { + e.timeSource = ts + } +} + +var defaultTransitionOptions = chasm.TransitionOptions{ + ReusePolicy: chasm.BusinessIDReusePolicyAllowDuplicate, + ConflictPolicy: chasm.BusinessIDConflictPolicyFail, +} + +var _ chasm.Engine = (*Engine)(nil) + +func NewEngine( + t *testing.T, + registry *chasm.Registry, + opts ...EngineOption, +) *Engine { + t.Helper() + + ts := clock.NewEventTimeSource() + ts.Update(time.Now()) + e := &Engine{ + t: t, + registry: registry, + logger: testlogger.NewTestLogger(t, testlogger.FailOnExpectedErrorOnly), + metrics: metrics.NoopMetricsHandler, + timeSource: ts, + currentExecutions: make(map[businessKey]*execution), + allExecutions: make(map[runKey]*execution), + notifier: newExecutionNotifier(), + } + + for _, opt := range opts { + opt(e) + } + + return e +} + +// Tasks returns all physical tasks scheduled for the execution identified by ref, grouped by category. +// Logical tasks accumulate across every [Engine.UpdateComponent], [Engine.StartExecution], and +// [Engine.UpdateWithStartExecution] call on the execution, and convert to physical tasks on CloseTransaction, +// matching what the real engine would deliver to task processors. +func (e *Engine) Tasks(ref chasm.ComponentRef) (map[tasks.Category][]tasks.Task, error) { + exec, err := e.executionForRef(ref) + if err != nil { + return nil, err + } + // Return a shallow copy so callers cannot mutate the internal task lists. + result := make(map[tasks.Category][]tasks.Task, len(exec.backend.TasksByCategory)) + for cat, ts := range exec.backend.TasksByCategory { + result[cat] = ts + } + return result, nil +} + +func (e *Engine) StartExecution( + ctx context.Context, + ref chasm.ComponentRef, + startFn func(chasm.MutableContext) (chasm.RootComponent, error), + opts ...chasm.TransitionOption, +) (chasm.StartExecutionResult, error) { + options := constructTransitionOptions(opts...) 
+ bKey := newBusinessKey(ref.ExecutionKey) + + current, hasCurrent := e.currentExecutions[bKey] + if hasCurrent { + // if the requestID matches the original create request, return the existing run. + if options.RequestID != "" && options.RequestID == current.requestID { + serializedRef, err := current.node.Ref(current.root) + if err != nil { + return chasm.StartExecutionResult{}, err + } + return chasm.StartExecutionResult{ + ExecutionKey: current.key, + ExecutionRef: serializedRef, + Created: false, + }, nil + } + + switch current.backend.GetExecutionState().State { + case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: + return e.handleConflictPolicy(ctx, ref, current, startFn, options) + case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: + return e.handleReusePolicy(ctx, ref, current, startFn, options) + default: + return chasm.StartExecutionResult{}, serviceerror.NewInternal( + fmt.Sprintf("unexpected execution state: %v", current.backend.GetExecutionState().State), + ) + } + } + + return e.startNew(ctx, ref.ExecutionKey, startFn, options.RequestID) +} + +func (e *Engine) UpdateWithStartExecution( + ctx context.Context, + ref chasm.ComponentRef, + startFn func(chasm.MutableContext) (chasm.RootComponent, error), + updateFn func(chasm.MutableContext, chasm.Component) error, + opts ...chasm.TransitionOption, +) (chasm.EngineUpdateWithStartExecutionResult, error) { + options := constructTransitionOptions(opts...) 
+ bKey := newBusinessKey(ref.ExecutionKey) + + current, hasCurrent := e.currentExecutions[bKey] + if hasCurrent { + switch current.backend.GetExecutionState().State { + case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: + serializedRef, err := e.updateComponentInExecution(ctx, current, ref, updateFn) + if err != nil { + return chasm.EngineUpdateWithStartExecutionResult{}, err + } + return chasm.EngineUpdateWithStartExecutionResult{ + ExecutionKey: current.key, + ExecutionRef: serializedRef, + Created: false, + }, nil + case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: + switch options.ReusePolicy { + case chasm.BusinessIDReusePolicyAllowDuplicate: + case chasm.BusinessIDReusePolicyAllowDuplicateFailedOnly: + if !executionFailed(current) { + return chasm.EngineUpdateWithStartExecutionResult{}, chasm.NewExecutionAlreadyStartedErr( + fmt.Sprintf( + "CHASM execution already completed successfully. BusinessID: %s, RunID: %s, ID Reuse Policy: %v", + ref.BusinessID, current.key.RunID, options.ReusePolicy, + ), + current.requestID, + current.key.RunID, + ) + } + case chasm.BusinessIDReusePolicyRejectDuplicate: + return chasm.EngineUpdateWithStartExecutionResult{}, chasm.NewExecutionAlreadyStartedErr( + fmt.Sprintf( + "CHASM execution already finished. 
BusinessID: %s, RunID: %s, ID Reuse Policy: %v", + ref.BusinessID, current.key.RunID, options.ReusePolicy, + ), + current.requestID, + current.key.RunID, + ) + default: + return chasm.EngineUpdateWithStartExecutionResult{}, serviceerror.NewInternal( + fmt.Sprintf("unknown business ID reuse policy: %v", options.ReusePolicy), + ) + } + default: + return chasm.EngineUpdateWithStartExecutionResult{}, serviceerror.NewInternal( + fmt.Sprintf("unexpected execution state: %v", current.backend.GetExecutionState().State), + ) + } + } + + return e.startAndUpdateNew(ctx, ref.ExecutionKey, startFn, updateFn, options.RequestID) +} + +func (e *Engine) UpdateComponent( + ctx context.Context, + ref chasm.ComponentRef, + updateFn func(chasm.MutableContext, chasm.Component) error, + _ ...chasm.TransitionOption, +) ([]byte, error) { + execution, err := e.executionForRef(ref) + if err != nil { + return nil, err + } + return e.updateComponentInExecution(ctx, execution, ref, updateFn) +} + +func (e *Engine) ReadComponent( + ctx context.Context, + ref chasm.ComponentRef, + readFn func(chasm.Context, chasm.Component) error, + _ ...chasm.TransitionOption, +) error { + execution, err := e.executionForRef(ref) + if err != nil { + return err + } + + chasmCtx := chasm.NewContext(ctx, execution.node) + component, err := execution.node.Component(chasmCtx, ref) + if err != nil { + return err + } + + return readFn(chasmCtx, component) +} + +// PollComponent waits until the supplied predicate is satisfied when evaluated against the +// component identified by ref. If the predicate is true immediately it returns without blocking. +// Otherwise it subscribes to [NotifyExecution] signals and re evaluates after each one, just +// like the production engine. Returns (nil, nil) if ctx is cancelled, matching the long poll +// timeout semantics of the production engine where the caller is expected to re-poll. 
+func (e *Engine) PollComponent( + ctx context.Context, + ref chasm.ComponentRef, + predicate func(chasm.Context, chasm.Component) (bool, error), + _ ...chasm.TransitionOption, +) ([]byte, error) { + executionKey := ref.ExecutionKey + + checkPredicate := func() ([]byte, bool, error) { + exec, err := e.executionForRef(ref) + if err != nil { + return nil, false, err + } + chasmCtx := chasm.NewContext(ctx, exec.node) + component, err := exec.node.Component(chasmCtx, ref) + if err != nil { + return nil, false, err + } + satisfied, err := predicate(chasmCtx, component) + if err != nil || !satisfied { + return nil, satisfied, err + } + serializedRef, err := exec.node.Ref(component) + return serializedRef, true, err + } + + // Evaluate once before subscribing. + serializedRef, satisfied, err := checkPredicate() + if err != nil || satisfied { + return serializedRef, err + } + + for { + ch, unsubscribe := e.notifier.subscribe(executionKey) + // Re evaluate while holding the subscription to avoid missing a notification + // that arrives between the failed check above and this subscribe call. + serializedRef, satisfied, err = checkPredicate() + if err != nil || satisfied { + unsubscribe() + return serializedRef, err + } + + select { + case <-ch: + unsubscribe() + serializedRef, satisfied, err = checkPredicate() + if err != nil || satisfied { + return serializedRef, err + } + case <-ctx.Done(): + unsubscribe() + return nil, nil //nolint:nilerr // nil, nil = long-poll timeout; caller should re-poll + } + } +} + +// NotifyExecution wakes up any [PollComponent] callers waiting on the execution. 
+func (e *Engine) NotifyExecution(key chasm.ExecutionKey) { + e.notifier.notify(key) +} + +func (e *Engine) DeleteExecution( + _ context.Context, + ref chasm.ComponentRef, + _ chasm.DeleteExecutionRequest, +) error { + exec, err := e.executionForRef(ref) + if err != nil { + return err + } + rKey := newRunKey(exec.key) + bKey := newBusinessKey(exec.key) + delete(e.allExecutions, rKey) + // Only evict from current if this is still the current run for the businessID. + if cur, ok := e.currentExecutions[bKey]; ok && cur == exec { + delete(e.currentExecutions, bKey) + } + return nil +} + +// handleConflictPolicy is called when a StartExecution arrives for a business ID whose +// current run is still running. +func (e *Engine) handleConflictPolicy( + ctx context.Context, + ref chasm.ComponentRef, + current *execution, + startFn func(chasm.MutableContext) (chasm.RootComponent, error), + options chasm.TransitionOptions, +) (chasm.StartExecutionResult, error) { + switch options.ConflictPolicy { + case chasm.BusinessIDConflictPolicyFail: + return chasm.StartExecutionResult{}, chasm.NewExecutionAlreadyStartedErr( + fmt.Sprintf( + "CHASM execution still running. 
BusinessID: %s, RunID: %s, ID Conflict Policy: %v", + ref.BusinessID, current.key.RunID, options.ConflictPolicy, + ), + current.requestID, + current.key.RunID, + ) + case chasm.BusinessIDConflictPolicyTerminateExisting: + _, _ = current.backend.UpdateWorkflowStateStatus( + enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + ) + return e.startNew(ctx, ref.ExecutionKey, startFn, options.RequestID) + case chasm.BusinessIDConflictPolicyUseExisting: + serializedRef, err := current.node.Ref(current.root) + if err != nil { + return chasm.StartExecutionResult{}, err + } + return chasm.StartExecutionResult{ + ExecutionKey: current.key, + ExecutionRef: serializedRef, + Created: false, + }, nil + default: + return chasm.StartExecutionResult{}, serviceerror.NewInternal( + fmt.Sprintf("unknown business ID conflict policy: %v", options.ConflictPolicy), + ) + } +} + +// handleReusePolicy is called when a StartExecution arrives for a business ID whose +// current run is closed or completed. +func (e *Engine) handleReusePolicy( + ctx context.Context, + ref chasm.ComponentRef, + current *execution, + startFn func(chasm.MutableContext) (chasm.RootComponent, error), + options chasm.TransitionOptions, +) (chasm.StartExecutionResult, error) { + switch options.ReusePolicy { + case chasm.BusinessIDReusePolicyAllowDuplicate: + case chasm.BusinessIDReusePolicyAllowDuplicateFailedOnly: + if !executionFailed(current) { + return chasm.StartExecutionResult{}, chasm.NewExecutionAlreadyStartedErr( + fmt.Sprintf( + "CHASM execution already completed successfully. BusinessID: %s, RunID: %s, ID Reuse Policy: %v", + ref.BusinessID, current.key.RunID, options.ReusePolicy, + ), + current.requestID, + current.key.RunID, + ) + } + case chasm.BusinessIDReusePolicyRejectDuplicate: + return chasm.StartExecutionResult{}, chasm.NewExecutionAlreadyStartedErr( + fmt.Sprintf( + "CHASM execution already finished. 
BusinessID: %s, RunID: %s, ID Reuse Policy: %v", + ref.BusinessID, current.key.RunID, options.ReusePolicy, + ), + current.requestID, + current.key.RunID, + ) + default: + return chasm.StartExecutionResult{}, serviceerror.NewInternal( + fmt.Sprintf("unknown business ID reuse policy: %v", options.ReusePolicy), + ) + } + return e.startNew(ctx, ref.ExecutionKey, startFn, options.RequestID) +} + +// startNew creates a new execution and registers it as the current run for the business ID. +func (e *Engine) startNew( + ctx context.Context, + key chasm.ExecutionKey, + startFn func(chasm.MutableContext) (chasm.RootComponent, error), + requestID string, +) (chasm.StartExecutionResult, error) { + exec := e.newExecution(key) + exec.requestID = requestID + + mutableCtx := chasm.NewMutableContext(ctx, exec.node) + root, err := startFn(mutableCtx) + if err != nil { + return chasm.StartExecutionResult{}, err + } + if err := exec.node.SetRootComponent(root); err != nil { + return chasm.StartExecutionResult{}, err + } + if _, err = exec.node.CloseTransaction(); err != nil { + return chasm.StartExecutionResult{}, err + } + + exec.root = root + e.currentExecutions[newBusinessKey(exec.key)] = exec + e.allExecutions[newRunKey(exec.key)] = exec + + serializedRef, err := exec.node.Ref(root) + if err != nil { + return chasm.StartExecutionResult{}, err + } + + return chasm.StartExecutionResult{ + ExecutionKey: exec.key, + ExecutionRef: serializedRef, + Created: true, + }, nil +} + +// startAndUpdateNew creates a new execution, applies startFn and updateFn in the same +// transaction, and registers it as the current run for the business ID. 
+func (e *Engine) startAndUpdateNew( + ctx context.Context, + key chasm.ExecutionKey, + startFn func(chasm.MutableContext) (chasm.RootComponent, error), + updateFn func(chasm.MutableContext, chasm.Component) error, + requestID string, +) (chasm.EngineUpdateWithStartExecutionResult, error) { + exec := e.newExecution(key) + exec.requestID = requestID + + mutableCtx := chasm.NewMutableContext(ctx, exec.node) + root, err := startFn(mutableCtx) + if err != nil { + return chasm.EngineUpdateWithStartExecutionResult{}, err + } + if err := exec.node.SetRootComponent(root); err != nil { + return chasm.EngineUpdateWithStartExecutionResult{}, err + } + if err := updateFn(mutableCtx, root); err != nil { + return chasm.EngineUpdateWithStartExecutionResult{}, err + } + if _, err = exec.node.CloseTransaction(); err != nil { + return chasm.EngineUpdateWithStartExecutionResult{}, err + } + + exec.root = root + e.currentExecutions[newBusinessKey(exec.key)] = exec + e.allExecutions[newRunKey(exec.key)] = exec + + serializedRef, err := exec.node.Ref(root) + if err != nil { + return chasm.EngineUpdateWithStartExecutionResult{}, err + } + + return chasm.EngineUpdateWithStartExecutionResult{ + ExecutionKey: exec.key, + ExecutionRef: serializedRef, + Created: true, + }, nil +} + +func (e *Engine) newExecution(key chasm.ExecutionKey) *execution { + // bsMu (backend state mutex) guards transitionCount and execState, which are shared + // across handler closures. It is separate from MockNodeBackend's internal mu to avoid deadlocks. + var ( + bsMu sync.Mutex + transitionCount int64 = 1 + execState = persistencespb.WorkflowExecutionState{ + State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + } + ) + + backend := &chasm.MockNodeBackend{ + // NextTransitionCount increments on every CloseTransaction call, matching + // the real engine's per transition monotonic counter. 
+ HandleNextTransitionCount: func() int64 { + bsMu.Lock() + defer bsMu.Unlock() + transitionCount++ + return transitionCount + }, + // CurrentVersionedTransition reflects the latest committed transition count. + HandleCurrentVersionedTransition: func() *persistencespb.VersionedTransition { + bsMu.Lock() + defer bsMu.Unlock() + return &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: transitionCount, + } + }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: func() definition.WorkflowKey { + return definition.NewWorkflowKey(key.NamespaceID, key.BusinessID, key.RunID) + }, + HandleIsWorkflow: func() bool { return false }, + // GetExecutionState returns the current lifecycle state, which CloseTransaction + // uses to decide whether to call UpdateWorkflowStateStatus on the backend. + HandleGetExecutionState: func() *persistencespb.WorkflowExecutionState { + bsMu.Lock() + defer bsMu.Unlock() + return &persistencespb.WorkflowExecutionState{ + State: execState.State, + Status: execState.Status, + } + }, + // UpdateWorkflowStateStatus is called by CloseTransaction when the root + // component's LifecycleState changes from Running to Completed, Failed, or Terminated. + HandleUpdateWorkflowStateStatus: func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) { + bsMu.Lock() + defer bsMu.Unlock() + changed := execState.State != state || execState.Status != status + execState.State = state + execState.Status = status + return changed, nil + }, + } + return &execution{ + key: key, + backend: backend, + node: chasm.NewEmptyTree( + e.registry, + e.timeSource, + backend, + chasm.DefaultPathEncoder, + e.logger, + e.metrics, + ), + } +} + +// executionForRef looks up an execution by the ref's RunID when present, or falls back +// to the current run for the business ID when RunID is empty. 
+func (e *Engine) executionForRef(ref chasm.ComponentRef) (*execution, error) { + if ref.RunID != "" { + exec, ok := e.allExecutions[newRunKey(ref.ExecutionKey)] + if !ok { + return nil, serviceerror.NewNotFound( + fmt.Sprintf("execution not found: namespace=%q business_id=%q run_id=%q", ref.NamespaceID, ref.BusinessID, ref.RunID), + ) + } + return exec, nil + } + exec, ok := e.currentExecutions[newBusinessKey(ref.ExecutionKey)] + if !ok { + return nil, serviceerror.NewNotFound( + fmt.Sprintf("execution not found: namespace=%q business_id=%q", ref.NamespaceID, ref.BusinessID), + ) + } + return exec, nil +} + +func (e *Engine) updateComponentInExecution( + ctx context.Context, + execution *execution, + ref chasm.ComponentRef, + updateFn func(chasm.MutableContext, chasm.Component) error, +) ([]byte, error) { + chasmCtx := chasm.NewContext(ctx, execution.node) + component, err := execution.node.Component(chasmCtx, ref) + if err != nil { + return nil, err + } + + mutableCtx := chasm.NewMutableContext(ctx, execution.node) + if err := updateFn(mutableCtx, component); err != nil { + return nil, err + } + + if _, err = execution.node.CloseTransaction(); err != nil { + return nil, err + } + + return mutableCtx.Ref(component) +} + +// refForComponent looks up the ComponentRef for a component instance by scanning +// all executions. It works because Node.CloseTransaction (called after every mutation) +// runs syncSubComponents, which populates the node's valueToNode map for all +// subcomponents. Returns an error if the component is not found in any execution. 
+func (e *Engine) refForComponent(component chasm.Component) (chasm.ComponentRef, error) { + for _, exec := range e.allExecutions { + serialized, err := exec.node.Ref(component) + if err != nil { + if errors.As(err, new(*serviceerror.NotFound)) { + continue // component not registered in this execution's node + } + return chasm.ComponentRef{}, err + } + return chasm.DeserializeComponentRef(serialized) + } + return chasm.ComponentRef{}, fmt.Errorf("component %T not found in any execution managed by this engine", component) +} + +func constructTransitionOptions(opts ...chasm.TransitionOption) chasm.TransitionOptions { + options := defaultTransitionOptions + for _, opt := range opts { + opt(&options) + } + // NOTE: TransitionOptions.Speculative is intentionally not implemented here. It is also + // unimplemented in the production engine (see the TODO in service/history/chasm_engine.go). + return options +} + +// executionFailed reports whether a closed execution ended in a failure state +// (failed, terminated, cancelled, or timed out). This drives the +// [chasm.BusinessIDReusePolicyAllowDuplicateFailedOnly] reuse policy check. +func executionFailed(exec *execution) bool { + return exec.backend.GetExecutionState().Status != enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED +} + +func newBusinessKey(key chasm.ExecutionKey) businessKey { + return businessKey{namespaceID: key.NamespaceID, businessID: key.BusinessID} +} + +func newRunKey(key chasm.ExecutionKey) runKey { + return runKey{namespaceID: key.NamespaceID, businessID: key.BusinessID, runID: key.RunID} +} + +// executionNotifier allows [PollComponent] callers to subscribe to state change +// signals for a given execution. notify closes the channel for all current +// subscribers and each subscriber must resubscribe after being woken. 
+type executionNotifier struct { + mu sync.Mutex + subscribers map[chasm.ExecutionKey][]chan struct{} +} + +func newExecutionNotifier() *executionNotifier { + return &executionNotifier{ + subscribers: make(map[chasm.ExecutionKey][]chan struct{}), + } +} + +// subscribe returns a channel that will be closed on the next notify call for key, +// and an unsubscribe function that must be called when the caller is done waiting. +func (n *executionNotifier) subscribe(key chasm.ExecutionKey) (<-chan struct{}, func()) { + ch := make(chan struct{}) + n.mu.Lock() + n.subscribers[key] = append(n.subscribers[key], ch) + n.mu.Unlock() + + unsubscribed := false + unsubscribe := func() { + n.mu.Lock() + defer n.mu.Unlock() + if unsubscribed { + return + } + unsubscribed = true + subs := n.subscribers[key] + for i, s := range subs { + if s == ch { + n.subscribers[key] = append(subs[:i], subs[i+1:]...) + if len(n.subscribers[key]) == 0 { + delete(n.subscribers, key) + } + break + } + } + } + return ch, unsubscribe +} + +// notify closes all subscriber channels for key, waking any blocked PollComponent callers. 
+func (n *executionNotifier) notify(key chasm.ExecutionKey) { + n.mu.Lock() + subs := n.subscribers[key] + delete(n.subscribers, key) + n.mu.Unlock() + + for _, ch := range subs { + close(ch) + } +} diff --git a/chasm/component.go b/chasm/component.go new file mode 100644 index 00000000000..816cec1845d --- /dev/null +++ b/chasm/component.go @@ -0,0 +1,136 @@ +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination component_mock.go + +package chasm + +import ( + "context" + "reflect" + "strconv" + + commonpb "go.temporal.io/api/common/v1" +) + +type Component interface { + LifecycleState(Context) LifecycleState + + // we may not need this in the beginning + mustEmbedUnimplementedComponent() +} + +type TerminableComponent interface { + Component + + // Terminate method is invoked by the chasm framework on an execution's root component when the execution + // needs to be forcefully terminated. + // Some examples include: + // - Execution state becomes too large. + // - Two running executions with the same businessID when namespace performs a force failover. + Terminate(MutableContext, TerminateComponentRequest) (TerminateComponentResponse, error) +} + +type TerminateComponentRequest struct { + Identity string + Reason string + Details *commonpb.Payloads + RequestID string +} + +type TerminateComponentResponse struct{} + +// RootComponent is the interface that must be implemented by the top level component of a chasm execution. +// When the RootComponent's LifecycleState transitions to a closed state, the entire execution is considered closed, +// and will be cleaned up by the chasm framework after namespace's retention period. The BusinessID is also available for reuse. +// +// TODO: (not yet true) Visibility record will no longer be updated after RootComponent is closed. +type RootComponent interface { + TerminableComponent + + // ContextMetadata returns execution metadata to propagate to the request context. 
+ // When the ContextMetadataInterceptor is configured with setTrailer=true (history, matching), + // these keys are propagated via gRPC trailers. Keys defined in common/contextutil/metadata.go. + ContextMetadata(Context) map[string]string +} + +// Embed UnimplementedComponent to get forward compatibility +type UnimplementedComponent struct{} + +func (UnimplementedComponent) mustEmbedUnimplementedComponent() {} + +var UnimplementedComponentT = reflect.TypeFor[UnimplementedComponent]() + +// Shall it be named ComponentLifecycleState? +type LifecycleState int + +const ( + // Lifecycle states that are considered OPEN + // + // LifecycleStateCreated LifecycleState = 1 << iota + LifecycleStateRunning LifecycleState = 2 << iota + LifecycleStatePaused + + // Lifecycle states that are considered CLOSED + // + LifecycleStateCompleted + LifecycleStateFailed + // LifecycleStateTerminated + // LifecycleStateTimedout + // LifecycleStateReset +) + +func (s LifecycleState) IsClosed() bool { + return s >= LifecycleStateCompleted +} + +func (s LifecycleState) IsPaused() bool { + return s == LifecycleStatePaused +} + +func (s LifecycleState) String() string { + switch s { + case LifecycleStateRunning: + return "Running" + case LifecycleStatePaused: + return "Paused" + case LifecycleStateCompleted: + return "Completed" + case LifecycleStateFailed: + return "Failed" + default: + return strconv.Itoa(int(s)) + } +} + +type OperationIntent int + +const ( + OperationIntentProgress OperationIntent = 1 << iota + OperationIntentObserve + + OperationIntentUnspecified = OperationIntent(0) +) + +// The operation intent must come from the context +// as the handler may not pass the endpoint request as Input to, +// say, the chasm.UpdateComponent method. +// So similar to the chasm engine, handler needs to add the intent +// to the context. 
+type operationIntentCtxKeyType struct{} + +var operationIntentCtxKey = operationIntentCtxKeyType{} + +func newContextWithOperationIntent( + ctx context.Context, + intent OperationIntent, +) context.Context { + return context.WithValue(ctx, operationIntentCtxKey, intent) +} + +func operationIntentFromContext( + ctx context.Context, +) OperationIntent { + intent, ok := ctx.Value(operationIntentCtxKey).(OperationIntent) + if !ok { + return OperationIntentUnspecified + } + return intent +} diff --git a/chasm/component_field_option.go b/chasm/component_field_option.go new file mode 100644 index 00000000000..8177b1e964f --- /dev/null +++ b/chasm/component_field_option.go @@ -0,0 +1,15 @@ +package chasm + +type ( + componentFieldOptions struct { + detached bool + } + + ComponentFieldOption func(*componentFieldOptions) +) + +func ComponentFieldDetached() ComponentFieldOption { + return func(o *componentFieldOptions) { + o.detached = true + } +} diff --git a/chasm/component_mock.go b/chasm/component_mock.go new file mode 100644 index 00000000000..3d65d7c4cb6 --- /dev/null +++ b/chasm/component_mock.go @@ -0,0 +1,210 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: component.go +// +// Generated by this command: +// +// mockgen -package chasm -source component.go -destination component_mock.go +// + +// Package chasm is a generated GoMock package. +package chasm + +import ( + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockComponent is a mock of Component interface. +type MockComponent struct { + ctrl *gomock.Controller + recorder *MockComponentMockRecorder + isgomock struct{} +} + +// MockComponentMockRecorder is the mock recorder for MockComponent. +type MockComponentMockRecorder struct { + mock *MockComponent +} + +// NewMockComponent creates a new mock instance. 
+func NewMockComponent(ctrl *gomock.Controller) *MockComponent { + mock := &MockComponent{ctrl: ctrl} + mock.recorder = &MockComponentMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockComponent) EXPECT() *MockComponentMockRecorder { + return m.recorder +} + +// LifecycleState mocks base method. +func (m *MockComponent) LifecycleState(arg0 Context) LifecycleState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LifecycleState", arg0) + ret0, _ := ret[0].(LifecycleState) + return ret0 +} + +// LifecycleState indicates an expected call of LifecycleState. +func (mr *MockComponentMockRecorder) LifecycleState(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LifecycleState", reflect.TypeOf((*MockComponent)(nil).LifecycleState), arg0) +} + +// mustEmbedUnimplementedComponent mocks base method. +func (m *MockComponent) mustEmbedUnimplementedComponent() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedComponent") +} + +// mustEmbedUnimplementedComponent indicates an expected call of mustEmbedUnimplementedComponent. +func (mr *MockComponentMockRecorder) mustEmbedUnimplementedComponent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedComponent", reflect.TypeOf((*MockComponent)(nil).mustEmbedUnimplementedComponent)) +} + +// MockTerminableComponent is a mock of TerminableComponent interface. +type MockTerminableComponent struct { + ctrl *gomock.Controller + recorder *MockTerminableComponentMockRecorder + isgomock struct{} +} + +// MockTerminableComponentMockRecorder is the mock recorder for MockTerminableComponent. +type MockTerminableComponentMockRecorder struct { + mock *MockTerminableComponent +} + +// NewMockTerminableComponent creates a new mock instance. 
+func NewMockTerminableComponent(ctrl *gomock.Controller) *MockTerminableComponent { + mock := &MockTerminableComponent{ctrl: ctrl} + mock.recorder = &MockTerminableComponentMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTerminableComponent) EXPECT() *MockTerminableComponentMockRecorder { + return m.recorder +} + +// LifecycleState mocks base method. +func (m *MockTerminableComponent) LifecycleState(arg0 Context) LifecycleState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LifecycleState", arg0) + ret0, _ := ret[0].(LifecycleState) + return ret0 +} + +// LifecycleState indicates an expected call of LifecycleState. +func (mr *MockTerminableComponentMockRecorder) LifecycleState(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LifecycleState", reflect.TypeOf((*MockTerminableComponent)(nil).LifecycleState), arg0) +} + +// Terminate mocks base method. +func (m *MockTerminableComponent) Terminate(arg0 MutableContext, arg1 TerminateComponentRequest) (TerminateComponentResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Terminate", arg0, arg1) + ret0, _ := ret[0].(TerminateComponentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Terminate indicates an expected call of Terminate. +func (mr *MockTerminableComponentMockRecorder) Terminate(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockTerminableComponent)(nil).Terminate), arg0, arg1) +} + +// mustEmbedUnimplementedComponent mocks base method. +func (m *MockTerminableComponent) mustEmbedUnimplementedComponent() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedComponent") +} + +// mustEmbedUnimplementedComponent indicates an expected call of mustEmbedUnimplementedComponent. 
+func (mr *MockTerminableComponentMockRecorder) mustEmbedUnimplementedComponent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedComponent", reflect.TypeOf((*MockTerminableComponent)(nil).mustEmbedUnimplementedComponent)) +} + +// MockRootComponent is a mock of RootComponent interface. +type MockRootComponent struct { + ctrl *gomock.Controller + recorder *MockRootComponentMockRecorder + isgomock struct{} +} + +// MockRootComponentMockRecorder is the mock recorder for MockRootComponent. +type MockRootComponentMockRecorder struct { + mock *MockRootComponent +} + +// NewMockRootComponent creates a new mock instance. +func NewMockRootComponent(ctrl *gomock.Controller) *MockRootComponent { + mock := &MockRootComponent{ctrl: ctrl} + mock.recorder = &MockRootComponentMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRootComponent) EXPECT() *MockRootComponentMockRecorder { + return m.recorder +} + +// ContextMetadata mocks base method. +func (m *MockRootComponent) ContextMetadata(arg0 Context) map[string]string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ContextMetadata", arg0) + ret0, _ := ret[0].(map[string]string) + return ret0 +} + +// ContextMetadata indicates an expected call of ContextMetadata. +func (mr *MockRootComponentMockRecorder) ContextMetadata(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ContextMetadata", reflect.TypeOf((*MockRootComponent)(nil).ContextMetadata), arg0) +} + +// LifecycleState mocks base method. +func (m *MockRootComponent) LifecycleState(arg0 Context) LifecycleState { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LifecycleState", arg0) + ret0, _ := ret[0].(LifecycleState) + return ret0 +} + +// LifecycleState indicates an expected call of LifecycleState. 
+func (mr *MockRootComponentMockRecorder) LifecycleState(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LifecycleState", reflect.TypeOf((*MockRootComponent)(nil).LifecycleState), arg0) +} + +// Terminate mocks base method. +func (m *MockRootComponent) Terminate(arg0 MutableContext, arg1 TerminateComponentRequest) (TerminateComponentResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Terminate", arg0, arg1) + ret0, _ := ret[0].(TerminateComponentResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Terminate indicates an expected call of Terminate. +func (mr *MockRootComponentMockRecorder) Terminate(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Terminate", reflect.TypeOf((*MockRootComponent)(nil).Terminate), arg0, arg1) +} + +// mustEmbedUnimplementedComponent mocks base method. +func (m *MockRootComponent) mustEmbedUnimplementedComponent() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedComponent") +} + +// mustEmbedUnimplementedComponent indicates an expected call of mustEmbedUnimplementedComponent. 
+func (mr *MockRootComponentMockRecorder) mustEmbedUnimplementedComponent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedComponent", reflect.TypeOf((*MockRootComponent)(nil).mustEmbedUnimplementedComponent)) +} diff --git a/chasm/context.go b/chasm/context.go new file mode 100644 index 00000000000..3f829d231b4 --- /dev/null +++ b/chasm/context.go @@ -0,0 +1,236 @@ +package chasm + +import ( + "context" + "errors" + "time" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" +) + +type Context interface { + // Context is not bound to any component, + // so all methods needs to take in component as a parameter + + // NOTE: component created in the current transaction won't have a ref + // this is a Ref to the component state at the start of the transition + Ref(Component) ([]byte, error) + // Now returns the current time in the context of the given component. + // In a context of a transaction, this time must be used to allow for framework support of pause and time skipping. + Now(Component) time.Time + // ExecutionKey returns the execution key for the execution the context is operating on. + ExecutionKey() ExecutionKey + // ExecutionInfo returns metadata information about the execution. + ExecutionInfo() ExecutionInfo + // Logger returns a logger tagged with execution key and other chasm framework internal information. + Logger() log.Logger + // NamespaceEntry returns the namespace entry for the execution. + NamespaceEntry() *namespace.Namespace + // EndpointByName resolves a nexus endpoint entry. + EndpointByName(endpointName string) (*persistencespb.NexusEndpointEntry, error) + // MetricsHandler returns a metrics handler with namespace tag. + MetricsHandler() metrics.Handler + // Value returns the value associated with this context for key. 
The behavior is the same as context.Context.Value(). + // Use WithContextValues RegistrableComponentOption to set key values pair for a component upon registration. + // Registered key-value pairs will automatically be added to the Context whenever framework accesses the component. + // Alternatively, use ContextWithValue() to manually set values on Context which will take precedence over registered ones. + Value(key any) any + + // Intent() OperationIntent + // ComponentOptions(Component) []ComponentOption + + // withValue should only be used by ContextWithValue() function, do NOT call it directly. + // For structs implementing this method, although the returned value has type Context, + // the concrete type MUST be the same concrete type as the receiver. + withValue(key any, value any) Context + structuredRef(Component) (ComponentRef, error) + goContext() context.Context +} + +type ExecutionInfo struct { + // StateTransitionCount is the number of create/update transactions in the history of this execution. + StateTransitionCount int64 + // ApproximateStateSize is the approximate size in bytes of the persisted execution state of this execution. + ApproximateStateSize int + // CloseTime is the time when the execution was closed. + // An execution is closed when its root component reaches a terminal state in its lifecycle. + // If the component is still running (not yet closed), it returns a zero time.Time value. + CloseTime time.Time +} + +type EndpointRegistry interface { + GetByName(ctx context.Context, namespaceID namespace.ID, endpointName string) (*persistencespb.NexusEndpointEntry, error) +} + +type MutableContext interface { + Context + + // AddTask adds a task to be emitted as part of the current transaction. + // The task is associated with the given component and will be invoked via the registered handler for the given task + // referencing the component. 
+ AddTask(Component, TaskAttributes, any) + + // Get a Ref for the component + // This ref to the component state at the end of the transition + // Same as Ref(Component) method in Context, + // this only works for components that already exists at the start of the transition + // + // If we provide this method, then the method on the engine doesn't need to + // return a Ref + // NewRef(Component) (ComponentRef, bool) +} + +type immutableCtx struct { + // The context here is not really used today. + // But it will be when we support partial loading later, + // and the framework potentially needs to go to persistence to load some fields. + ctx context.Context + + executionKey ExecutionKey + + // Not embedding the Node here to avoid exposing AddTask() method on Node, + // so that ContextImpl won't implement MutableContext interface. + root *Node +} + +type mutableCtx struct { + *immutableCtx +} + +// NewContext creates a new Context from an existing Context and root Node. +// +// NOTE: Library authors should not invoke this constructor directly, and instead use [ReadComponent]. +func NewContext( + ctx context.Context, + node *Node, +) Context { + return newContext(ctx, node) +} + +// newContext creates a new immutableCtx from an existing Context and root Node. +// This is similar to NewContext, but returns *immutableCtx instead of Context interface. 
+func newContext( + ctx context.Context, + node *Node, +) *immutableCtx { + workflowKey := node.backend.GetWorkflowKey() + return &immutableCtx{ + ctx: ctx, + root: node.root(), + executionKey: ExecutionKey{ + NamespaceID: workflowKey.NamespaceID, + BusinessID: workflowKey.WorkflowID, + RunID: workflowKey.RunID, + }, + } +} + +func (c *immutableCtx) Ref(component Component) ([]byte, error) { + return c.root.Ref(component) +} + +func (c *immutableCtx) Now(component Component) time.Time { + return c.root.Now(component) +} + +func (c *immutableCtx) ExecutionKey() ExecutionKey { + return c.executionKey +} + +func (c *immutableCtx) ExecutionInfo() ExecutionInfo { + executionInfo := c.root.backend.GetExecutionInfo() + + var closeTime time.Time + closeTimestamp := executionInfo.GetCloseTime() + if closeTimestamp != nil { + closeTime = closeTimestamp.AsTime() + } + + return ExecutionInfo{ + StateTransitionCount: executionInfo.GetStateTransitionCount(), + ApproximateStateSize: c.root.backend.GetApproximatePersistedSize(), + CloseTime: closeTime, + } +} + +func (c *immutableCtx) Logger() log.Logger { + return c.root.logger +} + +func (c *immutableCtx) MetricsHandler() metrics.Handler { + return c.root.metricsHandler +} + +func (c *immutableCtx) Value(key any) any { + if v := c.goContext().Value(key); v != nil { + return v + } + + return c.root.registry.componentContextValue(key) +} + +func (c *immutableCtx) withValue(key any, value any) Context { + return &immutableCtx{ + ctx: context.WithValue(c.goContext(), key, value), + root: c.root, + executionKey: c.executionKey, + } +} + +func (c *immutableCtx) structuredRef(component Component) (ComponentRef, error) { + return c.root.structuredRef(component) +} + +func (c *immutableCtx) NamespaceEntry() *namespace.Namespace { + return c.root.backend.GetNamespaceEntry() +} + +func (c *immutableCtx) goContext() context.Context { + return c.ctx +} + +func (c *immutableCtx) EndpointByName(name string) (*persistencespb.NexusEndpointEntry, 
error) { + reg := c.root.backend.EndpointRegistry() + if reg == nil { + return nil, errors.New("endpoint registry not available") + } + return reg.GetByName(c.ctx, c.NamespaceEntry().ID(), name) +} + +// NewMutableContext creates a new MutableContext from an existing Context and root Node. +// +// NOTE: Library authors should not invoke this constructor directly, and instead use the [UpdateComponent], +// [UpdateWithStartExecution], or [StartExecution] APIs. +func NewMutableContext( + ctx context.Context, + node *Node, +) MutableContext { + return &mutableCtx{ + immutableCtx: newContext(ctx, node), + } +} + +func (c *mutableCtx) AddTask( + component Component, + attributes TaskAttributes, + payload any, +) { + c.root.AddTask(component, attributes, payload) +} + +func (c *mutableCtx) withValue(key any, value any) Context { + return &mutableCtx{ + immutableCtx: ContextWithValue(c.immutableCtx, key, value), + } +} + +// ContextWithValue returns a new Context with the given key-value pair added. +// Added key-value pairs will be accessible via the Value() method on the returned Context, +// and the behavior of the key-value pair is the same as context.Context.WithValue(). +func ContextWithValue[C Context](c C, key any, value any) C { + //nolint:revive // unchecked-type-assertion + return any(c.withValue(key, value)).(C) +} diff --git a/chasm/context_mock.go b/chasm/context_mock.go new file mode 100644 index 00000000000..7521ca78d2c --- /dev/null +++ b/chasm/context_mock.go @@ -0,0 +1,170 @@ +package chasm + +import ( + "context" + "errors" + "fmt" + "slices" + "sync" + "time" + + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" +) + +var _ Context = (*MockContext)(nil) +var _ MutableContext = (*MockMutableContext)(nil) + +// MockContext is a mock implementation of [Context]. 
+type MockContext struct { + HandleExecutionKey func() ExecutionKey + HandleNow func(component Component) time.Time + HandleRef func(component Component) ([]byte, error) + HandleExecutionCloseTime func() time.Time + HandleStateTransitionCount func() int64 + HandleExecutionInfo func() ExecutionInfo + HandleLibrary func(name string) (Library, bool) + HandleNamespaceEntry func() *namespace.Namespace + HandleEndpointByName func(string) (*persistencespb.NexusEndpointEntry, error) + HandleMetricsHandler func() metrics.Handler + + // GoCtx is the underlying context.Context used for context value lookups. + // Any values set on it will be available via the CHASM mock context's Value method, + // and take precedence over any registered context values. + // Defaults to context.Background() if nil. + GoCtx context.Context + + registeredContextValues map[any]any +} + +func (c *MockContext) RegisterComponentContextValues( + keyValues map[any]any, +) { + if c.registeredContextValues == nil { + c.registeredContextValues = make(map[any]any) + } + for k, v := range keyValues { + if _, exists := c.registeredContextValues[k]; exists { + // nolint:forbidigo + panic(fmt.Sprintf("context value key already registered: %v", k)) + } + c.registeredContextValues[k] = v + } +} + +func (c *MockContext) goContext() context.Context { + if c.GoCtx == nil { + c.GoCtx = context.Background() + } + return c.GoCtx +} + +func (c *MockContext) EndpointByName(name string) (*persistencespb.NexusEndpointEntry, error) { + if c.HandleEndpointByName != nil { + return c.HandleEndpointByName(name) + } + return nil, errors.New("endpoint registry not available") +} + +func (c *MockContext) Now(cmp Component) time.Time { + if c.HandleNow != nil { + return c.HandleNow(cmp) + } + return time.Now() +} + +func (c *MockContext) Ref(cmp Component) ([]byte, error) { + if c.HandleRef != nil { + return c.HandleRef(cmp) + } + return nil, nil +} + +func (c *MockContext) structuredRef(cmp Component) (ComponentRef, error) { + 
return ComponentRef{}, nil +} + +func (c *MockContext) ExecutionKey() ExecutionKey { + if c.HandleExecutionKey != nil { + return c.HandleExecutionKey() + } + return ExecutionKey{} +} + +func (c *MockContext) ExecutionInfo() ExecutionInfo { + if c.HandleExecutionInfo != nil { + return c.HandleExecutionInfo() + } + return ExecutionInfo{} +} + +func (c *MockContext) NamespaceEntry() *namespace.Namespace { + if c.HandleNamespaceEntry != nil { + return c.HandleNamespaceEntry() + } + return nil +} + +func (c *MockContext) Logger() log.Logger { + executionKey := c.ExecutionKey() + return log.NewTestLogger().With( + tag.WorkflowNamespaceID(executionKey.NamespaceID), + tag.WorkflowID(executionKey.BusinessID), + tag.WorkflowRunID(executionKey.RunID), + ) +} + +func (c *MockContext) MetricsHandler() metrics.Handler { + if c.HandleMetricsHandler != nil { + return c.HandleMetricsHandler() + } + return metrics.NoopMetricsHandler +} + +func (c *MockContext) Value(key any) any { + return c.goContext().Value(key) +} + +func (c *MockContext) withValue(key any, value any) Context { + return &MockContext{ + HandleExecutionKey: c.HandleExecutionKey, + HandleNow: c.HandleNow, + HandleRef: c.HandleRef, + HandleExecutionInfo: c.HandleExecutionInfo, + HandleMetricsHandler: c.HandleMetricsHandler, + GoCtx: context.WithValue(c.goContext(), key, value), + HandleNamespaceEntry: c.HandleNamespaceEntry, + HandleEndpointByName: c.HandleEndpointByName, + } +} + +// MockMutableContext is a mock implementation of [MutableContext] that records added tasks for inspection in +// tests. 
+type MockMutableContext struct { + MockContext + + mu sync.Mutex + Tasks []MockTask +} + +func (c *MockMutableContext) AddTask(component Component, attributes TaskAttributes, payload any) { + c.mu.Lock() + defer c.mu.Unlock() + c.Tasks = append(c.Tasks, MockTask{component, attributes, payload}) +} + +func (c *MockMutableContext) withValue(key any, value any) Context { + return &MockMutableContext{ + MockContext: *ContextWithValue(&c.MockContext, key, value), + Tasks: slices.Clone(c.Tasks), + } +} + +type MockTask struct { + Component Component + Attributes TaskAttributes + Payload any +} diff --git a/chasm/engine.go b/chasm/engine.go new file mode 100644 index 00000000000..a0b98091857 --- /dev/null +++ b/chasm/engine.go @@ -0,0 +1,462 @@ +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination engine_mock.go + +package chasm + +import ( + "context" + + "go.temporal.io/server/common/log" +) + +// NoValue is a sentinel type representing no value. +// Useful for accessing components using the engine methods (e.g., [GetComponent]) with a function that does not need to +// return any information. 
type NoValue = *struct{}

// Engine is the CHASM engine interface backing the generic free functions
// ([StartExecution], [UpdateComponent], [ReadComponent], [PollComponent],
// [DeleteExecution]). Implementations are resolved from a context.Context
// via [NewEngineContext] / engineFromContext.
type Engine interface {
	StartExecution(
		context.Context,
		ComponentRef,
		func(MutableContext) (RootComponent, error),
		...TransitionOption,
	) (StartExecutionResult, error)
	UpdateWithStartExecution(
		context.Context,
		ComponentRef,
		func(MutableContext) (RootComponent, error),
		func(MutableContext, Component) error,
		...TransitionOption,
	) (EngineUpdateWithStartExecutionResult, error)

	UpdateComponent(
		context.Context,
		ComponentRef,
		func(MutableContext, Component) error,
		...TransitionOption,
	) ([]byte, error)
	ReadComponent(
		context.Context,
		ComponentRef,
		func(Context, Component) error,
		...TransitionOption,
	) error

	PollComponent(
		context.Context,
		ComponentRef,
		func(Context, Component) (bool, error),
		...TransitionOption,
	) ([]byte, error)

	DeleteExecution(
		context.Context,
		ComponentRef,
		DeleteExecutionRequest,
	) error

	// NotifyExecution notifies any PollComponent callers waiting on the execution.
	NotifyExecution(ExecutionKey)
}

// DeleteExecutionRequest is the request for [DeleteExecution]. TerminateComponentRequest will only be
// used if the execution is still running. The actual deletion of the execution is async, and will return
// after creating the DeleteExecutionTask.
type DeleteExecutionRequest struct {
	TerminateComponentRequest
}

// BusinessIDReusePolicy controls whether a new execution may reuse the
// business ID of a previously closed execution.
type BusinessIDReusePolicy int

const (
	BusinessIDReusePolicyAllowDuplicate BusinessIDReusePolicy = iota
	BusinessIDReusePolicyAllowDuplicateFailedOnly
	BusinessIDReusePolicyRejectDuplicate
)

// BusinessIDConflictPolicy controls what happens when a new execution's
// business ID collides with a currently running execution.
type BusinessIDConflictPolicy int

const (
	BusinessIDConflictPolicyFail BusinessIDConflictPolicy = iota
	BusinessIDConflictPolicyTerminateExisting
	BusinessIDConflictPolicyUseExisting
)

// TransitionOptions collects the settings applied by [TransitionOption]
// functions. Zero value means: allow-duplicate reuse, fail-on-conflict,
// no request ID, non-speculative.
type TransitionOptions struct {
	ReusePolicy    BusinessIDReusePolicy
	ConflictPolicy BusinessIDConflictPolicy
	RequestID      string
	Speculative    bool
}

// TransitionOption mutates TransitionOptions; see WithBusinessIDPolicy,
// WithRequestID, and WithSpeculative.
type TransitionOption func(*TransitionOptions)

// StartExecutionResult contains the outcome of creating a new execution via [StartExecution].
//
// This struct provides information about whether a new execution was actually created,
// along with identifiers needed to reference the execution in subsequent operations.
//
// Fields:
//   - ExecutionKey: The unique identifier for the execution. This key can be used to
//     look up or reference the execution in future operations.
//   - ExecutionRef: A serialized reference to the newly created root component.
//     This can be passed to [UpdateComponent], [ReadComponent], or [PollComponent]
//     to interact with the component. Use [DeserializeComponentRef] to convert this
//     back to a [ComponentRef] if needed.
//   - Created: Indicates whether a new execution was actually created. When false,
//     the execution already existed (based on the [BusinessIDReusePolicy] and
//     [BusinessIDConflictPolicy] configured via [WithBusinessIDPolicy]), and the
//     existing execution was returned instead.
type StartExecutionResult struct {
	ExecutionKey ExecutionKey
	ExecutionRef []byte
	Created      bool
}

// UpdateWithStartExecutionResult is the result of a UpdateWithStartExecution operation.
//
// Fields:
//   - ExecutionKey: The unique identifier for the execution. This key can be used to
//     look up or reference the execution in future operations.
//   - ExecutionRef: A serialized reference to the root component (newly created or
//     pre-existing). This can be passed to [UpdateComponent], [ReadComponent], or
//     [PollComponent] to interact with the component. Use [DeserializeComponentRef]
//     to convert this back to a [ComponentRef] if needed.
//   - Created: Indicates whether a new execution was actually created. When false,
//     the execution already existed (based on the [BusinessIDReusePolicy] and
//     [BusinessIDConflictPolicy] configured via [WithBusinessIDPolicy]), and the
//     existing execution was returned instead.
//   - UpdateOutput: The output value returned by the update function.
type UpdateWithStartExecutionResult[O any] struct {
	ExecutionKey ExecutionKey
	ExecutionRef []byte
	Created      bool
	UpdateOutput O
}

// EngineUpdateWithStartExecutionResult is a type alias for the result type returned by the UpdateWithStart Engine implementation.
type EngineUpdateWithStartExecutionResult = UpdateWithStartExecutionResult[struct{}]

// WithSpeculative marks the transition as speculative: (only) this transition
// will not be persisted. The next non-speculative transition will persist this
// transition as well.
// Compared to the ExecutionEphemeral() operation on RegistrableComponent,
// the scope of this operation is limited to a certain transition,
// while the ExecutionEphemeral() applies to all transitions.
// TODO: we need to figure out a way to run the tasks
// generated in a speculative transition
func WithSpeculative() TransitionOption {
	return func(opts *TransitionOptions) {
		opts.Speculative = true
	}
}

// WithBusinessIDPolicy sets the businessID reuse and conflict policy
// used in the transition when creating a new execution.
// This option only applies to StartExecution() and UpdateWithStartExecution().
func WithBusinessIDPolicy(
	reusePolicy BusinessIDReusePolicy,
	conflictPolicy BusinessIDConflictPolicy,
) TransitionOption {
	return func(opts *TransitionOptions) {
		opts.ReusePolicy = reusePolicy
		opts.ConflictPolicy = conflictPolicy
	}
}

// WithRequestID sets the requestID used when creating a new execution.
// This option only applies to StartExecution() and UpdateWithStartExecution().
func WithRequestID(
	requestID string,
) TransitionOption {
	return func(opts *TransitionOptions) {
		opts.RequestID = requestID
	}
}

// Not needed for V1
// func WithEagerLoading(
//     paths []ComponentPath,
// ) OperationOption {
//     panic("not implemented")
// }

// StartExecution creates a new execution with a component initialized by the provided factory function.
//
// This is the primary entry point for starting a new execution in the CHASM engine. It handles
// the lifecycle of creating and persisting a new component within an execution context.
//
// Type Parameters:
//   - C: The component type to create, must implement [RootComponent]
//   - I: The input type passed to the factory function
//
// Parameters:
//   - ctx: Context containing the CHASM engine (must be created via [NewEngineContext])
//   - key: Unique identifier for the execution, used for deduplication and lookup
//   - startFn: Factory function that creates the component.
//     Receives a [MutableContext] for accessing engine capabilities and the input value.
//   - input: Application-specific data passed to startFn
//   - opts: Optional [TransitionOption] functions to configure creation behavior:
//   - [WithBusinessIDPolicy]: Controls duplicate handling and conflict resolution
//   - [WithRequestID]: Sets a request ID for idempotency
//   - [WithSpeculative]: Defers persistence until the next non-speculative transition
//
// Returns:
//   - [StartExecutionResult]: Contains the execution key, serialized ref, and whether a new execution was created
//   - error: Non-nil if creation failed or policy constraints were violated
//
// NOTE(review): engineFromContext returns nil when ctx does not carry an engine,
// so calling this without a [NewEngineContext]-derived ctx will panic on the nil
// interface — confirm all callers go through NewEngineContext.
func StartExecution[C RootComponent, I any](
	ctx context.Context,
	key ExecutionKey,
	startFn func(MutableContext, I) (C, error),
	input I,
	opts ...TransitionOption,
) (StartExecutionResult, error) {
	result, err := engineFromContext(ctx).StartExecution(
		ctx,
		NewComponentRef[C](key),
		func(mutableContext MutableContext) (_ RootComponent, retErr error) {
			// Convert panics inside user code into returned errors.
			defer log.CapturePanic(mutableContext.Logger(), &retErr)

			var c C
			var err error
			c, err = startFn(mutableContext, input)
			return c, err
		},
		opts...,
	)
	if err != nil {
		return StartExecutionResult{}, err
	}

	return StartExecutionResult{
		ExecutionKey: result.ExecutionKey,
		ExecutionRef: result.ExecutionRef,
		Created:      result.Created,
	}, nil
}

// UpdateWithStartExecution applies updateFn to the root component of the execution
// identified by key, first creating the execution via startFn if it does not exist.
// startFn and updateFn follow the same contracts as in [StartExecution] and
// [UpdateComponent] respectively; both receive the same input value.
// Returns the update output alongside the execution key/ref and a Created flag
// (see [UpdateWithStartExecutionResult]). On error, only UpdateOutput is populated
// (with whatever updateFn produced before failing, or the zero value).
func UpdateWithStartExecution[C RootComponent, I any, O any](
	ctx context.Context,
	key ExecutionKey,
	startFn func(MutableContext, I) (C, error),
	updateFn func(C, MutableContext, I) (O, error),
	input I,
	opts ...TransitionOption,
) (UpdateWithStartExecutionResult[O], error) {
	var output O
	result, err := engineFromContext(ctx).UpdateWithStartExecution(
		ctx,
		NewComponentRef[C](key),
		func(mutableContext MutableContext) (_ RootComponent, retErr error) {
			// Convert panics inside user code into returned errors.
			defer log.CapturePanic(mutableContext.Logger(), &retErr)

			var c C
			var err error
			c, err = startFn(mutableContext, input)
			return c, err
		},
		func(mutableContext MutableContext, c Component) (retErr error) {
			// Convert panics inside user code into returned errors.
			defer log.CapturePanic(mutableContext.Logger(), &retErr)

			var err error
			output, err = updateFn(
				c.(C),
				mutableContext,
				input,
			)
			return err
		},
		opts...,
	)
	if err != nil {
		return UpdateWithStartExecutionResult[O]{
			UpdateOutput: output,
		}, err
	}
	return UpdateWithStartExecutionResult[O]{
		ExecutionKey: result.ExecutionKey,
		ExecutionRef: result.ExecutionRef,
		Created:      result.Created,
		UpdateOutput: output,
	}, nil
}

// TODO:
//   - consider merge with ReadComponent
//   - consider remove ComponentRef from the return value and allow components to get
//     the ref in the transition function. There are some caveats there, check the
//     comment of the NewRef method in MutableContext.
//
// UpdateComponent applies updateFn to the component identified by the supplied component reference.
// opts are currently ignored.
//
// It returns the result, along with the new component reference. The returned reference may be
// nil when updateFn deletes the component in the same transaction and the component is not the
// root component.
+func UpdateComponent[C any, R []byte | ComponentRef, I any, O any]( + ctx context.Context, + r R, + updateFn func(C, MutableContext, I) (O, error), + input I, + opts ...TransitionOption, +) (O, []byte, error) { + var output O + + ref, err := convertComponentRef(r) + if err != nil { + return output, nil, err + } + + newSerializedRef, err := engineFromContext(ctx).UpdateComponent( + ctx, + ref, + func(mutableContext MutableContext, c Component) (retErr error) { + defer log.CapturePanic(mutableContext.Logger(), &retErr) + + var err error + output, err = updateFn( + c.(C), + mutableContext, + input, + ) + return err + }, + opts..., + ) + + if err != nil { + return output, nil, err + } + return output, newSerializedRef, err +} + +// ReadComponent returns the result of evaluating readFn against the component identified by the +// component reference. opts are currently ignored. +func ReadComponent[C any, R []byte | ComponentRef, I any, O any]( + ctx context.Context, + r R, + readFn func(C, Context, I) (O, error), + input I, + opts ...TransitionOption, +) (O, error) { + var output O + + ref, err := convertComponentRef(r) + if err != nil { + return output, err + } + + err = engineFromContext(ctx).ReadComponent( + ctx, + ref, + func(chasmContext Context, c Component) (retErr error) { + defer log.CapturePanic(chasmContext.Logger(), &retErr) + + var err error + output, err = readFn( + c.(C), + chasmContext, + input, + ) + return err + }, + opts..., + ) + return output, err +} + +// PollComponent waits until the predicate is true when evaluated against the component identified +// by the supplied component reference. If this times out due to a server-imposed long-poll timeout +// then it returns (nil, nil, nil), as an indication that the caller should continue long-polling. +// Otherwise it returns (output, ref, err), where output is the output of the predicate function, +// and ref is a component reference identifying the state at which the predicate was satisfied. 
The +// predicate must be monotonic: if it returns true at execution state transition s then it must +// return true at all transitions t > s. If the predicate is true at the outset then PollComponent +// returns immediately. opts are currently ignored. +func PollComponent[C any, R []byte | ComponentRef, I any, O any]( + ctx context.Context, + r R, + monotonicPredicate func(C, Context, I) (O, bool, error), + input I, + opts ...TransitionOption, +) (O, []byte, error) { + var output O + + ref, err := convertComponentRef(r) + if err != nil { + return output, nil, err + } + + newSerializedRef, err := engineFromContext(ctx).PollComponent( + ctx, + ref, + func(chasmContext Context, c Component) (_ bool, retErr error) { + defer log.CapturePanic(chasmContext.Logger(), &retErr) + + out, satisfied, err := monotonicPredicate( + c.(C), + chasmContext, + input, + ) + if satisfied { + output = out + } + return satisfied, err + }, + opts..., + ) + if err != nil { + return output, nil, err + } + return output, newSerializedRef, err +} + +// DeleteExecution deletes the execution identified by the supplied execution key. +// If the execution is still running, it is terminated first. A DeleteExecutionTask is +// then queued to remove all execution data from persistence. +func DeleteExecution[C RootComponent]( + ctx context.Context, + key ExecutionKey, + request DeleteExecutionRequest, +) error { + return engineFromContext(ctx).DeleteExecution( + ctx, + NewComponentRef[C](key), + request, + ) +} + +func convertComponentRef[R []byte | ComponentRef]( + r R, +) (ComponentRef, error) { + if refToken, ok := any(r).([]byte); ok { + return DeserializeComponentRef(refToken) + } + + //revive:disable-next-line:unchecked-type-assertion + return any(r).(ComponentRef), nil +} + +type engineCtxKeyType string + +const engineCtxKey engineCtxKeyType = "chasmEngine" + +// this will be done by the nexus handler? +// alternatively the engine can be a global variable, +// but not a good practice in fx. 
+func NewEngineContext( + ctx context.Context, + engine Engine, +) context.Context { + return context.WithValue(ctx, engineCtxKey, engine) +} + +func engineFromContext( + ctx context.Context, +) Engine { + e, ok := ctx.Value(engineCtxKey).(Engine) + if !ok { + return nil + } + return e +} diff --git a/chasm/engine_mock.go b/chasm/engine_mock.go new file mode 100644 index 00000000000..a3d86b11ba9 --- /dev/null +++ b/chasm/engine_mock.go @@ -0,0 +1,166 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: engine.go +// +// Generated by this command: +// +// mockgen -package chasm -source engine.go -destination engine_mock.go +// + +// Package chasm is a generated GoMock package. +package chasm + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockEngine is a mock of Engine interface. +type MockEngine struct { + ctrl *gomock.Controller + recorder *MockEngineMockRecorder + isgomock struct{} +} + +// MockEngineMockRecorder is the mock recorder for MockEngine. +type MockEngineMockRecorder struct { + mock *MockEngine +} + +// NewMockEngine creates a new mock instance. +func NewMockEngine(ctrl *gomock.Controller) *MockEngine { + mock := &MockEngine{ctrl: ctrl} + mock.recorder = &MockEngineMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEngine) EXPECT() *MockEngineMockRecorder { + return m.recorder +} + +// DeleteExecution mocks base method. +func (m *MockEngine) DeleteExecution(arg0 context.Context, arg1 ComponentRef, arg2 DeleteExecutionRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteExecution", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteExecution indicates an expected call of DeleteExecution. 
+func (mr *MockEngineMockRecorder) DeleteExecution(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteExecution", reflect.TypeOf((*MockEngine)(nil).DeleteExecution), arg0, arg1, arg2) +} + +// NotifyExecution mocks base method. +func (m *MockEngine) NotifyExecution(arg0 ExecutionKey) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "NotifyExecution", arg0) +} + +// NotifyExecution indicates an expected call of NotifyExecution. +func (mr *MockEngineMockRecorder) NotifyExecution(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotifyExecution", reflect.TypeOf((*MockEngine)(nil).NotifyExecution), arg0) +} + +// PollComponent mocks base method. +func (m *MockEngine) PollComponent(arg0 context.Context, arg1 ComponentRef, arg2 func(Context, Component) (bool, error), arg3 ...TransitionOption) ([]byte, error) { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "PollComponent", varargs...) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PollComponent indicates an expected call of PollComponent. +func (mr *MockEngineMockRecorder) PollComponent(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollComponent", reflect.TypeOf((*MockEngine)(nil).PollComponent), varargs...) +} + +// ReadComponent mocks base method. +func (m *MockEngine) ReadComponent(arg0 context.Context, arg1 ComponentRef, arg2 func(Context, Component) error, arg3 ...TransitionOption) error { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReadComponent", varargs...) 
+ ret0, _ := ret[0].(error) + return ret0 +} + +// ReadComponent indicates an expected call of ReadComponent. +func (mr *MockEngineMockRecorder) ReadComponent(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadComponent", reflect.TypeOf((*MockEngine)(nil).ReadComponent), varargs...) +} + +// StartExecution mocks base method. +func (m *MockEngine) StartExecution(arg0 context.Context, arg1 ComponentRef, arg2 func(MutableContext) (RootComponent, error), arg3 ...TransitionOption) (StartExecutionResult, error) { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "StartExecution", varargs...) + ret0, _ := ret[0].(StartExecutionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StartExecution indicates an expected call of StartExecution. +func (mr *MockEngineMockRecorder) StartExecution(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartExecution", reflect.TypeOf((*MockEngine)(nil).StartExecution), varargs...) +} + +// UpdateComponent mocks base method. +func (m *MockEngine) UpdateComponent(arg0 context.Context, arg1 ComponentRef, arg2 func(MutableContext, Component) error, arg3 ...TransitionOption) ([]byte, error) { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1, arg2} + for _, a := range arg3 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateComponent", varargs...) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateComponent indicates an expected call of UpdateComponent. 
+func (mr *MockEngineMockRecorder) UpdateComponent(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1, arg2}, arg3...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateComponent", reflect.TypeOf((*MockEngine)(nil).UpdateComponent), varargs...) +} + +// UpdateWithStartExecution mocks base method. +func (m *MockEngine) UpdateWithStartExecution(arg0 context.Context, arg1 ComponentRef, arg2 func(MutableContext) (RootComponent, error), arg3 func(MutableContext, Component) error, arg4 ...TransitionOption) (EngineUpdateWithStartExecutionResult, error) { + m.ctrl.T.Helper() + varargs := []any{arg0, arg1, arg2, arg3} + for _, a := range arg4 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateWithStartExecution", varargs...) + ret0, _ := ret[0].(EngineUpdateWithStartExecutionResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateWithStartExecution indicates an expected call of UpdateWithStartExecution. +func (mr *MockEngineMockRecorder) UpdateWithStartExecution(arg0, arg1, arg2, arg3 any, arg4 ...any) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]any{arg0, arg1, arg2, arg3}, arg4...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateWithStartExecution", reflect.TypeOf((*MockEngine)(nil).UpdateWithStartExecution), varargs...) 
+} diff --git a/chasm/errors.go b/chasm/errors.go new file mode 100644 index 00000000000..f3eace657ba --- /dev/null +++ b/chasm/errors.go @@ -0,0 +1,21 @@ +package chasm + +type ExecutionAlreadyStartedError struct { + Message string + CurrentRequestID string + CurrentRunID string +} + +func NewExecutionAlreadyStartedErr( + message, currentRequestID, currentRunID string, +) *ExecutionAlreadyStartedError { + return &ExecutionAlreadyStartedError{ + Message: message, + CurrentRequestID: currentRequestID, + CurrentRunID: currentRunID, + } +} + +func (e *ExecutionAlreadyStartedError) Error() string { + return e.Message +} diff --git a/chasm/export_test.go b/chasm/export_test.go new file mode 100644 index 00000000000..8ade7a7a21a --- /dev/null +++ b/chasm/export_test.go @@ -0,0 +1,42 @@ +package chasm + +import ( + "context" + "reflect" +) + +func (r *Registry) Component(fqn string) (*RegistrableComponent, bool) { + return r.component(fqn) +} + +func (r *Registry) Task(fqn string) (*RegistrableTask, bool) { + return r.task(fqn) +} + +func (r *Registry) ComponentFor(componentInstance any) (*RegistrableComponent, bool) { + return r.componentFor(componentInstance) +} + +func (r *Registry) ComponentOf(componentGoType reflect.Type) (*RegistrableComponent, bool) { + return r.componentOf(componentGoType) +} + +func (r *Registry) TaskFor(taskInstance any) (*RegistrableTask, bool) { + return r.taskFor(taskInstance) +} + +func (r *Registry) TaskOf(taskGoType reflect.Type) (*RegistrableTask, bool) { + return r.taskOf(taskGoType) +} + +func (rc RegistrableComponent) FqType() string { + return rc.fqType() +} + +func (rt RegistrableTask) FqType() string { + return rt.fqType() +} + +func EngineFromContext(ctx context.Context) Engine { + return engineFromContext(ctx) +} diff --git a/chasm/field.go b/chasm/field.go new file mode 100644 index 00000000000..31fbd562507 --- /dev/null +++ b/chasm/field.go @@ -0,0 +1,169 @@ +package chasm + +import ( + "reflect" + + 
	"go.temporal.io/api/serviceerror"
	"google.golang.org/protobuf/proto"
)

const (
	// internalFieldName is the name of the exported field on Field[T]; the
	// framework locates and sets it via reflection.
	internalFieldName = "Internal"
)

// Field is a typed handle to a CHASM-managed value (a Component, a Data message, or
// a pointer to one) held in the component tree.
type Field[T any] struct {
	// This struct needs to be created via reflection, but reflection can't set private fields,
	// so Internal must remain exported.
	Internal fieldInternal
}

// NewDataField wraps a proto message in a CHASM data field.
//
// Re. Data vs. Component: components have behavior and a lifecycle, while data does
// not and must be attached to a component. You can define a component purely for
// storing data, possibly with other information such as ref counts; most
// importantly, the framework needs to know when it is safe to delete the data, i.e.
// when the lifecycle of the owning data component reaches completed.
//
// ctx is currently unused here — presumably kept for API symmetry; TODO confirm.
func NewDataField[D proto.Message](
	ctx MutableContext,
	d D,
) Field[D] {
	return Field[D]{
		Internal: newFieldInternalWithValue(fieldTypeData, d),
	}
}

// NewComponentField wraps a component in a CHASM component field. Options control
// how the node created from this field is treated (e.g. the detached option).
//
// ctx is currently unused here — presumably kept for API symmetry; TODO confirm.
func NewComponentField[C Component](
	ctx MutableContext,
	c C,
	options ...ComponentFieldOption,
) Field[C] {
	opts := &componentFieldOptions{}
	for _, o := range options {
		o(opts)
	}
	internal := newFieldInternalWithValue(fieldTypeComponent, c)
	// Propagate the detached option so the resulting node is detached regardless of
	// the component type's registration.
	internal.detached = opts.detached
	return Field[C]{
		Internal: internal,
	}
}

// ComponentPointerTo returns a CHASM field populated with a pointer to the given
// component. The target component must be a proper ancestor of the referring
// component within the same component tree. Pointers to non-ancestor components
// (e.g., siblings, descendants, or components from a different tree) will cause
// the transaction to fail when it is closed.
func ComponentPointerTo[C Component](
	ctx MutableContext,
	c C,
) Field[C] {
	return Field[C]{
		Internal: newFieldInternalWithValue(fieldTypeDeferredPointer, c),
	}
}

// DataPointerTo returns a CHASM field populated with a pointer to the given
// message. Pointers are resolved at the time the transaction is closed, and the
// transaction will fail if any pointers cannot be resolved.
func DataPointerTo[D proto.Message](
	ctx MutableContext,
	d D,
) Field[D] {
	return Field[D]{
		Internal: newFieldInternalWithValue(fieldTypeDeferredPointer, d),
	}
}

// TryGet returns the value of the field and a boolean indicating if the value was found, deserializing if necessary.
// Panics rather than returning an error, as errors are supposed to be handled by the framework as opposed to the
// application, even if the error is an application bug.
func (f Field[T]) TryGet(chasmContext Context) (T, bool) {
	var nilT T

	// If node is nil, then there is nothing to deserialize from, return value (even if it is also nil).
	if f.Internal.node == nil {
		if f.Internal.v == nil {
			return nilT, false
		}
		vT, isT := f.Internal.v.(T)
		if !isT {
			// nolint:forbidigo // Panic is intended here for framework error handling.
			panic(serviceerror.NewInternalf("internal value doesn't implement %s", reflect.TypeFor[T]().Name()))
		}
		return vT, true
	}

	var nodeValue any
	switch f.Internal.fieldType() {
	case fieldTypeComponent:
		// Hydrate the component value from its serialized node before reading it.
		if err := f.Internal.node.prepareComponentValue(chasmContext); err != nil {
			// nolint:forbidigo // Panic is intended here for framework error handling.
			panic(err)
		}
		nodeValue = f.Internal.node.value
	case fieldTypeData:
		// For data fields, T is always a concrete type.
		if err := f.Internal.node.prepareDataValue(chasmContext, reflect.TypeFor[T]()); err != nil {
			// nolint:forbidigo // Panic is intended here for framework error handling.
			panic(err)
		}
		nodeValue = f.Internal.node.value
	case fieldTypePointer:
		if err := f.Internal.node.preparePointerValue(); err != nil {
			// nolint:forbidigo // Panic is intended here for framework error handling.
			panic(err)
		}
		//nolint:revive // value is guaranteed to be of type []string.
		path := f.Internal.value().([]string)
		// Resolve the stored path against the tree root, then hydrate whichever
		// node the pointer references.
		if referencedNode, found := f.Internal.node.root().findNode(path); found {
			var err error
			switch referencedNode.fieldType() {
			case fieldTypeComponent:
				err = referencedNode.prepareComponentValue(chasmContext)
			case fieldTypeData:
				err = referencedNode.prepareDataValue(chasmContext, reflect.TypeFor[T]())
			default:
				err = serviceerror.NewInternalf("pointer field referenced an unhandled value: %v", referencedNode.fieldType())
			}
			if err != nil {
				// nolint:forbidigo // Panic is intended here for framework error handling.
				panic(err)
			}
			nodeValue = referencedNode.value
		}
	case fieldTypeDeferredPointer:
		// For deferred pointers, return the component directly stored in v.
		nodeValue = f.Internal.v
	default:
		// nolint:forbidigo // Panic is intended here for framework error handling.
		panic(serviceerror.NewInternalf("unsupported field type: %v", f.Internal.fieldType()))
	}

	if nodeValue == nil {
		return nilT, false
	}
	vT, isT := nodeValue.(T)
	if !isT {
		// nolint:forbidigo // Panic is intended here for framework error handling.
		panic(serviceerror.NewInternalf("node value doesn't implement %s", reflect.TypeFor[T]().Name()))
	}
	return vT, true
}

// Get returns the value of the field, deserializing it if necessary.
// Panics rather than returning an error, as errors are supposed to be handled by the framework as opposed to the
// application, even if the error is an application bug.
func (f Field[T]) Get(chasmContext Context) T {
	v, ok := f.TryGet(chasmContext)
	if !ok {
		// nolint:forbidigo // Panic is intended here for framework error handling.
		panic(serviceerror.NewInternalf("field value of type %s not found", reflect.TypeFor[T]().Name()))
	}
	return v
}

// NewEmptyField returns a Field with neither a value nor a tree node attached.
func NewEmptyField[T any]() Field[T] {
	return Field[T]{}
}
diff --git a/chasm/field_internal.go b/chasm/field_internal.go
new file mode 100644
index 00000000000..1c3566caf6a
--- /dev/null
+++ b/chasm/field_internal.go
@@ -0,0 +1,56 @@
package chasm

// fieldInternal is the framework-facing state of a Field: either a raw value plus
// its type (before the field is attached to a tree), or a pointer to the tree node
// that owns the value once attached.
type fieldInternal struct {
	// These 2 fields are used when node is not set yet (i.e., node==nil).
	// Don't access them directly outside of this file. Use corresponding getters instead.
	ft fieldType
	v  any // Component | Data | Pointer

	// Pointer to the corresponding tree node. Can be nil for the just created fields.
	node *Node

	// Detached field option. When true, the node created from this field
	// will be detached regardless of the component type's registration.
	detached bool
}

// newFieldInternalWithValue creates field state for a value that is not yet
// attached to a tree node.
func newFieldInternalWithValue(ft fieldType, v any) fieldInternal {
	return fieldInternal{
		ft: ft,
		v:  v,
	}
}

// newFieldInternalWithNode creates field state backed by an existing tree node.
func newFieldInternalWithNode(node *Node) fieldInternal {
	return fieldInternal{
		node: node,
	}
}

// isEmpty reports whether the field has neither a value nor a node.
func (fi fieldInternal) isEmpty() bool {
	return fi.v == nil && fi.node == nil
}

// value returns the field's current value, preferring the node's value once the
// field is attached.
func (fi fieldInternal) value() any {
	// Deferred pointers are special-cased, since their serialized nodes are
	// initialized as regular persistable pointers.
	//
	// Deferred pointers may have a non-nil node after syncSubComponents, but before
	// resolution.
	if fi.node == nil || fi.ft == fieldTypeDeferredPointer {
		return fi.v
	}
	return fi.node.value
}

// fieldType returns the field's effective type, preferring the node's type once the
// field is attached.
func (fi fieldInternal) fieldType() fieldType {
	// Deferred pointers are special-cased, since their serialized nodes are
	// initialized as regular persistable pointers.
	//
	// Deferred pointers may have a non-nil node after syncSubComponents, but before
	// resolution.
+ if fi.node == nil || fi.ft == fieldTypeDeferredPointer { + return fi.ft + } + return fi.node.fieldType() +} diff --git a/chasm/field_test.go b/chasm/field_test.go new file mode 100644 index 00000000000..1f25c230840 --- /dev/null +++ b/chasm/field_test.go @@ -0,0 +1,438 @@ +package chasm + +import ( + "context" + "reflect" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/testing/protorequire" + "go.temporal.io/server/common/testing/testlogger" + "go.uber.org/mock/gomock" +) + +type fieldSuite struct { + suite.Suite + *require.Assertions + protorequire.ProtoAssertions + + controller *gomock.Controller + nodeBackend *MockNodeBackend + + registry *Registry + timeSource *clock.EventTimeSource + nodePathEncoder NodePathEncoder + logger log.Logger + metricsHandler metrics.Handler +} + +func TestFieldSuite(t *testing.T) { + suite.Run(t, new(fieldSuite)) +} + +func (s *fieldSuite) SetupTest() { + s.initAssertions() + s.controller = gomock.NewController(s.T()) + s.nodeBackend = &MockNodeBackend{} + + s.logger = testlogger.NewTestLogger(s.T(), testlogger.FailOnAnyUnexpectedError) + s.metricsHandler = metrics.NoopMetricsHandler + s.registry = NewRegistry(s.logger) + err := s.registry.Register(newTestLibrary(s.controller)) + s.NoError(err) + + s.timeSource = clock.NewEventTimeSource() + s.nodePathEncoder = &testNodePathEncoder{} +} + +func (s *fieldSuite) SetupSubTest() { + s.initAssertions() +} + +func (s *fieldSuite) initAssertions() { + // `s.Assertions` (as well as other test helpers which depends on `s.T()`) must be initialized on + // both test and subtest levels (but not suite level, where `s.T()` is `nil`). 
+ // + // If these helpers are not reinitialized on subtest level, any failed `assert` in + // subtest will fail the entire test (not subtest) immediately without running other subtests. + + s.Assertions = require.New(s.T()) + s.ProtoAssertions = protorequire.New(s.T()) +} + +func (s *fieldSuite) TestInternalFieldName() { + f := Field[any]{} + fT := reflect.TypeOf(f) + + _, ok := fT.FieldByName(internalFieldName) + s.True(ok, "expected field %s not found", internalFieldName) +} + +func (s *fieldSuite) TestFieldGetSimple() { + tests := []struct { + name string + field Field[*TestSubComponent1] + expected *TestSubComponent1 + }{ + { + name: "Get with non-nil value", + field: Field[*TestSubComponent1]{ + Internal: newFieldInternalWithValue( + fieldTypeComponent, + &TestSubComponent1{SubComponent1Data: &protoMessageType{ + CreateRequestId: "component-data", + }}, + )}, + expected: &TestSubComponent1{SubComponent1Data: &protoMessageType{ + CreateRequestId: "component-data", + }}, + }, + { + name: "Get with nil value and nil node", + field: Field[*TestSubComponent1]{ + Internal: newFieldInternalWithNode(nil), + }, + expected: nil, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + result, _ := tt.field.TryGet(nil) + s.Equal(tt.expected, result) + }) + } +} + +func (s *fieldSuite) TestFieldGetComponent() { + serializedNodes := testComponentSerializedNodes() + + node, err := s.newTestTree(serializedNodes) + s.NoError(err) + + chasmContext := NewMutableContext(context.Background(), node) + + c, err := node.Component(chasmContext, ComponentRef{componentPath: rootPath}) + s.NoError(err) + s.NotNil(c) + + tc := c.(*TestComponent) + + sc1 := tc.SubComponent1.Get(chasmContext) + s.NotNil(sc1) + s.ProtoEqual(&protoMessageType{ + CreateRequestId: "sub-component1-data", + }, sc1.SubComponent1Data) + + sd1 := tc.SubData1.Get(chasmContext) + s.NotNil(sd1) + s.ProtoEqual(&protoMessageType{ + CreateRequestId: "sub-data1", + }, sd1) +} + +func (s *fieldSuite) 
newTestTree( + serializedNodes map[string]*persistencespb.ChasmNode, +) (*Node, error) { + if len(serializedNodes) == 0 { + return NewEmptyTree( + s.registry, + s.timeSource, + s.nodeBackend, + s.nodePathEncoder, + s.logger, + s.metricsHandler, + ), nil + } + return NewTreeFromDB( + serializedNodes, + s.registry, + s.timeSource, + s.nodeBackend, + s.nodePathEncoder, + s.logger, + s.metricsHandler, + ) +} + +// setupComponentWithTree creates a basic component structure and attaches it to the tree. +func (s *fieldSuite) setupComponentWithTree(rootComponent *TestComponent) (*Node, MutableContext, error) { + rootNode := NewEmptyTree( + s.registry, + s.timeSource, + s.nodeBackend, + s.nodePathEncoder, + s.logger, + s.metricsHandler, + ) + if err := rootNode.SetRootComponent(rootComponent); err != nil { + return nil, nil, err + } + + return rootNode, NewMutableContext(context.Background(), rootNode), nil +} + +func (s *fieldSuite) TestDeferredPointerResolution() { + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + s.nodeBackend = &MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 1 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: func() definition.WorkflowKey { return workflowKey }, + } + + sc1 := &TestSubComponent1{ + SubComponent1Data: &protoMessageType{ + CreateRequestId: "sub-component1-data", + }, + } + + rootComponent := &TestComponent{ + ComponentData: &protoMessageType{ + CreateRequestId: "component-data", + }, + SubComponent1: NewComponentField(nil, sc1), + } + + rootNode, ctx, err := s.setupComponentWithTree(rootComponent) + s.NoError(err) + + // Get components from tree to mark nodes as needing sync. 
+ rootComponentInterface, err := rootNode.Component(ctx, ComponentRef{}) + s.NoError(err) + rootComponent = rootComponentInterface.(*TestComponent) + sc1 = rootComponent.SubComponent1.Get(ctx) + + // sc1 (child) points to rootComponent (parent) via component pointer. + sc1.RootPointer = ComponentPointerTo(ctx, rootComponent) + + // Verify deferred state. + s.Equal(fieldTypeDeferredPointer, sc1.RootPointer.Internal.fieldType()) + s.Equal(rootComponent, sc1.RootPointer.Internal.v) + + // CloseTransaction should resolve the deferred pointer. + mutations, err := rootNode.CloseTransaction() + s.NoError(err) + s.NotEmpty(mutations.UpdatedNodes) + + // Verify the pointer was resolved to a regular pointer with path. + s.Equal(fieldTypePointer, sc1.RootPointer.Internal.fieldType()) + + cResolvedPath, ok := sc1.RootPointer.Internal.v.([]string) + s.True(ok) + s.Equal([]string{}, cResolvedPath) + + // Verify we can dereference the component pointer. + resolvedComponent := sc1.RootPointer.Get(ctx) + s.Equal(rootComponent, resolvedComponent) +} + +func (s *fieldSuite) TestMixedPointerScenario() { + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + s.nodeBackend = &MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 1 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: func() definition.WorkflowKey { return workflowKey }, + } + + sc11 := &TestSubComponent11{ + SubComponent11Data: &protoMessageType{CreateRequestId: "sub-component11-data"}, + } + + sc1 := &TestSubComponent1{ + SubComponent1Data: &protoMessageType{CreateRequestId: "sub-component1-data"}, + SubComponent11: NewComponentField(nil, sc11), + } + + rootComponent := &TestComponent{ + ComponentData: &protoMessageType{CreateRequestId: "component-data"}, + SubComponent1: NewComponentField(nil, sc1), + } + + rootNode, ctx, err := s.setupComponentWithTree(rootComponent) + s.NoError(err) 
+ + // Get components from tree to mark nodes as needing sync. + rootComponentInterface, err := rootNode.Component(ctx, ComponentRef{}) + s.NoError(err) + rootComponent = rootComponentInterface.(*TestComponent) + sc1 = rootComponent.SubComponent1.Get(ctx) + sc11 = sc1.SubComponent11.Get(ctx) + + // Transaction 1: sc11 points to root (grandparent). + sc11.GrandparentPointer = ComponentPointerTo(ctx, rootComponent) + + _, err = rootNode.CloseTransaction() + s.NoError(err) + s.Equal(fieldTypePointer, sc11.GrandparentPointer.Internal.fieldType()) + + // Transaction 2: sc1 points to root (parent). + ctx2 := NewMutableContext(context.Background(), rootNode) + rootComponentInterface, err = rootNode.Component(ctx2, ComponentRef{}) + s.NoError(err) + + rootComponent = rootComponentInterface.(*TestComponent) + sc1 = rootComponent.SubComponent1.Get(ctx2) + sc11 = sc1.SubComponent11.Get(ctx2) + + sc1.RootPointer = ComponentPointerTo(ctx2, rootComponent) + + s.Equal(fieldTypePointer, sc11.GrandparentPointer.Internal.fieldType()) + s.Equal(fieldTypeDeferredPointer, sc1.RootPointer.Internal.fieldType()) + + _, err = rootNode.CloseTransaction() + s.NoError(err) + + // Ensure both pointers have been resolved. + s.Equal(fieldTypePointer, sc11.GrandparentPointer.Internal.fieldType()) + s.Equal(fieldTypePointer, sc1.RootPointer.Internal.fieldType()) + + resolved1 := sc11.GrandparentPointer.Get(ctx2) + s.Equal(rootComponent, resolved1) + + resolved2 := sc1.RootPointer.Get(ctx2) + s.Equal(rootComponent, resolved2) +} + +func (s *fieldSuite) TestUnresolvableDeferredPointerError() { + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + s.nodeBackend = &MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 1 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: func() definition.WorkflowKey { return workflowKey }, + } + + s.logger.(*testlogger.TestLogger). 
+ Expect(testlogger.Error, "failed to resolve deferred pointer during transaction close") + + orphanComponent := &TestSubComponent11{ + SubComponent11Data: &protoMessageType{ + CreateRequestId: "orphan-component", + }, + } + + rootComponent := &TestComponent{ + ComponentData: &protoMessageType{ + CreateRequestId: "component-data", + }, + } + + rootNode, ctx, err := s.setupComponentWithTree(rootComponent) + s.NoError(err) + + // Get component from tree to mark node as needing sync. + rootComponentInterface, err := rootNode.Component(ctx, ComponentRef{}) + s.NoError(err) + rootComponent = rootComponentInterface.(*TestComponent) + + rootComponent.SubComponent11Pointer = ComponentPointerTo(ctx, orphanComponent) + s.Equal(fieldTypeDeferredPointer, rootComponent.SubComponent11Pointer.Internal.fieldType()) + + _, err = rootNode.CloseTransaction() + s.Error(err) + s.Contains(err.Error(), "failed to resolve deferred pointer during transaction close") +} + +func (s *fieldSuite) TestNonAncestorComponentPointerRejected() { + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + s.nodeBackend = &MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 1 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: func() definition.WorkflowKey { return workflowKey }, + } + + s.logger.(*testlogger.TestLogger). 
+ Expect(testlogger.Error, "failed to resolve deferred pointer during transaction close") + + sc11 := &TestSubComponent11{ + SubComponent11Data: &protoMessageType{CreateRequestId: "sub-component11-data"}, + } + + sc1 := &TestSubComponent1{ + SubComponent1Data: &protoMessageType{CreateRequestId: "sub-component1-data"}, + SubComponent11: NewComponentField(nil, sc11), + } + + rootComponent := &TestComponent{ + ComponentData: &protoMessageType{CreateRequestId: "component-data"}, + SubComponent1: NewComponentField(nil, sc1), + } + + rootNode, ctx, err := s.setupComponentWithTree(rootComponent) + s.NoError(err) + + rootComponentInterface, err := rootNode.Component(ctx, ComponentRef{}) + s.NoError(err) + rootComponent = rootComponentInterface.(*TestComponent) + sc1 = rootComponent.SubComponent1.Get(ctx) + sc11 = sc1.SubComponent11.Get(ctx) + + // Root pointing to descendant sc11 should be rejected. + rootComponent.SubComponent11Pointer = ComponentPointerTo(ctx, sc11) + + _, err = rootNode.CloseTransaction() + s.Error(err) + s.Contains(err.Error(), "is not an ancestor of component") +} + +func (s *fieldSuite) TestChildComponentPointerRejected() { + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + s.nodeBackend = &MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 1 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: func() definition.WorkflowKey { return workflowKey }, + } + + s.logger.(*testlogger.TestLogger). 
+ Expect(testlogger.Error, "failed to resolve deferred pointer during transaction close") + + sc1 := &TestSubComponent1{ + SubComponent1Data: &protoMessageType{CreateRequestId: "sub-component1-data"}, + } + + rootComponent := &TestComponent{ + ComponentData: &protoMessageType{CreateRequestId: "component-data"}, + SubComponent1: NewComponentField(nil, sc1), + } + + rootNode, ctx, err := s.setupComponentWithTree(rootComponent) + s.NoError(err) + + rootComponentInterface, err := rootNode.Component(ctx, ComponentRef{}) + s.NoError(err) + rootComponent = rootComponentInterface.(*TestComponent) + sc1 = rootComponent.SubComponent1.Get(ctx) + + // Root pointing to child sc1 via interface pointer should be rejected. + rootComponent.SubComponentInterfacePointer = ComponentPointerTo[Component](ctx, sc1) + + _, err = rootNode.CloseTransaction() + s.Error(err) + s.Contains(err.Error(), "is not an ancestor of component") +} diff --git a/chasm/field_type.go b/chasm/field_type.go new file mode 100644 index 00000000000..eda80bf934b --- /dev/null +++ b/chasm/field_type.go @@ -0,0 +1,11 @@ +package chasm + +type fieldType int + +const ( + fieldTypeUnspecified fieldType = iota + fieldTypeComponent + fieldTypePointer + fieldTypeDeferredPointer + fieldTypeData +) diff --git a/chasm/fields_iterator.go b/chasm/fields_iterator.go new file mode 100644 index 00000000000..645349ae011 --- /dev/null +++ b/chasm/fields_iterator.go @@ -0,0 +1,177 @@ +package chasm + +import ( + "iter" + "reflect" + "strings" + + "go.temporal.io/api/serviceerror" +) + +const ( + chasmFieldTypePrefix = "chasm.Field[" + chasmMapTypePrefix = "chasm.Map[" + chasmMSPointerType = "chasm.MSPointer" + chasmParentPointerTypePrefix = "chasm.ParentPtr[" + + fieldNameTag = "name" +) + +type fieldKind uint8 + +const ( + fieldKindUnspecified fieldKind = iota + fieldKindData + fieldKindSubField + fieldKindSubMap + fieldKindMutableState + fieldKindParentPtr +) + +type fieldInfo struct { + val reflect.Value + typ reflect.Type + 
name string + kind fieldKind + err error +} + +// fieldsOf iterates across all CHASM-managed fields of a struct. Other fields +// are not yielded. +// +//nolint:revive // cognitive complexity 26 (> max enabled 25) +func fieldsOf(valueV reflect.Value) iter.Seq[fieldInfo] { + valueT := valueV.Type() + dataFieldName := "" + return func(yield func(fi fieldInfo) bool) { + for i := 0; i < valueT.Elem().NumField(); i++ { + fieldV := valueV.Elem().Field(i) + fieldT := fieldV.Type() + if fieldT == UnimplementedComponentT { + continue + } + + fieldN := fieldName(valueT.Elem().Field(i)) + var fieldErr error + fieldK := fieldKindUnspecified + if fieldT.AssignableTo(protoMessageT) { + if dataFieldName != "" { + fieldErr = serviceerror.NewInternalf("%s.%s: only one data field %s (implements proto.Message) allowed in component", valueT, fieldN, dataFieldName) + } + dataFieldName = fieldN + fieldK = fieldKindData + } else { + prefix := genericTypePrefix(fieldT) + if strings.HasPrefix(prefix, "*") { + switch prefix[1:] { + case chasmFieldTypePrefix, + chasmMapTypePrefix, + chasmMSPointerType, + chasmParentPointerTypePrefix: + fieldErr = serviceerror.NewInternalf("%s.%s: CHASM fields must not be pointers", valueT, fieldN) + default: + continue + } + } else { + switch prefix { + case chasmFieldTypePrefix: + fieldK = fieldKindSubField + case chasmMapTypePrefix: + fieldK = fieldKindSubMap + case chasmMSPointerType: + fieldK = fieldKindMutableState + case chasmParentPointerTypePrefix: + fieldK = fieldKindParentPtr + default: + continue // Skip non-CHASM fields. + } + } + + } + + if !yield(fieldInfo{val: fieldV, typ: fieldT, name: fieldN, kind: fieldK, err: fieldErr}) { + return + } + } + // If the data field is not found, generate one more fake field with only an error set. 
+ if dataFieldName == "" { + yield(fieldInfo{err: serviceerror.NewInternalf("%s: no data field (implements proto.Message) found", valueT)}) + } + } +} + +// unmanagedFieldsOf yields all non-CHASM managed fields of a struct. +func unmanagedFieldsOf(valueT reflect.Type) iter.Seq[fieldInfo] { + return func(yield func(fi fieldInfo) bool) { + if valueT.Kind() == reflect.Pointer { + valueT = valueT.Elem() + } + for i := range valueT.NumField() { + fieldT := valueT.Field(i).Type + if fieldT == UnimplementedComponentT { + continue + } + + // Skip the data field, which is always CHASM-managed. + if fieldT.AssignableTo(protoMessageT) { + continue + } + + fieldN := fieldName(valueT.Field(i)) + prefix := genericTypePrefix(fieldT) + switch prefix { + case chasmFieldTypePrefix, + chasmMapTypePrefix, + chasmMSPointerType, + chasmParentPointerTypePrefix: + continue // Skip CHASM fields. + default: + if !yield(fieldInfo{typ: fieldT, name: fieldN}) { + return + } + } + } + } +} + +func genericTypePrefix(t reflect.Type) string { + tn := t.String() + if tn == chasmMSPointerType { + return chasmMSPointerType + } + bracketPos := strings.Index(tn, "[") + if bracketPos == -1 { + return "" + } + return tn[:bracketPos+1] +} + +func fieldName(f reflect.StructField) string { + if tagName := f.Tag.Get(fieldNameTag); tagName != "" { + return tagName + } + return f.Name +} + +// visibilityFieldT is the reflect.Type for Field[*Visibility], used to detect +// components that use Visibility at registration time. +var visibilityFieldT = reflect.TypeFor[Field[*Visibility]]() + +// hasVisibilityField returns true if the given component type has a Field[*Visibility]. +// This is used at registration time to validate that archetypes using Visibility +// have configured a businessID alias. 
+func hasVisibilityField(componentT reflect.Type) bool { + if componentT.Kind() == reflect.Pointer { + componentT = componentT.Elem() + } + if componentT.Kind() != reflect.Struct { + return false + } + for i := range componentT.NumField() { + fieldT := componentT.Field(i).Type + if fieldT == visibilityFieldT { + return true + } + } + return false +} diff --git a/chasm/fields_iterator_test.go b/chasm/fields_iterator_test.go new file mode 100644 index 00000000000..261ca0ff68a --- /dev/null +++ b/chasm/fields_iterator_test.go @@ -0,0 +1,245 @@ +package chasm + +import ( + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.uber.org/mock/gomock" +) + +type fieldsIteratorSuite struct { + suite.Suite + *require.Assertions + + controller *gomock.Controller +} + +func TestFieldsIteratorSuite(t *testing.T) { + suite.Run(t, new(fieldsIteratorSuite)) +} + +func (s *fieldsIteratorSuite) SetupTest() { + s.initAssertions() + s.controller = gomock.NewController(s.T()) +} + +func (s *fieldsIteratorSuite) SetupSubTest() { + s.initAssertions() +} + +func (s *fieldsIteratorSuite) initAssertions() { + // `s.Assertions` (as well as other test helpers which depends on `s.T()`) must be initialized on + // both test and subtest levels (but not suite level, where `s.T()` is `nil`). + // + // If these helpers are not reinitialized on subtest level, any failed `assert` in + // subtest will fail the entire test (not subtest) immediately without running other subtests. 
+
+	s.Assertions = require.New(s.T())
+}
+
+func (s *fieldsIteratorSuite) TestGenericTypePrefix() {
+	tests := []struct {
+		name     string
+		input    any
+		expected string
+	}{
+		{
+			name:     "Field type",
+			input:    Field[string]{},
+			expected: chasmFieldTypePrefix,
+		},
+		{
+			name:     "Map type",
+			input:    Map[string, int]{},
+			expected: chasmMapTypePrefix,
+		},
+		{
+			name:     "Non-generic type",
+			input:    0,
+			expected: "",
+		},
+		{
+			// Renamed from the duplicate "Map type": duplicate s.Run names
+			name:     "Builtin map type",
+			input:    map[string]int{},
+			expected: "map[",
+		},
+	}
+
+	for _, tt := range tests {
+		s.Run(tt.name, func() {
+			typ := reflect.TypeOf(tt.input)
+			result := genericTypePrefix(typ)
+			s.Equal(tt.expected, result)
+		})
+	}
+}
+
+func (s *fieldsIteratorSuite) TestChasmFieldTypePrefix() {
+	f := Field[any]{}
+	fT := reflect.TypeOf(f)
+	s.True(strings.HasPrefix(fT.String(), chasmFieldTypePrefix))
+}
+
+func (s *fieldsIteratorSuite) TestChasmMapTypePrefix() {
+	c := Map[string, any]{}
+	cT := reflect.TypeOf(c)
+	s.True(strings.HasPrefix(cT.String(), chasmMapTypePrefix))
+}
+
+func (s *fieldsIteratorSuite) TestFieldsOf() {
+	type fieldPointer struct {
+		DataField    *protoMessageType
+		InvalidField *Field[string]
+	}
+
+	type noDataField struct {
+		SubField Field[string]
+		SubMap   Map[string, int]
+	}
+
+	type twoDataFields struct {
+		DataField        *protoMessageType
+		AnotherDataField *protoMessageType
+	}
+
+	type unimplementedComponentOnly struct {
+		UnimplementedComponent
+	}
+
+	tests := []struct {
+		name           string
+		input          any
+		expectedKinds  []fieldKind
+		expectedNames  []string
+		expectedTypes  []string
+		expectedErrors []string
+	}{
+		{
+			name: "Valid component with one data field",
+			input: &struct {
+				UnimplementedComponent
+				DataField *protoMessageType
+				SubField  Field[string]
+				SubMap    Map[string, int]
+				ignored   *struct{}
+			}{},
+			expectedKinds: []fieldKind{fieldKindData, fieldKindSubField, fieldKindSubMap},
+			expectedNames: []string{"DataField", "SubField", "SubMap"},
+			expectedTypes: []string{"*persistence.WorkflowExecutionState",
"chasm.Field[string]", "chasm.Map[string,int]"}, + expectedErrors: []string{"", "", ""}, + }, + { + name: "Component with no data field", + input: &noDataField{}, + expectedKinds: []fieldKind{fieldKindSubField, fieldKindSubMap, fieldKindUnspecified}, + expectedNames: []string{"SubField", "SubMap", ""}, + expectedTypes: []string{"chasm.Field[string]", "chasm.Map[string,int]", ""}, + expectedErrors: []string{"", "", "*chasm.noDataField: no data field (implements proto.Message) found"}, + }, + { + name: "Component with *Field", + input: &fieldPointer{}, + expectedKinds: []fieldKind{fieldKindData, fieldKindUnspecified}, + expectedNames: []string{"DataField", "InvalidField"}, + expectedTypes: []string{"*persistence.WorkflowExecutionState", "*chasm.Field[string]"}, + expectedErrors: []string{"", "*chasm.fieldPointer.InvalidField: CHASM fields must not be pointers"}, + }, + { + name: "Component with multiple data fields", + input: &twoDataFields{}, + expectedKinds: []fieldKind{fieldKindData, fieldKindData}, + expectedNames: []string{"DataField", "AnotherDataField"}, + expectedTypes: []string{"*persistence.WorkflowExecutionState", "*persistence.WorkflowExecutionState"}, + expectedErrors: []string{"", "*chasm.twoDataFields.AnotherDataField: only one data field DataField (implements proto.Message) allowed in component"}, + }, + { + name: "Component with UnimplementedComponent only", + input: &unimplementedComponentOnly{}, + expectedKinds: []fieldKind{fieldKindUnspecified}, + expectedNames: []string{""}, + expectedTypes: []string{""}, + expectedErrors: []string{"*chasm.unimplementedComponentOnly: no data field (implements proto.Message) found"}, + }, + } + + for _, tt := range tests { + s.Run(tt.name, func() { + valueV := reflect.ValueOf(tt.input) + + var actualKinds []fieldKind + var actualNames []string + var actualTypes []string + var actualErrors []string + + for field := range fieldsOf(valueV) { + actualKinds = append(actualKinds, field.kind) + actualNames = 
append(actualNames, field.name) + if field.typ != nil { + actualTypes = append(actualTypes, field.typ.String()) + } else { + actualTypes = append(actualTypes, "") + } + if field.err != nil { + actualErrors = append(actualErrors, field.err.Error()) + } else { + actualErrors = append(actualErrors, "") + } + } + + s.Equal(tt.expectedKinds, actualKinds) + s.Equal(tt.expectedNames, actualNames) + s.Equal(tt.expectedTypes, actualTypes) + s.Equal(tt.expectedErrors, actualErrors) + }) + } +} + +func (s *fieldsIteratorSuite) TestUnmanagedFieldsOf() { + type unmanagedFields struct { + UnimplementedComponent + + DataField *protoMessageType + SomeField Field[string] + unmanaged struct{} + anotherPtr *struct{} + } + + var result []string + for r := range unmanagedFieldsOf(reflect.TypeFor[unmanagedFields]()) { + result = append(result, r.name) + } + s.Equal(2, len(result)) + s.ElementsMatch([]string{"unmanaged", "anotherPtr"}, result) +} + +func (s *fieldsIteratorSuite) TestHasVisibilityField() { + type componentWithVisibility struct { + UnimplementedComponent + DataField *protoMessageType + Visibility Field[*Visibility] + } + + type componentWithoutVisibility struct { + UnimplementedComponent + DataField *protoMessageType + SomeField Field[string] + } + + s.Run("component with Visibility field", func() { + s.True(hasVisibilityField(reflect.TypeFor[componentWithVisibility]())) + s.True(hasVisibilityField(reflect.TypeFor[*componentWithVisibility]())) + }) + + s.Run("component without Visibility field", func() { + s.False(hasVisibilityField(reflect.TypeFor[componentWithoutVisibility]())) + s.False(hasVisibilityField(reflect.TypeFor[*componentWithoutVisibility]())) + }) + + s.Run("non-struct type", func() { + s.False(hasVisibilityField(reflect.TypeFor[string]())) + s.False(hasVisibilityField(reflect.TypeFor[int]())) + }) +} diff --git a/chasm/fx.go b/chasm/fx.go new file mode 100644 index 00000000000..60a6d5338e1 --- /dev/null +++ b/chasm/fx.go @@ -0,0 +1,11 @@ +package chasm + 
+import "go.uber.org/fx" + +var Module = fx.Module( + "chasm", + fx.Provide(NewRegistry), + fx.Invoke(func(registry *Registry) error { + return registry.Register(&CoreLibrary{}) + }), +) diff --git a/chasm/interceptor_test.go b/chasm/interceptor_test.go new file mode 100644 index 00000000000..4b7f235d7c6 --- /dev/null +++ b/chasm/interceptor_test.go @@ -0,0 +1,113 @@ +package chasm_test + +import ( + "context" + "net" + "testing" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/tests/gen/testspb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +type ServiceHandler struct { + testspb.UnimplementedTestServiceServer +} + +func (h ServiceHandler) Test( + ctx context.Context, + req *testspb.TestRequest, +) (resp *testspb.TestResponse, err error) { + hasEngineCtx := chasm.EngineFromContext(ctx) != nil + + return &testspb.TestResponse{ + RequestId: req.RequestId, + HasEngineCtx: hasEngineCtx, + }, nil +} + +type ServiceLibrary struct { + chasm.UnimplementedLibrary +} + +func NewServiceLibrary() *ServiceLibrary { + return &ServiceLibrary{} +} + +func (l *ServiceLibrary) RegisterServices(server *grpc.Server) { + testspb.RegisterTestServiceServer(server, ServiceHandler{}) +} + +func TestChasmEngineInterceptor_ShouldRespond(t *testing.T) { + ctrl := gomock.NewController(t) + + mockEngine := chasm.NewMockEngine(ctrl) + engineInterceptor := chasm.ChasmEngineInterceptorProvider( + mockEngine, + log.NewNoopLogger(), + metrics.NoopMetricsHandler, + ) + + server, address := startTestServer(t, grpc.UnaryInterceptor(engineInterceptor.Intercept)) + defer server.Stop() + + response := testRoundTrip(t, address) + require.True(t, response.HasEngineCtx) +} + +func TestChasmVisibilityInterceptor_ShouldRespond(t *testing.T) { + ctrl := gomock.NewController(t) + + mockVisibilityManager := 
chasm.NewMockVisibilityManager(ctrl)
+	visibilityInterceptor := chasm.ChasmVisibilityInterceptorProvider(mockVisibilityManager)
+
+	server, address := startTestServer(t, grpc.UnaryInterceptor(visibilityInterceptor.Intercept))
+	defer server.Stop()
+	testRoundTrip(t, address)
+}
+
+// testRoundTrip issues one Test RPC against address and asserts the request ID round-trips.
+func testRoundTrip(t *testing.T, address string) *testspb.TestResponse {
+	conn, err := grpc.NewClient(address, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		t.Fatalf("failed to connect: %v", err)
+	}
+
+	defer func() {
+		err := conn.Close()
+		require.NoError(t, err)
+	}()
+
+	client := testspb.NewTestServiceClient(conn)
+
+	var response *testspb.TestResponse
+	response, err = client.Test(t.Context(), &testspb.TestRequest{
+		RequestId: "test-request-id",
+	})
+	require.NoError(t, err)
+	require.Equal(t, "test-request-id", response.GetRequestId())
+
+	return response
+}
+
+func startTestServer(t *testing.T, opt ...grpc.ServerOption) (*grpc.Server, string) {
+	server := grpc.NewServer(opt...)
+	listener, err := net.Listen("tcp", "localhost:0") // :0 picks a random available port
+	if err != nil {
+		t.Fatalf("failed to listen: %v", err)
+	}
+
+	lib := NewServiceLibrary()
+	lib.RegisterServices(server)
+
+	go func() {
+		// Serve returns after server.Stop(); asserting here would race with test completion.
+		_ = server.Serve(listener)
+	}()
+
+	return server, listener.Addr().String()
+}
diff --git a/chasm/interceptors.go b/chasm/interceptors.go
new file mode 100644
index 00000000000..484eb5f0c48
--- /dev/null
+++ b/chasm/interceptors.go
@@ -0,0 +1,67 @@
+package chasm
+
+import (
+	"context"
+
+	"go.temporal.io/server/common/log"
+	"go.temporal.io/server/common/metrics"
+	"google.golang.org/grpc"
+)
+
+// ChasmEngineInterceptor is an interceptor that intercepts RPC requests,
+// detects CHASM-specific calls and does additional boilerplate processing before
+// handing off. Visibility is injected separately with
+// ChasmVisibilityInterceptor.
+type ChasmEngineInterceptor struct { + engine Engine + logger log.SnTaggedLogger + metricsHandler metrics.Handler +} + +func (i *ChasmEngineInterceptor) Intercept( + ctx context.Context, + req any, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, +) (resp any, retError error) { + // Capture panics for any handler method, not just CHASM-specific ones. This could have gone into a separate + // interceptor, but having it here avoids the overhead of adding another layer to the interceptor chain. + defer metrics.CapturePanic(i.logger, i.metricsHandler, &retError) + + ctx = NewEngineContext(ctx, i.engine) + return handler(ctx, req) +} + +func ChasmEngineInterceptorProvider( + engine Engine, + logger log.Logger, + metricsHandler metrics.Handler, +) *ChasmEngineInterceptor { + return &ChasmEngineInterceptor{ + engine: engine, + logger: logger, + metricsHandler: metricsHandler, + } +} + +// ChasmVisibilityInterceptor intercepts RPC requests and adds the CHASM +// VisibilityManager to their context. 
+type ChasmVisibilityInterceptor struct { + visibilityMgr VisibilityManager +} + +func (i *ChasmVisibilityInterceptor) Intercept( + ctx context.Context, + req any, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, +) (resp any, retError error) { + ctx = NewVisibilityManagerContext(ctx, i.visibilityMgr) + return handler(ctx, req) +} + +func ChasmVisibilityInterceptorProvider(visibilityMgr VisibilityManager) *ChasmVisibilityInterceptor { + return &ChasmVisibilityInterceptor{ + visibilityMgr: visibilityMgr, + } +} diff --git a/chasm/lib/activity/activity.go b/chasm/lib/activity/activity.go new file mode 100644 index 00000000000..df45a9ac490 --- /dev/null +++ b/chasm/lib/activity/activity.go @@ -0,0 +1,1151 @@ +package activity + +import ( + "errors" + "fmt" + "slices" + "time" + + "github.com/nexus-rpc/sdk-go/nexus" + apiactivitypb "go.temporal.io/api/activity/v1" //nolint:importas + callbackpb "go.temporal.io/api/callback/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/chasm/lib/callback" + callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/contextutil" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + "go.temporal.io/server/common/payload" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.temporal.io/server/common/tqid" + 
"google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // WorkflowTypeTag is a required workflow tag for standalone activities to ensure consistent + // metric labeling between workflows and activities. + WorkflowTypeTag = "__temporal_standalone_activity__" + + // ByIDTokenAttempt is used in synthesized tokens for by-ID API calls where the caller does not specify the attempt. + // The validator skips the attempt check when it sees this value. + // 0 is safe because polled tokens always carry Count >= 1 (TransitionScheduled increments from 0). + ByIDTokenAttempt int32 = 0 +) + +var ( + TypeSearchAttribute = chasm.NewSearchAttributeKeyword("ActivityType", chasm.SearchAttributeFieldKeyword01) + StatusSearchAttribute = chasm.NewSearchAttributeKeyword("ExecutionStatus", chasm.SearchAttributeFieldLowCardinalityKeyword01) +) + +var _ chasm.VisibilitySearchAttributesProvider = (*Activity)(nil) +var _ callback.CompletionSource = (*Activity)(nil) + +type ActivityStore interface { + // RecordCompleted applies the provided function to record activity completion + RecordCompleted(ctx chasm.MutableContext, applyFn func(ctx chasm.MutableContext) error) error +} + +// Activity component represents an activity execution persistence object and can be either standalone activity or one +// embedded within a workflow. +type Activity struct { + chasm.UnimplementedComponent + + *activitypb.ActivityState + + Visibility chasm.Field[*chasm.Visibility] + LastAttempt chasm.Field[*activitypb.ActivityAttemptState] + LastHeartbeat chasm.Field[*activitypb.ActivityHeartbeatState] + // Standalone only + RequestData chasm.Field[*activitypb.ActivityRequestData] + Outcome chasm.Field[*activitypb.ActivityOutcome] + // Pointer to an implementation of the "store". For a workflow activity this would be a parent + // pointer back to the workflow. 
For a standalone activity this is nil (Activity itself + // implements the ActivityStore interface). + // TODO(saa-preview): figure out better naming. + Store chasm.ParentPtr[ActivityStore] + + // Callbacks holds completion callbacks to be invoked when this standalone activity reaches a terminal state. Nil + // for workflow-embedded activities as the workflow handles its own callbacks. + Callbacks chasm.Map[string, *callback.Callback] +} + +// WithToken wraps a request with its deserialized task token. +type WithToken[R any] struct { + Token *tokenspb.Task + Request R +} + +// RespondCompletedEvent wraps the RespondActivityTaskCompletedRequest with context-specific data. +type RespondCompletedEvent struct { + Request *historyservice.RespondActivityTaskCompletedRequest + Token *tokenspb.Task +} + +// RespondFailedEvent wraps the RespondActivityTaskFailedRequest with context-specific data. +type RespondFailedEvent struct { + Request *historyservice.RespondActivityTaskFailedRequest + Token *tokenspb.Task +} + +// RespondCancelledEvent wraps the RespondActivityTaskCanceledRequest with context-specific data. +type RespondCancelledEvent struct { + Request *historyservice.RespondActivityTaskCanceledRequest + Token *tokenspb.Task +} + +// LifecycleState implements the chasm.Component interface. 
+func (a *Activity) LifecycleState(_ chasm.Context) chasm.LifecycleState { + switch a.Status { + case activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED: + return chasm.LifecycleStateCompleted + case activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, + activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, + activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED: + return chasm.LifecycleStateFailed + default: + return chasm.LifecycleStateRunning + } +} + +func (a *Activity) ContextMetadata(_ chasm.Context) map[string]string { + md := make(map[string]string, 2) + if actType := a.GetActivityType().GetName(); actType != "" { + md[contextutil.MetadataKeyStandaloneActivityType] = actType + } + if tq := a.GetTaskQueue().GetName(); tq != "" { + md[contextutil.MetadataKeyStandaloneActivityTaskQueue] = tq + } + if len(md) == 0 { + return nil + } + return md +} + +// NewStandaloneActivity creates a new activity component and adds associated tasks to start execution. +func NewStandaloneActivity( + ctx chasm.MutableContext, + request *workflowservice.StartActivityExecutionRequest, +) (*Activity, error) { + visibility := chasm.NewVisibilityWithData( + ctx, + request.GetSearchAttributes().GetIndexedFields(), + nil, + ) + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: request.ActivityType, + TaskQueue: request.GetTaskQueue(), + ScheduleToCloseTimeout: request.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: request.GetScheduleToStartTimeout(), + StartToCloseTimeout: request.GetStartToCloseTimeout(), + HeartbeatTimeout: request.GetHeartbeatTimeout(), + RetryPolicy: request.GetRetryPolicy(), + Priority: request.Priority, + StartDelay: request.GetStartDelay(), + }, + LastAttempt: chasm.NewDataField(ctx, &activitypb.ActivityAttemptState{}), + RequestData: chasm.NewDataField(ctx, &activitypb.ActivityRequestData{ + Input: request.Input, + Header: request.Header, + UserMetadata: request.UserMetadata, + }), + Outcome: 
chasm.NewDataField(ctx, &activitypb.ActivityOutcome{}), + Visibility: chasm.NewComponentField(ctx, visibility), + } + + activity.ScheduleTime = timestamppb.New(ctx.Now(activity)) + + return activity, nil +} + +func NewEmbeddedActivity( + ctx chasm.MutableContext, + state *activitypb.ActivityState, + parent ActivityStore, +) { +} + +func (a *Activity) createAddActivityTaskRequest(ctx chasm.Context, namespaceID string) (*matchingservice.AddActivityTaskRequest, error) { + // Get latest component ref and unmarshal into proto ref + componentRef, err := ctx.Ref(a) + if err != nil { + return nil, err + } + + // Note: No need to set the vector clock here, as the components track version conflicts for read/write + // TODO: Need to fill in VersionDirective once we decide how to handle versioning for standalone activities + return &matchingservice.AddActivityTaskRequest{ + NamespaceId: namespaceID, + ScheduleToStartTimeout: a.ScheduleToStartTimeout, + TaskQueue: a.GetTaskQueue(), + Priority: a.GetPriority(), + ComponentRef: componentRef, + Stamp: a.LastAttempt.Get(ctx).GetStamp(), + }, nil +} + +// HandleStarted updates the activity on recording activity task started and populates the response. +func (a *Activity) HandleStarted(ctx chasm.MutableContext, request *historyservice.RecordActivityTaskStartedRequest) ( + *historyservice.RecordActivityTaskStartedResponse, error, +) { + lastAttempt := a.LastAttempt.Get(ctx) + // If already started, return existing response if request ID matches to make retry idempotent, else error. 
+ if a.StateMachineState() == activitypb.ACTIVITY_EXECUTION_STATUS_STARTED && request.GetRequestId() == lastAttempt.GetStartRequestId() { + return a.GenerateRecordActivityTaskStartedResponse(ctx, request.GetPollRequest().GetNamespace()) + } + if lastAttempt.GetStamp() != request.GetStamp() { + return nil, serviceerrors.NewObsoleteMatchingTask("activity attempt stamp mismatch") + } + if err := TransitionStarted.Apply(a, ctx, request); err != nil { + if errors.Is(err, chasm.ErrInvalidTransition) { + return nil, serviceerrors.NewObsoleteMatchingTask(err.Error()) + } + return nil, err + } + return a.GenerateRecordActivityTaskStartedResponse(ctx, request.GetPollRequest().GetNamespace()) +} + +// GenerateRecordActivityTaskStartedResponse generates the response for HandleStarted. +func (a *Activity) GenerateRecordActivityTaskStartedResponse( + ctx chasm.Context, + namespace string, +) (*historyservice.RecordActivityTaskStartedResponse, error) { + key := ctx.ExecutionKey() + lastHeartbeat, _ := a.LastHeartbeat.TryGet(ctx) + requestData := a.RequestData.Get(ctx) + attempt := a.LastAttempt.Get(ctx) + + return &historyservice.RecordActivityTaskStartedResponse{ + StartedTime: attempt.GetStartedTime(), + Attempt: attempt.GetCount(), + Priority: a.GetPriority(), + RetryPolicy: a.GetRetryPolicy(), + ActivityRunId: key.RunID, + WorkflowNamespace: namespace, + HeartbeatDetails: lastHeartbeat.GetDetails(), + CurrentAttemptScheduledTime: a.attemptScheduleTime(attempt), + ScheduledEvent: &historypb.HistoryEvent{ + EventType: enumspb.EVENT_TYPE_ACTIVITY_TASK_SCHEDULED, + EventTime: a.GetScheduleTime(), + Attributes: &historypb.HistoryEvent_ActivityTaskScheduledEventAttributes{ + ActivityTaskScheduledEventAttributes: &historypb.ActivityTaskScheduledEventAttributes{ + ActivityId: key.BusinessID, + ActivityType: a.GetActivityType(), + Input: requestData.GetInput(), + Header: requestData.GetHeader(), + TaskQueue: a.GetTaskQueue(), + ScheduleToCloseTimeout: a.GetScheduleToCloseTimeout(), + 
ScheduleToStartTimeout: a.GetScheduleToStartTimeout(), + StartToCloseTimeout: a.GetStartToCloseTimeout(), + HeartbeatTimeout: a.GetHeartbeatTimeout(), + }, + }, + }, + }, nil +} + +// attemptScheduleTime returns when the given attempt was scheduled to run: +// the activity's schedule time plus start delay for the first attempt, or +// calculated from attemptScheduleTimeForRetry on retries. +func (a *Activity) attemptScheduleTime(attempt *activitypb.ActivityAttemptState) *timestamppb.Timestamp { + if attempt.GetCount() == 1 { + return timestamppb.New(a.firstDispatchTime()) + } + return attemptScheduleTimeForRetry(attempt) +} + +// attemptScheduleTimeForRetry computes the time a retried attempt is scheduled to start, +// as complete_time + retry_interval. Returns nil if either field is missing or zero. +func attemptScheduleTimeForRetry(attempt *activitypb.ActivityAttemptState) *timestamppb.Timestamp { + retryInterval := attempt.GetCurrentRetryInterval() + completeTime := attempt.GetCompleteTime() + if retryInterval != nil && retryInterval.AsDuration() > 0 && completeTime != nil { + return timestamppb.New(completeTime.AsTime().Add(retryInterval.AsDuration())) + } + return nil +} + +// RecordCompleted applies the provided function to record activity completion. +// For standalone activities, it also triggers any registered completion callbacks. 
+func (a *Activity) RecordCompleted(ctx chasm.MutableContext, applyFn func(ctx chasm.MutableContext) error) error { + if err := applyFn(ctx); err != nil { + return err + } + return callback.ScheduleStandbyCallbacks(ctx, a.Callbacks) +} + +func (a *Activity) addCompletionCallbacks( + ctx chasm.MutableContext, + requestID string, + completionCallbacks []*commonpb.Callback, + maxCallbacks int, +) error { + if len(completionCallbacks) == 0 { + return nil + } + if a.LifecycleState(ctx).IsClosed() { + return serviceerror.NewFailedPrecondition("cannot attach callbacks to a closed activity") + } + + currentCount := len(a.Callbacks) + if len(completionCallbacks)+currentCount > maxCallbacks { + return serviceerror.NewFailedPreconditionf( + "cannot attach more than %d callbacks to an activity (%d callbacks already attached)", + maxCallbacks, + currentCount, + ) + } + + if a.Callbacks == nil { + a.Callbacks = make(chasm.Map[string, *callback.Callback], len(completionCallbacks)) + } + + registrationTime := timestamppb.New(ctx.Now(a)) + + for idx, cb := range completionCallbacks { + chasmCB := &callbackspb.Callback{ + Links: cb.GetLinks(), + } + switch variant := cb.Variant.(type) { + case *commonpb.Callback_Nexus_: + chasmCB.Variant = &callbackspb.Callback_Nexus_{ + Nexus: &callbackspb.Callback_Nexus{ + Url: variant.Nexus.GetUrl(), + Header: variant.Nexus.GetHeader(), + }, + } + default: + return serviceerror.NewInvalidArgumentf("unsupported callback variant: %T", variant) + } + + // requestID (unique per API call) + idx (position within the request) ensures unique,idempotent callback IDs. + id := fmt.Sprintf("%s-%d", requestID, idx) + callbackObj := callback.NewCallback(requestID, registrationTime, &callbackspb.CallbackState{}, chasmCB) + a.Callbacks[id] = chasm.NewComponentField(ctx, callbackObj) + } + return nil +} + +// GetNexusCompletion returns the activity's completion data in the format required by the Nexus callback invocation. +// Implements callback.CompletionSource. 
+func (a *Activity) GetNexusCompletion(ctx chasm.Context, _ string) (nexusrpc.CompleteOperationOptions, error) { + if !a.LifecycleState(ctx).IsClosed() { + return nexusrpc.CompleteOperationOptions{}, serviceerror.NewInternal("activity has not completed yet") + } + + opts := nexusrpc.CompleteOperationOptions{ + StartTime: a.GetScheduleTime().AsTime(), + CloseTime: ctx.ExecutionInfo().CloseTime, + } + + outcome := a.Outcome.Get(ctx) + if successful := outcome.GetSuccessful(); successful != nil { + // Successful completion: return the first output payload as the result as Nexus supports only a single payload + var p *commonpb.Payload + if payloads := successful.GetOutput().GetPayloads(); len(payloads) > 0 { + p = payloads[0] + } + opts.Result = p + return opts, nil + } + + failure := a.terminalFailure(ctx) + if failure != nil { + state := nexus.OperationStateFailed + message := "operation failed" + if a.Status == activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED { + state = nexus.OperationStateCanceled + message = "operation canceled" + } + + nf, err := commonnexus.TemporalFailureToNexusFailure(failure) + if err != nil { + return nexusrpc.CompleteOperationOptions{}, serviceerror.NewInternalf("failed to convert failure: %v", err) + } + + opErr := &nexus.OperationError{ + State: state, + Message: message, + Cause: &nexus.FailureError{Failure: nf}, + } + if err := nexusrpc.MarkAsWrapperError(nexusrpc.DefaultFailureConverter(), opErr); err != nil { + return nexusrpc.CompleteOperationOptions{}, err + } + opts.Error = opErr + return opts, nil + } + + return nexusrpc.CompleteOperationOptions{}, serviceerror.NewInternalf("activity in status %v has no outcome", a.Status) +} + +// HandleCompleted updates the activity on activity completion. 
+func (a *Activity) HandleCompleted( + ctx chasm.MutableContext, + event RespondCompletedEvent, +) (*historyservice.RespondActivityTaskCompletedResponse, error) { + if err := a.validateActivityTaskToken(ctx, event.Token, event.Request.GetNamespaceId()); err != nil { + return nil, err + } + + metricsHandler, err := a.enrichMetricsHandler(ctx, metrics.HistoryRespondActivityTaskCompletedScope) + if err != nil { + return nil, err + } + + if err := TransitionCompleted.Apply(a, ctx, completeEvent{ + req: event.Request, + metricsHandler: metricsHandler, + }); err != nil { + return nil, err + } + + return &historyservice.RespondActivityTaskCompletedResponse{}, nil +} + +// HandleFailed updates the activity on activity failure. if the activity is retryable, it will be rescheduled +// for retry instead. +func (a *Activity) HandleFailed( + ctx chasm.MutableContext, + event RespondFailedEvent, +) (*historyservice.RespondActivityTaskFailedResponse, error) { + if err := a.validateActivityTaskToken(ctx, event.Token, event.Request.GetNamespaceId()); err != nil { + return nil, err + } + + metricsHandler, err := a.enrichMetricsHandler(ctx, metrics.HistoryRespondActivityTaskFailedScope) + if err != nil { + return nil, err + } + failure := event.Request.GetFailedRequest().GetFailure() + + appFailure := failure.GetApplicationFailureInfo() + isRetryable := appFailure != nil && + !appFailure.GetNonRetryable() && + !slices.Contains(a.GetRetryPolicy().GetNonRetryableErrorTypes(), appFailure.GetType()) + + if isRetryable { + rescheduled, err := a.tryReschedule(ctx, appFailure.GetNextRetryDelay().AsDuration(), failure) + if err != nil { + return nil, err + } + if rescheduled { + a.emitOnAttemptFailedMetrics(ctx, metricsHandler) + + return &historyservice.RespondActivityTaskFailedResponse{}, nil + } + } + + if err := TransitionFailed.Apply(a, ctx, failedEvent{ + req: event.Request, + metricsHandler: metricsHandler, + }); err != nil { + return nil, err + } + + return 
&historyservice.RespondActivityTaskFailedResponse{}, nil +} + +// HandleCanceled updates the activity on activity canceled. +func (a *Activity) HandleCanceled( + ctx chasm.MutableContext, + event RespondCancelledEvent, +) (*historyservice.RespondActivityTaskCanceledResponse, error) { + if err := a.validateActivityTaskToken(ctx, event.Token, event.Request.GetNamespaceId()); err != nil { + return nil, err + } + + metricsHandler, err := a.enrichMetricsHandler(ctx, metrics.HistoryRespondActivityTaskCanceledScope) + if err != nil { + return nil, err + } + + if err := TransitionCanceled.Apply(a, ctx, cancelEvent{ + details: event.Request.GetCancelRequest().GetDetails(), + handler: metricsHandler, + fromStatus: a.GetStatus(), + }); err != nil { + return nil, err + } + + return &historyservice.RespondActivityTaskCanceledResponse{}, nil +} + +// Terminate implements the chasm.RootComponent interface. +func (a *Activity) Terminate( + ctx chasm.MutableContext, + req chasm.TerminateComponentRequest, +) (chasm.TerminateComponentResponse, error) { + // If already in terminated state, fail if request ID is different, else no-op + if a.GetStatus() == activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED { + newReqID := req.RequestID + existingReqID := a.GetTerminateState().GetRequestId() + + if existingReqID != newReqID { + return chasm.TerminateComponentResponse{}, serviceerror.NewFailedPreconditionf( + "already terminated with request ID %s", existingReqID) + } + + return chasm.TerminateComponentResponse{}, nil + } + + metricsHandler, err := a.enrichMetricsHandler(ctx, metrics.ActivityTerminatedScope) + if err != nil { + return chasm.TerminateComponentResponse{}, err + } + return chasm.TerminateComponentResponse{}, TransitionTerminated.Apply(a, ctx, terminateEvent{ + request: req, + metricsHandler: metricsHandler, + fromStatus: a.GetStatus(), + }) +} + +// getOrCreateLastHeartbeat retrieves the last heartbeat state, initializing it if not present. 
The heartbeat is lazily created +// to avoid unnecessary writes when heartbeats are not used. +func (a *Activity) getOrCreateLastHeartbeat(ctx chasm.MutableContext) *activitypb.ActivityHeartbeatState { + heartbeat, ok := a.LastHeartbeat.TryGet(ctx) + if !ok { + heartbeat = &activitypb.ActivityHeartbeatState{} + a.LastHeartbeat = chasm.NewDataField(ctx, heartbeat) + } + return heartbeat +} + +func (a *Activity) handleCancellationRequested(ctx chasm.MutableContext, request *activitypb.RequestCancelActivityExecutionRequest) ( + *activitypb.RequestCancelActivityExecutionResponse, error, +) { + req := request.GetFrontendRequest() + newReqID := req.GetRequestId() + existingReqID := a.GetCancelState().GetRequestId() + + // If already in cancel requested state, fail if request ID is different, else no-op + if a.GetStatus() == activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED { + if existingReqID != newReqID { + return nil, serviceerror.NewFailedPrecondition( + fmt.Sprintf("cancellation already requested with request ID %s", existingReqID)) + } + + return &activitypb.RequestCancelActivityExecutionResponse{}, nil + } + + // If in scheduled state, cancel immediately right after marking cancel requested + isCancelImmediately := a.GetStatus() == activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED + + if err := TransitionCancelRequested.Apply(a, ctx, req); err != nil { + return nil, err + } + + if isCancelImmediately { + details := &commonpb.Payloads{ + Payloads: []*commonpb.Payload{ + payload.EncodeString(req.GetReason()), + }, + } + + metricsHandler, err := a.enrichMetricsHandler(ctx, metrics.HistoryRespondActivityTaskCanceledScope) + if err != nil { + return nil, err + } + err = TransitionCanceled.Apply(a, ctx, cancelEvent{ + details: details, + handler: metricsHandler, + fromStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, // if we're here the original status was scheduled + }) + if err != nil { + return nil, err + } + } + + return 
&activitypb.RequestCancelActivityExecutionResponse{}, nil +} + +// recordScheduleToStartOrCloseTimeoutFailure records schedule-to-start or schedule-to-close timeouts. Such timeouts are not retried so we +// set the outcome failure directly and leave the attempt failure as is. +func (a *Activity) recordScheduleToStartOrCloseTimeoutFailure(ctx chasm.MutableContext, timeoutType enumspb.TimeoutType) error { + outcome := a.Outcome.Get(ctx) + + failure := &failurepb.Failure{ + Message: fmt.Sprintf(common.FailureReasonActivityTimeout, timeoutType.String()), + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: timeoutType, + }, + }, + } + + outcome.Variant = &activitypb.ActivityOutcome_Failed_{ + Failed: &activitypb.ActivityOutcome_Failed{ + Failure: failure, + }, + } + + return nil +} + +// recordFailedAttempt records any failures resulting from a tried attempt, including worker application failures and +// start-to-close timeouts. Since the calls come from retried attempts we update the attempt failure info but leave +// the outcome failure empty to avoid duplication. +func (a *Activity) recordFailedAttempt( + ctx chasm.MutableContext, + retryInterval time.Duration, + failure *failurepb.Failure, + currentTime time.Time, + noRetriesLeft bool, +) error { + attempt := a.LastAttempt.Get(ctx) + + attempt.LastFailureDetails = &activitypb.ActivityAttemptState_LastFailureDetails{ + Failure: failure, + Time: timestamppb.New(currentTime), + } + attempt.CompleteTime = timestamppb.New(currentTime) + + if noRetriesLeft { + attempt.CurrentRetryInterval = nil + } else { + attempt.CurrentRetryInterval = durationpb.New(retryInterval) + } + return nil +} + +// tryReschedule attempts to reschedule the activity for retry. Returns true if rescheduled, false +// if retry is not possible. 
+func (a *Activity) tryReschedule( + ctx chasm.MutableContext, + overridingRetryInterval time.Duration, + failure *failurepb.Failure, +) (bool, error) { + shouldRetry, retryInterval := a.shouldRetry(ctx, overridingRetryInterval) + if !shouldRetry { + return false, nil + } + return true, TransitionRescheduled.Apply(a, ctx, rescheduleEvent{ + retryInterval: retryInterval, + failure: failure, + }) +} + +func (a *Activity) shouldRetry(ctx chasm.Context, overridingRetryInterval time.Duration) (bool, time.Duration) { + if !TransitionRescheduled.Possible(a) { + return false, 0 + } + attempt := a.LastAttempt.Get(ctx) + retryPolicy := a.RetryPolicy + + enoughAttempts := retryPolicy.GetMaximumAttempts() == 0 || attempt.GetCount() < retryPolicy.GetMaximumAttempts() + enoughTime, retryInterval := a.hasEnoughTimeForRetry(ctx, overridingRetryInterval) + return enoughAttempts && enoughTime, retryInterval +} + +// hasEnoughTimeForRetry checks if there is enough time left in the schedule-to-close timeout. If sufficient time +// remains, it will also return a valid retry interval. +func (a *Activity) hasEnoughTimeForRetry(ctx chasm.Context, overridingRetryInterval time.Duration) (bool, time.Duration) { + attempt := a.LastAttempt.Get(ctx) + + // Use overriding retry interval if provided, else calculate based on retry policy + retryInterval := overridingRetryInterval + if retryInterval <= 0 { + retryInterval = backoff.CalculateExponentialRetryInterval(a.RetryPolicy, attempt.Count) + } + + scheduleToClose := a.GetScheduleToCloseTimeout().AsDuration() + if scheduleToClose == 0 { + return true, retryInterval + } + + deadline := a.scheduleToCloseDeadline() + return ctx.Now(a).Add(retryInterval).Before(deadline), retryInterval +} + +func (a *Activity) firstDispatchTime() time.Time { + return a.ScheduleTime.AsTime().Add(a.GetStartDelay().AsDuration()) +} + +// scheduleToCloseDeadline returns the absolute time at which the ScheduleToClose timeout expires, +// accounting for start delay. 
Returns zero time if no ScheduleToClose timeout is set. +func (a *Activity) scheduleToCloseDeadline() time.Time { + timeout := a.GetScheduleToCloseTimeout().AsDuration() + if timeout == 0 { + return time.Time{} + } + return a.firstDispatchTime().Add(timeout) +} + +func createStartToCloseTimeoutFailure() *failurepb.Failure { + return &failurepb.Failure{ + Message: fmt.Sprintf(common.FailureReasonActivityTimeout, enumspb.TIMEOUT_TYPE_START_TO_CLOSE.String()), + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + }, + }, + } +} + +func createHeartbeatTimeoutFailure() *failurepb.Failure { + return &failurepb.Failure{ + Message: fmt.Sprintf(common.FailureReasonActivityTimeout, enumspb.TIMEOUT_TYPE_HEARTBEAT.String()), + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + }, + }, + } +} + +// RecordHeartbeat records a heartbeat for the activity. 
+func (a *Activity) RecordHeartbeat( + ctx chasm.MutableContext, + input WithToken[*historyservice.RecordActivityTaskHeartbeatRequest], +) (*historyservice.RecordActivityTaskHeartbeatResponse, error) { + err := a.validateActivityTaskToken(ctx, input.Token, input.Request.GetNamespaceId()) + if err != nil { + return nil, err + } + prevHeartbeat, _ := a.LastHeartbeat.TryGet(ctx) + a.LastHeartbeat = chasm.NewDataField(ctx, &activitypb.ActivityHeartbeatState{ + RecordedTime: timestamppb.New(ctx.Now(a)), + Details: input.Request.GetHeartbeatRequest().GetDetails(), + TotalHeartbeatCount: prevHeartbeat.GetTotalHeartbeatCount() + 1, + }) + if heartbeatTimeout := a.GetHeartbeatTimeout().AsDuration(); heartbeatTimeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: ctx.Now(a).Add(heartbeatTimeout), + }, + &activitypb.HeartbeatTimeoutTask{ + Stamp: a.LastAttempt.Get(ctx).GetStamp(), + }, + ) + } + return &historyservice.RecordActivityTaskHeartbeatResponse{ + CancelRequested: a.Status == activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + // TODO(saa-preview): ActivityPaused, ActivityReset + }, nil +} + +// InternalStatusToAPIStatus converts internal activity execution status to API status. 
+func InternalStatusToAPIStatus(status activitypb.ActivityExecutionStatus) enumspb.ActivityExecutionStatus { + switch status { + case activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED: + return enumspb.ACTIVITY_EXECUTION_STATUS_RUNNING + case activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED: + return enumspb.ACTIVITY_EXECUTION_STATUS_COMPLETED + case activitypb.ACTIVITY_EXECUTION_STATUS_FAILED: + return enumspb.ACTIVITY_EXECUTION_STATUS_FAILED + case activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED: + return enumspb.ACTIVITY_EXECUTION_STATUS_CANCELED + case activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED: + return enumspb.ACTIVITY_EXECUTION_STATUS_TERMINATED + case activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT: + return enumspb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT + case activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED: + return enumspb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED + default: + panic(fmt.Sprintf("unknown activity execution status: %v", status)) //nolint:forbidigo + } +} + +func internalStatusToRunState(status activitypb.ActivityExecutionStatus) enumspb.PendingActivityState { + switch status { + case activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED: + return enumspb.PENDING_ACTIVITY_STATE_SCHEDULED + case activitypb.ACTIVITY_EXECUTION_STATUS_STARTED: + return enumspb.PENDING_ACTIVITY_STATE_STARTED + case activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED: + return enumspb.PENDING_ACTIVITY_STATE_CANCEL_REQUESTED + case activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED, + activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED, + activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, + activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, + activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED: + return enumspb.PENDING_ACTIVITY_STATE_UNSPECIFIED + default: + panic(fmt.Sprintf("unknown activity execution status: %v", status)) //nolint:forbidigo + 
} +} + +func (a *Activity) buildActivityExecutionInfo(ctx chasm.Context) *apiactivitypb.ActivityExecutionInfo { + // TODO(saa-preview): support pause states + status := InternalStatusToAPIStatus(a.GetStatus()) + runState := internalStatusToRunState(a.GetStatus()) + + requestData := a.RequestData.Get(ctx) + attempt := a.LastAttempt.Get(ctx) + heartbeat, _ := a.LastHeartbeat.TryGet(ctx) + key := ctx.ExecutionKey() + executionInfo := ctx.ExecutionInfo() + + var closeTime *timestamppb.Timestamp + var executionDuration *durationpb.Duration + if a.LifecycleState(ctx) != chasm.LifecycleStateRunning { + executionDuration = durationpb.New(executionInfo.CloseTime.Sub(a.GetScheduleTime().AsTime())) + closeTime = timestamppb.New(executionInfo.CloseTime) + } + + var expirationTime *timestamppb.Timestamp + if deadline := a.scheduleToCloseDeadline(); !deadline.IsZero() { + expirationTime = timestamppb.New(deadline) + } + + sa := &commonpb.SearchAttributes{ + IndexedFields: a.Visibility.Get(ctx).CustomSearchAttributes(ctx), + } + + info := &apiactivitypb.ActivityExecutionInfo{ + ActivityId: key.BusinessID, + ActivityType: a.GetActivityType(), + Attempt: attempt.GetCount(), + CanceledReason: a.CancelState.GetReason(), + CloseTime: closeTime, + CurrentRetryInterval: attempt.GetCurrentRetryInterval(), + ExecutionDuration: executionDuration, + ExpirationTime: expirationTime, + Header: requestData.GetHeader(), + HeartbeatDetails: heartbeat.GetDetails(), + HeartbeatTimeout: a.GetHeartbeatTimeout(), + TotalHeartbeatCount: heartbeat.GetTotalHeartbeatCount(), + LastAttemptCompleteTime: attempt.GetCompleteTime(), + LastFailure: attempt.GetLastFailureDetails().GetFailure(), + LastHeartbeatTime: heartbeat.GetRecordedTime(), + LastStartedTime: attempt.GetStartedTime(), + LastWorkerIdentity: attempt.GetLastWorkerIdentity(), + NextAttemptScheduleTime: attemptScheduleTimeForRetry(attempt), + Priority: a.GetPriority(), + RetryPolicy: a.GetRetryPolicy(), + RunId: key.RunID, + RunState: runState, + 
ScheduleTime: a.GetScheduleTime(), + ScheduleToCloseTimeout: a.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: a.GetScheduleToStartTimeout(), + StartToCloseTimeout: a.GetStartToCloseTimeout(), + StateSizeBytes: int64(executionInfo.ApproximateStateSize), + StateTransitionCount: executionInfo.StateTransitionCount, + SearchAttributes: sa, + Status: status, + TaskQueue: a.GetTaskQueue().GetName(), + UserMetadata: requestData.GetUserMetadata(), + } + + return info +} + +func (a *Activity) buildDescribeActivityExecutionResponse( + ctx chasm.Context, + req *activitypb.DescribeActivityExecutionRequest, +) (*activitypb.DescribeActivityExecutionResponse, error) { + request := req.GetFrontendRequest() + + token, err := ctx.Ref(a) + if err != nil { + return nil, err + } + + info := a.buildActivityExecutionInfo(ctx) + + var input *commonpb.Payloads + if request.GetIncludeInput() { + input = a.RequestData.Get(ctx).GetInput() + } + + callbackInfos, err := a.buildCallbackInfos(ctx) + if err != nil { + return nil, err + } + + response := &workflowservice.DescribeActivityExecutionResponse{ + Info: info, + RunId: ctx.ExecutionKey().RunID, + Input: input, + LongPollToken: token, + Callbacks: callbackInfos, + } + + if request.GetIncludeOutcome() { + response.Outcome = a.outcome(ctx) + } + + return &activitypb.DescribeActivityExecutionResponse{ + FrontendResponse: response, + }, nil +} + +func (a *Activity) buildCallbackInfos(ctx chasm.Context) ([]*apiactivitypb.CallbackInfo, error) { + if len(a.Callbacks) == 0 { + return nil, nil + } + + cbInfos := make([]*apiactivitypb.CallbackInfo, 0, len(a.Callbacks)) + for _, field := range a.Callbacks { + cb := field.Get(ctx) + + cbSpec, err := cb.ToAPICallback() + if err != nil { + return nil, err + } + + var state enumspb.CallbackState + switch cb.Status { + case callbackspb.CALLBACK_STATUS_UNSPECIFIED: + return nil, serviceerror.NewInternal("callback with UNSPECIFIED state") + case callbackspb.CALLBACK_STATUS_STANDBY: + state = 
enumspb.CALLBACK_STATE_STANDBY + case callbackspb.CALLBACK_STATUS_SCHEDULED: + state = enumspb.CALLBACK_STATE_SCHEDULED + case callbackspb.CALLBACK_STATUS_BACKING_OFF: + state = enumspb.CALLBACK_STATE_BACKING_OFF + case callbackspb.CALLBACK_STATUS_FAILED: + state = enumspb.CALLBACK_STATE_FAILED + case callbackspb.CALLBACK_STATUS_SUCCEEDED: + state = enumspb.CALLBACK_STATE_SUCCEEDED + default: + return nil, serviceerror.NewInternalf("unknown callback state: %v", cb.Status) + } + + cbInfos = append(cbInfos, &apiactivitypb.CallbackInfo{ + Trigger: &apiactivitypb.CallbackInfo_Trigger{ + Variant: &apiactivitypb.CallbackInfo_Trigger_ActivityClosed{}, + }, + Info: &callbackpb.CallbackInfo{ + Callback: cbSpec, + RegistrationTime: cb.RegistrationTime, + State: state, + Attempt: cb.Attempt, + LastAttemptCompleteTime: cb.LastAttemptCompleteTime, + LastAttemptFailure: cb.LastAttemptFailure, + NextAttemptScheduleTime: cb.NextAttemptScheduleTime, + }, + }) + } + return cbInfos, nil +} + +func (a *Activity) buildPollActivityExecutionResponse( + ctx chasm.Context, +) *activitypb.PollActivityExecutionResponse { + return &activitypb.PollActivityExecutionResponse{ + FrontendResponse: &workflowservice.PollActivityExecutionResponse{ + RunId: ctx.ExecutionKey().RunID, + Outcome: a.outcome(ctx), + }, + } +} + +// outcome retrieves the activity outcome (result or failure) if the activity has completed. +// Returns nil if the activity has not completed. 
+func (a *Activity) outcome(ctx chasm.Context) *apiactivitypb.ActivityExecutionOutcome { + if !a.LifecycleState(ctx).IsClosed() { + return nil + } + activityOutcome := a.Outcome.Get(ctx) + if successful := activityOutcome.GetSuccessful(); successful != nil { + return &apiactivitypb.ActivityExecutionOutcome{ + Value: &apiactivitypb.ActivityExecutionOutcome_Result{Result: successful.GetOutput()}, + } + } + if failure := a.terminalFailure(ctx); failure != nil { + return &apiactivitypb.ActivityExecutionOutcome{ + Value: &apiactivitypb.ActivityExecutionOutcome_Failure{Failure: failure}, + } + } + return nil +} + +// terminalFailure returns the failure for a closed activity. The failure may be stored in Outcome.Failed +// (terminated, canceled, timed out) or in LastAttempt.LastFailureDetails (failed after exhausting retries). +// Returns nil if no failure is found. +func (a *Activity) terminalFailure(ctx chasm.Context) *failurepb.Failure { + if f := a.Outcome.Get(ctx).GetFailed(); f != nil { + return f.GetFailure() + } + if details := a.LastAttempt.Get(ctx).GetLastFailureDetails(); details != nil { + return details.GetFailure() + } + return nil +} + +// StoreOrSelf returns the store for the activity. If the store is not set as a field (e.g. +// standalone activities), it returns the activity itself. +func (a *Activity) StoreOrSelf(ctx chasm.Context) ActivityStore { + store, ok := a.Store.TryGet(ctx) + if ok { + return store + } + return a +} + +// validateActivityTaskToken validates a task token against the current activity state. 
+func (a *Activity) validateActivityTaskToken( + ctx chasm.Context, + token *tokenspb.Task, + requestNamespaceID string, +) error { + if a.Status != activitypb.ACTIVITY_EXECUTION_STATUS_STARTED && + a.Status != activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED { + return serviceerror.NewNotFound("activity task not found") + } + if token.Attempt != ByIDTokenAttempt && token.Attempt != a.LastAttempt.Get(ctx).GetCount() { + return serviceerror.NewNotFound("activity task not found") + } + + ref, err := chasm.DeserializeComponentRef(token.GetComponentRef()) + if err != nil { + return serviceerror.NewInvalidArgument("malformed token") + } + + // Validate that the request namespace matches the token's namespace. + // This prevents cross-namespace token reuse attacks where an attacker could use a valid token from namespace B to + // complete an activity in namespace A. + if requestNamespaceID != ref.NamespaceID { + return serviceerror.NewInvalidArgument("token does not match namespace") + } + + return nil +} + +func (a *Activity) enrichMetricsHandler(ctx chasm.Context, operationTag string) (metrics.Handler, error) { + // activityContextFromChasm panics if the context value is missing; this is intentional and + // indicates a library registration bug rather than a runtime error. 
+ actCtx := activityContextFromChasm(ctx) + namespaceName, err := actCtx.namespaceRegistry.GetNamespaceName(namespace.ID(ctx.ExecutionKey().NamespaceID)) + if err != nil { + return nil, err + } + breakdownMetricsByTaskQueue := actCtx.config.BreakdownMetricsByTaskQueue + taskQueueFamily := a.GetTaskQueue().GetName() + return metrics.GetPerTaskQueueFamilyScope( + ctx.MetricsHandler(), + namespaceName.String(), + tqid.UnsafeTaskQueueFamily(namespaceName.String(), taskQueueFamily), + breakdownMetricsByTaskQueue(namespaceName.String(), taskQueueFamily, enumspb.TASK_QUEUE_TYPE_ACTIVITY), + metrics.OperationTag(operationTag), + metrics.ActivityTypeTag(a.GetActivityType().GetName()), + metrics.VersioningBehaviorTag(enumspb.VERSIONING_BEHAVIOR_UNSPECIFIED), + metrics.WorkflowTypeTag(WorkflowTypeTag), + ), nil +} + +func (a *Activity) emitOnAttemptTimedOutMetrics(ctx chasm.Context, handler metrics.Handler, timeoutType enumspb.TimeoutType) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + latency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(latency) + + timeoutTag := metrics.StringTag("timeout_type", timeoutType.String()) + metrics.ActivityTaskTimeout.With(handler).Record(1, timeoutTag) +} + +func (a *Activity) emitOnAttemptFailedMetrics(ctx chasm.Context, handler metrics.Handler) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + latency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(latency) + + metrics.ActivityTaskFail.With(handler).Record(1) +} + +func (a *Activity) emitOnCompletedMetrics(ctx chasm.Context, handler metrics.Handler) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + + scheduleToCloseLatency := 
time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + metrics.ActivitySuccess.With(handler).Record(1) +} + +func (a *Activity) emitOnFailedMetrics(ctx chasm.Context, handler metrics.Handler) { + attempt := a.LastAttempt.Get(ctx) + startedTime := attempt.GetStartedTime().AsTime() + + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + metrics.ActivityTaskFail.With(handler).Record(1) + metrics.ActivityFail.With(handler).Record(1) +} + +func (a *Activity) emitOnTerminatedMetrics( + handler metrics.Handler, +) { + // Terminated activities do not count as properly finished activities so we do not + // record any of the latency metrics. + metrics.ActivityTerminate.With(handler).Record(1) +} + +func (a *Activity) emitOnCanceledMetrics( + ctx chasm.Context, + handler metrics.Handler, + fromStatus activitypb.ActivityExecutionStatus, +) { + // Only record start-to-close latency if a current attempt was running. If it is in scheduled status, it means the current attempt never started.
+ if fromStatus != activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED { + startedTime := a.LastAttempt.Get(ctx).GetStartedTime().AsTime() + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + } + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + metrics.ActivityCancel.With(handler).Record(1) +} + +func (a *Activity) emitOnTimedOutMetrics( + ctx chasm.Context, + handler metrics.Handler, + timeoutType enumspb.TimeoutType, + fromStatus activitypb.ActivityExecutionStatus, +) { + // Only record start-to-close latency if a current attempt was running. If it is in scheduled status, it means the current attempt never started. + if fromStatus != activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED { + startedTime := a.LastAttempt.Get(ctx).GetStartedTime().AsTime() + startToCloseLatency := time.Since(startedTime) + metrics.ActivityStartToCloseLatency.With(handler).Record(startToCloseLatency) + } + + scheduleToCloseLatency := time.Since(a.GetScheduleTime().AsTime()) + metrics.ActivityScheduleToCloseLatency.With(handler).Record(scheduleToCloseLatency) + + timeoutTag := metrics.StringTag("timeout_type", timeoutType.String()) + metrics.ActivityTaskTimeout.With(handler).Record(1, timeoutTag) + metrics.ActivityTimeout.With(handler).Record(1, timeoutTag) +} + +// SearchAttributes implements chasm.VisibilitySearchAttributesProvider interface. +// Returns the current search attribute values for this activity execution.
+func (a *Activity) SearchAttributes(_ chasm.Context) []chasm.SearchAttributeKeyValue { + return []chasm.SearchAttributeKeyValue{ + TypeSearchAttribute.Value(a.GetActivityType().GetName()), + StatusSearchAttribute.Value(InternalStatusToAPIStatus(a.GetStatus()).String()), + chasm.SearchAttributeTaskQueue.Value(a.GetTaskQueue().GetName()), + } +} diff --git a/chasm/lib/activity/activity_tasks.go b/chasm/lib/activity/activity_tasks.go new file mode 100644 index 00000000000..e22b2f586a6 --- /dev/null +++ b/chasm/lib/activity/activity_tasks.go @@ -0,0 +1,279 @@ +package activity + +import ( + "context" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/util" + "go.uber.org/fx" +) + +type activityDispatchTaskHandlerOptions struct { + fx.In + + MatchingClient resource.MatchingClient +} + +type activityDispatchTaskHandler struct { + chasm.SideEffectTaskHandlerBase[*activitypb.ActivityDispatchTask] + opts activityDispatchTaskHandlerOptions +} + +func newActivityDispatchTaskHandler(opts activityDispatchTaskHandlerOptions) *activityDispatchTaskHandler { + return &activityDispatchTaskHandler{ + opts: opts, + } +} + +func (h *activityDispatchTaskHandler) Validate( + ctx chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + task *activitypb.ActivityDispatchTask, +) (bool, error) { + // TODO(saa-preview): make sure we handle resets when we support them, as they will reset the attempt count + return (TransitionStarted.Possible(activity) && + task.Stamp == activity.LastAttempt.Get(ctx).GetStamp()), nil +} + +func (h *activityDispatchTaskHandler) Execute( + ctx context.Context, + activityRef chasm.ComponentRef, + _ chasm.TaskAttributes, + _ *activitypb.ActivityDispatchTask, +) error { + return h.pushToMatching(ctx, activityRef) +} + +// Discard spills the task to matching 
instead of silently discarding it on standby clusters when the activity +// dispatch task has been pending past the discard delay. +func (h *activityDispatchTaskHandler) Discard( + ctx context.Context, + activityRef chasm.ComponentRef, + _ chasm.TaskAttributes, + _ *activitypb.ActivityDispatchTask, +) error { + return h.pushToMatching(ctx, activityRef) +} + +func (h *activityDispatchTaskHandler) pushToMatching( + ctx context.Context, + activityRef chasm.ComponentRef, +) error { + request, err := chasm.ReadComponent( + ctx, + activityRef, + (*Activity).createAddActivityTaskRequest, + activityRef.NamespaceID, + ) + if err != nil { + return err + } + + _, err = h.opts.MatchingClient.AddActivityTask(ctx, request) + + return err +} + +type scheduleToStartTimeoutTaskHandler struct { + chasm.PureTaskHandlerBase +} + +func newScheduleToStartTimeoutTaskHandler() *scheduleToStartTimeoutTaskHandler { + return &scheduleToStartTimeoutTaskHandler{} +} + +func (h *scheduleToStartTimeoutTaskHandler) Validate( + ctx chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + task *activitypb.ScheduleToStartTimeoutTask, +) (bool, error) { + return (activity.Status == activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED && + task.Stamp == activity.LastAttempt.Get(ctx).GetStamp()), nil +} + +func (h *scheduleToStartTimeoutTaskHandler) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.ScheduleToStartTimeoutTask, +) error { + metricsHandler, err := activity.enrichMetricsHandler(ctx, metrics.TimerActiveTaskActivityTimeoutScope) + if err != nil { + return err + } + + event := timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + } + + return TransitionTimedOut.Apply(activity, ctx, event) +} + +type scheduleToCloseTimeoutTaskHandler struct{ chasm.PureTaskHandlerBase } + +func newScheduleToCloseTimeoutTaskHandler() *scheduleToCloseTimeoutTaskHandler { + 
return &scheduleToCloseTimeoutTaskHandler{} +} + +func (h *scheduleToCloseTimeoutTaskHandler) Validate( + _ chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.ScheduleToCloseTimeoutTask, +) (bool, error) { + return TransitionTimedOut.Possible(activity), nil +} + +func (h *scheduleToCloseTimeoutTaskHandler) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.ScheduleToCloseTimeoutTask, +) error { + metricsHandler, err := activity.enrichMetricsHandler(ctx, metrics.TimerActiveTaskActivityTimeoutScope) + if err != nil { + return err + } + event := timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + } + + return TransitionTimedOut.Apply(activity, ctx, event) +} + +type startToCloseTimeoutTaskHandler struct{ chasm.PureTaskHandlerBase } + +func newStartToCloseTimeoutTaskHandler() *startToCloseTimeoutTaskHandler { + return &startToCloseTimeoutTaskHandler{} +} + +func (h *startToCloseTimeoutTaskHandler) Validate( + ctx chasm.Context, + activity *Activity, + _ chasm.TaskAttributes, + task *activitypb.StartToCloseTimeoutTask, +) (bool, error) { + valid := ((activity.Status == activitypb.ACTIVITY_EXECUTION_STATUS_STARTED || + activity.Status == activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED) && + task.Stamp == activity.LastAttempt.Get(ctx).GetStamp()) + return valid, nil +} + +// Execute executes a StartToCloseTimeoutTask. It fails the attempt, leading to retry or activity +// failure. 
+func (h *startToCloseTimeoutTaskHandler) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.StartToCloseTimeoutTask, +) error { + rescheduled, err := activity.tryReschedule(ctx, 0, createStartToCloseTimeoutFailure()) + if err != nil { + return err + } + + metricsHandler, err := activity.enrichMetricsHandler(ctx, metrics.TimerActiveTaskActivityTimeoutScope) + if err != nil { + return err + } + + if rescheduled { + activity.emitOnAttemptTimedOutMetrics(ctx, metricsHandler, enumspb.TIMEOUT_TYPE_START_TO_CLOSE) + + return nil + } + + return TransitionTimedOut.Apply(activity, ctx, timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + }) +} + +// HeartbeatTimeoutTask is a pure task that enforces heartbeat timeouts. +type heartbeatTimeoutTaskHandler struct{ chasm.PureTaskHandlerBase } + +func newHeartbeatTimeoutTaskHandler() *heartbeatTimeoutTaskHandler { + return &heartbeatTimeoutTaskHandler{} +} + +// Validate validates a HeartbeatTimeoutTask. +func (h *heartbeatTimeoutTaskHandler) Validate( + ctx chasm.Context, + activity *Activity, + taskAttrs chasm.TaskAttributes, + task *activitypb.HeartbeatTimeoutTask, +) (bool, error) { + // Let T = user-configured heartbeat timeout and let hb_i be the time of the ith user-submitted + // heartbeat request. (hb_0 = 0 since we always start a timer task when an attempt starts). + + // There are two concurrent sequences of events: + // 1. A worker is sending heartbeats at times hb_i. + // 2. This task is being executed at (shortly after) times hb_i + T. + + // On the i-th execution of this function, we look back into the past and determine whether the + // last heartbeat was received after hb_i. If so, we reject this timeout task. Otherwise, the + // Execute function runs and we fail the attempt. 
+ if activity.Status != activitypb.ACTIVITY_EXECUTION_STATUS_STARTED && + activity.Status != activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED { + return false, nil + } + // Task attempt must still match current attempt. + attempt := activity.LastAttempt.Get(ctx) + if attempt.GetStamp() != task.Stamp { + return false, nil + } + + // Must not have been a heartbeat since this task was created + hbTimeout := activity.GetHeartbeatTimeout().AsDuration() // T + attemptStartTime := attempt.GetStartedTime().AsTime() + lastHb, _ := activity.LastHeartbeat.TryGet(ctx) // could be nil, or from a previous attempt + // No hbs in attempt so far is equivalent to hb having been sent at attempt start time. + lastHbTime := util.MaxTime(lastHb.GetRecordedTime().AsTime(), attemptStartTime) + thisTaskHbTime := taskAttrs.ScheduledTime.Add(-hbTimeout) // hb_i + if lastHbTime.After(thisTaskHbTime) { + // another heartbeat has invalidated this task's heartbeat + return false, nil + } + return true, nil +} + +// Execute executes a HeartbeatTimeoutTask. It fails the attempt, leading to retry or activity +// failure. 
+func (h *heartbeatTimeoutTaskHandler) Execute( + ctx chasm.MutableContext, + activity *Activity, + _ chasm.TaskAttributes, + _ *activitypb.HeartbeatTimeoutTask, +) error { + rescheduled, err := activity.tryReschedule(ctx, 0, createHeartbeatTimeoutFailure()) + if err != nil { + return err + } + + metricsHandler, err := activity.enrichMetricsHandler(ctx, metrics.TimerActiveTaskActivityTimeoutScope) + if err != nil { + return err + } + + if rescheduled { + activity.emitOnAttemptTimedOutMetrics(ctx, metricsHandler, enumspb.TIMEOUT_TYPE_HEARTBEAT) + return nil + } + + return TransitionTimedOut.Apply(activity, ctx, timeoutEvent{ + timeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + metricsHandler: metricsHandler, + fromStatus: activity.GetStatus(), + }) +} diff --git a/chasm/lib/activity/activity_test.go b/chasm/lib/activity/activity_test.go new file mode 100644 index 00000000000..b614c3eba7a --- /dev/null +++ b/chasm/lib/activity/activity_test.go @@ -0,0 +1,307 @@ +package activity + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/namespace" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestHandleStarted(t *testing.T) { + testTime := time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + testRequestID := "test-request-id" + testStamp := int32(1) + + testCases := []struct { + name string + activityStatus activitypb.ActivityExecutionStatus + attemptStamp int32 + requestStamp int32 + startRequestID string + requestID string + checkOutcome func(t *testing.T, response 
*historyservice.RecordActivityTaskStartedResponse, err error)
	}{
		{
			name:           "successful transition from scheduled",
			activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED,
			attemptStamp:   testStamp,
			requestStamp:   testStamp,
			requestID:      testRequestID,
			checkOutcome: func(t *testing.T, response *historyservice.RecordActivityTaskStartedResponse, err error) {
				// Check err first: require.Equal on response.Attempt would panic on a
				// nil response instead of reporting the underlying error.
				require.NoError(t, err)
				require.Equal(t, int32(1), response.Attempt)
			},
		},
		{
			name:           "idempotent retry - same request ID",
			activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
			attemptStamp:   testStamp,
			requestStamp:   testStamp,
			startRequestID: testRequestID,
			requestID:      testRequestID,
			checkOutcome: func(t *testing.T, response *historyservice.RecordActivityTaskStartedResponse, err error) {
				require.NoError(t, err)
				require.Equal(t, int32(1), response.Attempt)
			},
		},
		{
			name:           "error - already started with different request ID",
			activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
			attemptStamp:   testStamp,
			requestStamp:   testStamp,
			startRequestID: "different-request-id",
			requestID:      testRequestID,
			checkOutcome: func(t *testing.T, response *historyservice.RecordActivityTaskStartedResponse, err error) {
				require.ErrorAs(t, err, new(*serviceerrors.ObsoleteMatchingTask))
			},
		},
		{
			name:           "error - stamp mismatch",
			activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED,
			attemptStamp:   testStamp,
			requestStamp:   testStamp + 1,
			requestID:      testRequestID,
			checkOutcome: func(t *testing.T, response *historyservice.RecordActivityTaskStartedResponse, err error) {
				require.ErrorAs(t, err, new(*serviceerrors.ObsoleteMatchingTask))
			},
		},
		{
			name:           "error - invalid transition from completed",
			activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED,
			attemptStamp:   testStamp,
			requestStamp:   testStamp,
			requestID:      testRequestID,
			checkOutcome: func(t *testing.T, response *historyservice.RecordActivityTaskStartedResponse, err error) {
				require.ErrorAs(t, err, new(*serviceerrors.ObsoleteMatchingTask))
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			// Mock context with a fixed clock and a stable execution key.
			ctx := &chasm.MockMutableContext{
				MockContext: chasm.MockContext{
					HandleNow: func(chasm.Component) time.Time { return testTime },
					HandleExecutionKey: func() chasm.ExecutionKey {
						return chasm.ExecutionKey{
							BusinessID: "test-activity-id",
							RunID:      "test-run-id",
						}
					},
				},
			}

			// Attempt state: for STARTED cases the attempt must carry a start time.
			attemptState := &activitypb.ActivityAttemptState{
				Count:          1,
				Stamp:          tc.attemptStamp,
				StartRequestId: tc.startRequestID,
			}
			if tc.activityStatus == activitypb.ACTIVITY_EXECUTION_STATUS_STARTED {
				attemptState.StartedTime = timestamppb.New(testTime.Add(-1 * time.Minute))
			}

			activity := &Activity{
				ActivityState: &activitypb.ActivityState{
					ActivityType:           &commonpb.ActivityType{Name: "test-activity-type"},
					Status:                 tc.activityStatus,
					TaskQueue:              &taskqueuepb.TaskQueue{Name: "test-task-queue"},
					ScheduleToCloseTimeout: durationpb.New(10 * time.Minute),
					ScheduleToStartTimeout: durationpb.New(2 * time.Minute),
					StartToCloseTimeout:    durationpb.New(3 * time.Minute),
					// No table case exercises a zero heartbeat timeout, so use a fixed
					// value directly. (Previously selected by a branch keyed to a test
					// case name that does not exist in the table — dead code.)
					HeartbeatTimeout: durationpb.New(1 * time.Minute),
					ScheduleTime:     timestamppb.New(testTime.Add(-30 * time.Second)),
				},
				LastAttempt: chasm.NewDataField(ctx, attemptState),
				RequestData: chasm.NewDataField(ctx, &activitypb.ActivityRequestData{
					Input: &commonpb.Payloads{
						Payloads: []*commonpb.Payload{{Data: []byte("test-input")}},
					},
					Header: &commonpb.Header{
						Fields: map[string]*commonpb.Payload{
							"test-header": {Data: []byte("test-value")},
						},
					},
				}),
				Outcome: chasm.NewDataField(ctx, &activitypb.ActivityOutcome{}),
			}

			// Create request
			request :=
&historyservice.RecordActivityTaskStartedRequest{ + Stamp: tc.requestStamp, + RequestId: tc.requestID, + } + + // Execute HandleStarted + response, err := activity.HandleStarted(ctx, request) + + tc.checkOutcome(t, response, err) + }) + } +} + +func TestActivityTerminate(t *testing.T) { + testCases := []struct { + name string + activityStatus activitypb.ActivityExecutionStatus + expectErr string + }{ + { + name: "terminate scheduled activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + }, + { + name: "terminate started activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + }, + { + name: "terminate cancel-requested activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + { + name: "error on completed activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED, + expectErr: "invalid transition from Completed", + }, + { + name: "no-op on already terminated activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, + }, + { + name: "error on failed activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, + expectErr: "invalid transition from Failed", + }, + { + name: "error on timed out activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, + expectErr: "invalid transition from TimedOut", + }, + { + name: "error on canceled activity", + activityStatus: activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED, + expectErr: "invalid transition from Canceled", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + nsRegistry := namespace.NewMockRegistry(ctrl) + nsRegistry.EXPECT().GetNamespaceName(gomock.Any()).Return(namespace.Name("test-namespace"), nil).AnyTimes() + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + GoCtx: 
context.WithValue(context.Background(), ctxKeyActivityContext, &activityContext{ + config: &Config{ + BreakdownMetricsByTaskQueue: dynamicconfig.GetBoolPropertyFnFilteredByTaskQueue(true), + }, + namespaceRegistry: nsRegistry, + }), + }, + } + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + Status: tc.activityStatus, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Minute), + ScheduleToStartTimeout: durationpb.New(2 * time.Minute), + StartToCloseTimeout: durationpb.New(3 * time.Minute), + }, + LastAttempt: chasm.NewDataField(ctx, &activitypb.ActivityAttemptState{Count: 1}), + Outcome: chasm.NewDataField(ctx, &activitypb.ActivityOutcome{}), + } + + _, err := activity.Terminate(ctx, chasm.TerminateComponentRequest{ + Reason: "Delete activity execution", + }) + + if tc.expectErr != "" { + require.EqualError(t, err, tc.expectErr) + require.Equal(t, tc.activityStatus, activity.Status, "expected no state change on error") + } else { + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, activity.Status) + } + }) + } +} + +func TestContextMetadata(t *testing.T) { + t.Run("returns activity type and task queue", func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "my-activity"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: "my-task-queue"}, + }, + } + + md := activity.ContextMetadata(ctx) + require.Equal(t, map[string]string{ + "standalone-activity-type": "my-activity", + "standalone-activity-task-queue": "my-task-queue", + }, md) + }) + + t.Run("returns only activity type when task queue is empty", func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: 
"my-activity"}, + }, + } + + md := activity.ContextMetadata(ctx) + require.Equal(t, map[string]string{ + "standalone-activity-type": "my-activity", + }, md) + }) + + t.Run("returns only task queue when activity type is empty", func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + TaskQueue: &taskqueuepb.TaskQueue{Name: "my-task-queue"}, + }, + } + + md := activity.ContextMetadata(ctx) + require.Equal(t, map[string]string{ + "standalone-activity-task-queue": "my-task-queue", + }, md) + }) + + t.Run("returns nil when both are empty", func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + activity := &Activity{ + ActivityState: &activitypb.ActivityState{}, + } + + md := activity.ContextMetadata(ctx) + require.Nil(t, md) + }) +} diff --git a/chasm/lib/activity/config.go b/chasm/lib/activity/config.go new file mode 100644 index 00000000000..79f86946843 --- /dev/null +++ b/chasm/lib/activity/config.go @@ -0,0 +1,66 @@ +package activity + +import ( + "go.temporal.io/server/chasm/lib/callback" + "go.temporal.io/server/common" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/retrypolicy" +) + +var ( + Enabled = dynamicconfig.NewNamespaceBoolSetting( + "activity.enableStandalone", + false, + `Toggles standalone activity functionality on the server.`, + ) + + LongPollTimeout = dynamicconfig.NewNamespaceDurationSetting( + "activity.longPollTimeout", + common.DefaultLongPollTimeout, + `Timeout for activity long-poll requests.`, + ) + + LongPollBuffer = dynamicconfig.NewNamespaceDurationSetting( + "activity.longPollBuffer", + common.DefaultLongPollBuffer, + `A buffer used to adjust the activity long-poll timeouts. 
+ Specifically, activity long-poll requests are timed out at a time which leaves at least the buffer's duration + remaining before the caller's deadline, if permitted by the caller's deadline.`, + ) + + StartDelayEnabled = dynamicconfig.NewNamespaceBoolSetting( + "activity.startDelayEnabled", + false, + `Allows non-zero start_delay on StartActivityExecution requests.`, + ) +) + +type Config struct { + BlobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter + BlobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter + BreakdownMetricsByTaskQueue dynamicconfig.TypedPropertyFnWithTaskQueueFilter[bool] + Enabled dynamicconfig.BoolPropertyFnWithNamespaceFilter + LongPollBuffer dynamicconfig.DurationPropertyFnWithNamespaceFilter + LongPollTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter + MaxIDLengthLimit dynamicconfig.IntPropertyFn + MaxCallbacksPerExecution dynamicconfig.IntPropertyFnWithNamespaceFilter + DefaultActivityRetryPolicy dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings] + StartDelayEnabled dynamicconfig.BoolPropertyFnWithNamespaceFilter + VisibilityMaxPageSize dynamicconfig.IntPropertyFnWithNamespaceFilter +} + +func ConfigProvider(dc *dynamicconfig.Collection) *Config { + return &Config{ + BlobSizeLimitError: dynamicconfig.BlobSizeLimitError.Get(dc), + BlobSizeLimitWarn: dynamicconfig.BlobSizeLimitWarn.Get(dc), + BreakdownMetricsByTaskQueue: dynamicconfig.MetricsBreakdownByTaskQueue.Get(dc), + DefaultActivityRetryPolicy: dynamicconfig.DefaultActivityRetryPolicy.Get(dc), + Enabled: Enabled.Get(dc), + LongPollBuffer: LongPollBuffer.Get(dc), + LongPollTimeout: LongPollTimeout.Get(dc), + MaxIDLengthLimit: dynamicconfig.MaxIDLengthLimit.Get(dc), + StartDelayEnabled: StartDelayEnabled.Get(dc), + MaxCallbacksPerExecution: callback.MaxPerExecution.Get(dc), + VisibilityMaxPageSize: dynamicconfig.FrontendVisibilityMaxPageSize.Get(dc), + } +} diff --git a/chasm/lib/activity/frontend.go 
b/chasm/lib/activity/frontend.go new file mode 100644 index 00000000000..6dae15ece5b --- /dev/null +++ b/chasm/lib/activity/frontend.go @@ -0,0 +1,439 @@ +package activity + +import ( + "context" + + "github.com/google/uuid" + apiactivitypb "go.temporal.io/api/activity/v1" //nolint:importas + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/chasm/lib/callback" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/searchattribute" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type FrontendHandler interface { + StartActivityExecution(ctx context.Context, req *workflowservice.StartActivityExecutionRequest) (*workflowservice.StartActivityExecutionResponse, error) + DescribeActivityExecution(ctx context.Context, req *workflowservice.DescribeActivityExecutionRequest) (*workflowservice.DescribeActivityExecutionResponse, error) + PollActivityExecution(ctx context.Context, req *workflowservice.PollActivityExecutionRequest) (*workflowservice.PollActivityExecutionResponse, error) + CountActivityExecutions(context.Context, *workflowservice.CountActivityExecutionsRequest) (*workflowservice.CountActivityExecutionsResponse, error) + DeleteActivityExecution(context.Context, *workflowservice.DeleteActivityExecutionRequest) (*workflowservice.DeleteActivityExecutionResponse, error) + ListActivityExecutions(context.Context, *workflowservice.ListActivityExecutionsRequest) (*workflowservice.ListActivityExecutionsResponse, error) + RequestCancelActivityExecution(context.Context, 
*workflowservice.RequestCancelActivityExecutionRequest) (*workflowservice.RequestCancelActivityExecutionResponse, error) + TerminateActivityExecution(context.Context, *workflowservice.TerminateActivityExecutionRequest) (*workflowservice.TerminateActivityExecutionResponse, error) + IsStandaloneActivityEnabled(namespaceName string) bool +} + +var ErrStandaloneActivityDisabled = serviceerror.NewUnimplemented("Standalone activity is disabled") + +type frontendHandler struct { + FrontendHandler + callbackValidator callback.Validator + client activitypb.ActivityServiceClient + config *Config + logger log.Logger + metricsHandler metrics.Handler + namespaceRegistry namespace.Registry + saMapperProvider searchattribute.MapperProvider + saValidator *searchattribute.Validator +} + +// NewFrontendHandler creates a new FrontendHandler instance for processing activity frontend requests. +func NewFrontendHandler( + callbackValidator callback.Validator, + client activitypb.ActivityServiceClient, + config *Config, + logger log.Logger, + metricsHandler metrics.Handler, + namespaceRegistry namespace.Registry, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) FrontendHandler { + return &frontendHandler{ + callbackValidator: callbackValidator, + client: client, + config: config, + logger: logger, + metricsHandler: metricsHandler, + namespaceRegistry: namespaceRegistry, + saMapperProvider: saMapperProvider, + saValidator: saValidator, + } +} + +// IsStandaloneActivityEnabled checks if standalone activities are enabled for the given namespace +func (h *frontendHandler) IsStandaloneActivityEnabled(namespaceName string) bool { + return h.config.Enabled(namespaceName) +} + +// StartActivityExecution initiates a standalone activity execution in the specified namespace. +// It validates the request, resolves the namespace ID, applies default configurations, +// and forwards the request to the activity service handler. 
+// +// The method performs the following steps: +// 1. Resolves the namespace name to its internal ID +// 2. Validates and populates request fields (timeouts, retry policies, search attributes). The request is cloned +// before mutation to preserve the original for retries. +// 3. Sends the request to the history activity service. +func (h *frontendHandler) StartActivityExecution(ctx context.Context, req *workflowservice.StartActivityExecutionRequest) (*workflowservice.StartActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + modifiedReq, err := h.validateAndPopulateStartRequest(ctx, req, namespaceID) + if err != nil { + return nil, err + } + + resp, err := h.client.StartActivityExecution(ctx, &activitypb.StartActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: modifiedReq, + }) + + return resp.GetFrontendResponse(), err +} + +// DescribeActivityExecution queries current activity state, optionally as a long-poll that waits +// for any state change. 
+func (h *frontendHandler) DescribeActivityExecution( + ctx context.Context, + req *workflowservice.DescribeActivityExecutionRequest, +) (*workflowservice.DescribeActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + err := validateDescribeActivityExecutionRequest( + req, + h.config.MaxIDLengthLimit(), + ) + if err != nil { + return nil, err + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + resp, err := h.client.DescribeActivityExecution(ctx, &activitypb.DescribeActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +// PollActivityExecution long-polls for activity outcome. +func (h *frontendHandler) PollActivityExecution( + ctx context.Context, + req *workflowservice.PollActivityExecutionRequest, +) (*workflowservice.PollActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + err := validatePollActivityExecutionRequest( + req, + h.config.MaxIDLengthLimit(), + ) + if err != nil { + return nil, err + } + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + resp, err := h.client.PollActivityExecution(ctx, &activitypb.PollActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +// ListActivityExecutions lists activity executions matching the query in the request. 
+func (h *frontendHandler) ListActivityExecutions( + ctx context.Context, + req *workflowservice.ListActivityExecutionsRequest, +) (*workflowservice.ListActivityExecutionsResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + pageSize := req.GetPageSize() + if maxPageSize := int32(h.config.VisibilityMaxPageSize(req.GetNamespace())); pageSize <= 0 || pageSize > maxPageSize { + pageSize = maxPageSize + } + + resp, err := chasm.ListExecutions[*Activity, *emptypb.Empty](ctx, &chasm.ListExecutionsRequest{ + NamespaceName: req.GetNamespace(), + PageSize: int(pageSize), + NextPageToken: req.GetNextPageToken(), + Query: req.GetQuery(), + }) + if err != nil { + return nil, err + } + + executions := make([]*apiactivitypb.ActivityExecutionListInfo, 0, len(resp.Executions)) + for _, exec := range resp.Executions { + activityType, _ := chasm.SearchAttributeValue(exec.ChasmSearchAttributes, TypeSearchAttribute) + taskQueue, _ := chasm.SearchAttributeValue(exec.ChasmSearchAttributes, chasm.SearchAttributeTaskQueue) + statusStr, _ := chasm.SearchAttributeValue(exec.ChasmSearchAttributes, StatusSearchAttribute) + status, _ := enumspb.ActivityExecutionStatusFromString(statusStr) + + info := &apiactivitypb.ActivityExecutionListInfo{ + ActivityId: exec.BusinessID, + RunId: exec.RunID, + ScheduleTime: timestamppb.New(exec.StartTime), + StateTransitionCount: exec.StateTransitionCount, + StateSizeBytes: exec.HistorySizeBytes, + SearchAttributes: &commonpb.SearchAttributes{IndexedFields: exec.CustomSearchAttributes}, + ActivityType: &commonpb.ActivityType{Name: activityType}, + TaskQueue: taskQueue, + Status: status, + } + if !exec.CloseTime.IsZero() { + info.CloseTime = timestamppb.New(exec.CloseTime) + if !exec.StartTime.IsZero() { + info.ExecutionDuration = durationpb.New(exec.CloseTime.Sub(exec.StartTime)) + } + } + executions = append(executions, info) + } + + return &workflowservice.ListActivityExecutionsResponse{ + 
Executions: executions, + NextPageToken: resp.NextPageToken, + }, nil +} + +// CountActivityExecutions counts activity executions matching the query in the request. +func (h *frontendHandler) CountActivityExecutions( + ctx context.Context, + req *workflowservice.CountActivityExecutionsRequest, +) (*workflowservice.CountActivityExecutionsResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + resp, err := chasm.CountExecutions[*Activity](ctx, &chasm.CountExecutionsRequest{ + NamespaceName: req.GetNamespace(), + Query: req.GetQuery(), + }) + if err != nil { + return nil, err + } + + groups := make([]*workflowservice.CountActivityExecutionsResponse_AggregationGroup, 0, len(resp.Groups)) + for _, g := range resp.Groups { + groups = append(groups, &workflowservice.CountActivityExecutionsResponse_AggregationGroup{ + GroupValues: g.Values, + Count: g.Count, + }) + } + + return &workflowservice.CountActivityExecutionsResponse{ + Count: resp.Count, + Groups: groups, + }, nil +} + +// DeleteActivityExecution terminates and schedules a standalone activity execution for deletion. 
+func (h *frontendHandler) DeleteActivityExecution( + ctx context.Context, + req *workflowservice.DeleteActivityExecutionRequest, +) (*workflowservice.DeleteActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + if err := validateAndNormalizeDeleteRequest(req, h.config.MaxIDLengthLimit()); err != nil { + return nil, err + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + _, err = h.client.DeleteActivityExecution(ctx, &activitypb.DeleteActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.DeleteActivityExecutionResponse{}, nil +} + +// TerminateActivityExecution terminates a standalone activity execution +func (h *frontendHandler) TerminateActivityExecution( + ctx context.Context, + req *workflowservice.TerminateActivityExecutionRequest, +) (*workflowservice.TerminateActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + namespaceName := req.GetNamespace() + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(namespaceName)) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeTerminateRequest( + req, + h.config.MaxIDLengthLimit(), + h.config.BlobSizeLimitError, + h.config.BlobSizeLimitWarn, + h.logger); err != nil { + return nil, err + } + + _, err = h.client.TerminateActivityExecution(ctx, &activitypb.TerminateActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.TerminateActivityExecutionResponse{}, nil +} + +func (h *frontendHandler) RequestCancelActivityExecution( + ctx context.Context, + req *workflowservice.RequestCancelActivityExecutionRequest, +) 
(*workflowservice.RequestCancelActivityExecutionResponse, error) { + if !h.config.Enabled(req.GetNamespace()) { + return nil, ErrStandaloneActivityDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeCancelRequest( + req, + h.config.MaxIDLengthLimit(), + h.config.BlobSizeLimitError, + h.config.BlobSizeLimitWarn, + h.logger); err != nil { + return nil, err + } + + _, err = h.client.RequestCancelActivityExecution(ctx, &activitypb.RequestCancelActivityExecutionRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.RequestCancelActivityExecutionResponse{}, nil +} + +func (h *frontendHandler) validateAndPopulateStartRequest( + ctx context.Context, + req *workflowservice.StartActivityExecutionRequest, + namespaceID namespace.ID, +) (*workflowservice.StartActivityExecutionRequest, error) { + // Since validation mutates the request, clone it first so that retries use the original + // request. However if the client did not set a request ID then set that before cloning so that + // retries use the same request ID. + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } + req = common.CloneProto(req) + activityType := req.ActivityType.GetName() + + if req.RetryPolicy == nil { + req.RetryPolicy = &commonpb.RetryPolicy{} + } + + if err := validateStartDelay(req.GetStartDelay()); err != nil { + return nil, err + } + if req.GetStartDelay().AsDuration() > 0 && !h.config.StartDelayEnabled(req.GetNamespace()) { + return nil, serviceerror.NewInvalidArgument("start_delay is not enabled for this namespace") + } + // TODO(saa): when eager start is supported, deny it if start delay > 0 (same as workflow behavior). 
+ + opts := activityOptionsFromStartRequest(req) + err := ValidateAndNormalizeStandaloneActivity( + req.ActivityId, + activityType, + h.config.DefaultActivityRetryPolicy, + h.config.MaxIDLengthLimit(), + namespaceID, + opts, + req.Priority, + durationpb.New(0), + ) + if err != nil { + return nil, err + } + applyActivityOptionsToStartRequest(opts, req) + + err = validateAndNormalizeStartRequest( + req, + h.config.MaxIDLengthLimit(), + h.config.BlobSizeLimitError, + h.config.BlobSizeLimitWarn, + h.logger, + h.saMapperProvider, + h.saValidator, + ) + if err != nil { + return nil, err + } + + if cbs := req.GetCompletionCallbacks(); len(cbs) > 0 { + if err := h.callbackValidator.Validate(ctx, req.GetNamespace(), cbs); err != nil { + return nil, err + } + } + + return req, nil +} + +// activityOptionsFromStartRequest builds an ActivityOptions from the inlined fields +// of a StartActivityExecutionRequest for use with shared validation logic. +func activityOptionsFromStartRequest(req *workflowservice.StartActivityExecutionRequest) *apiactivitypb.ActivityOptions { + return &apiactivitypb.ActivityOptions{ + TaskQueue: req.TaskQueue, + ScheduleToCloseTimeout: req.ScheduleToCloseTimeout, + ScheduleToStartTimeout: req.ScheduleToStartTimeout, + StartToCloseTimeout: req.StartToCloseTimeout, + HeartbeatTimeout: req.HeartbeatTimeout, + RetryPolicy: req.RetryPolicy, + } +} + +// applyActivityOptionsToStartRequest copies normalized values from ActivityOptions +// back to the StartActivityExecutionRequest. 
+func applyActivityOptionsToStartRequest(opts *apiactivitypb.ActivityOptions, req *workflowservice.StartActivityExecutionRequest) { + req.TaskQueue = opts.TaskQueue + req.ScheduleToCloseTimeout = opts.ScheduleToCloseTimeout + req.ScheduleToStartTimeout = opts.ScheduleToStartTimeout + req.StartToCloseTimeout = opts.StartToCloseTimeout + req.HeartbeatTimeout = opts.HeartbeatTimeout + req.RetryPolicy = opts.RetryPolicy +} diff --git a/chasm/lib/activity/frontend_test.go b/chasm/lib/activity/frontend_test.go new file mode 100644 index 00000000000..e692de13b84 --- /dev/null +++ b/chasm/lib/activity/frontend_test.go @@ -0,0 +1,102 @@ +package activity + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "google.golang.org/protobuf/types/known/durationpb" +) + +type hasRequestID interface { + GetRequestId() string +} + +// TestRequestIdStableAcrossRetries verifies that a request ID is re-used +// across retries, even if server-generated. 
+func TestRequestIdStableAcrossRetries(t *testing.T) { + h := &frontendHandler{ + config: &Config{ + BlobSizeLimitError: defaultBlobSizeLimitError, + BlobSizeLimitWarn: defaultBlobSizeLimitWarn, + MaxIDLengthLimit: func() int { return defaultMaxIDLengthLimit }, + DefaultActivityRetryPolicy: getDefaultRetrySettings, + }, + logger: log.NewNoopLogger(), + } + nsID := namespace.ID("test-namespace-id") + + newReq := func(requestId string) *workflowservice.StartActivityExecutionRequest { + return &workflowservice.StartActivityExecutionRequest{ + Namespace: "test-namespace", + ActivityId: "test-activity", + ActivityType: &commonpb.ActivityType{ + Name: "test-type", + }, + TaskQueue: &taskqueuepb.TaskQueue{ + Name: "test-queue", + }, + StartToCloseTimeout: durationpb.New(time.Minute), + RequestId: requestId, + } + } + + // Simulate two RetryableInterceptor attempts: both call + // validateAndPopulateStartRequest with the same request pointer. + validateTwoAttempts := func(t *testing.T, req *workflowservice.StartActivityExecutionRequest) { + t.Helper() + clone1, err := h.validateAndPopulateStartRequest(context.Background(), req, nsID) + require.NoError(t, err) + require.NotEmpty(t, clone1.RequestId) + + clone2, err := h.validateAndPopulateStartRequest(context.Background(), req, nsID) + require.NoError(t, err) + require.Equal(t, clone1.RequestId, clone2.RequestId) + } + + // validateTwice calls validate twice and asserts the request ID is stable. 
+ validateTwice := func(t *testing.T, req hasRequestID, validate func() error) { + t.Helper() + require.NoError(t, validate()) + require.NotEmpty(t, req.GetRequestId()) + firstID := req.GetRequestId() + require.NoError(t, validate()) + require.Equal(t, firstID, req.GetRequestId()) + } + + t.Run("start/server-generated", func(t *testing.T) { + validateTwoAttempts(t, newReq("")) + }) + + t.Run("start/client-provided", func(t *testing.T) { + validateTwoAttempts(t, newReq("my-request-id")) + }) + + t.Run("terminate/server-generated", func(t *testing.T) { + req := &workflowservice.TerminateActivityExecutionRequest{ + Namespace: "test-namespace", + ActivityId: "test-activity", + } + validateTwice(t, req, func() error { + return validateAndNormalizeTerminateRequest( + req, defaultMaxIDLengthLimit, defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, log.NewNoopLogger()) + }) + }) + + t.Run("cancel/server-generated", func(t *testing.T) { + req := &workflowservice.RequestCancelActivityExecutionRequest{ + Namespace: "test-namespace", + ActivityId: "test-activity", + } + validateTwice(t, req, func() error { + return validateAndNormalizeCancelRequest( + req, defaultMaxIDLengthLimit, defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, log.NewNoopLogger()) + }) + }) +} diff --git a/chasm/lib/activity/fx.go b/chasm/lib/activity/fx.go new file mode 100644 index 00000000000..905042382c2 --- /dev/null +++ b/chasm/lib/activity/fx.go @@ -0,0 +1,39 @@ +package activity + +import ( + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/resource" + "go.uber.org/fx" +) + +var HistoryModule = fx.Module( + "activity-history", + fx.Provide( + ConfigProvider, + newActivityDispatchTaskHandler, + newScheduleToStartTimeoutTaskHandler, + newScheduleToCloseTimeoutTaskHandler, + newStartToCloseTimeoutTaskHandler, + newHeartbeatTimeoutTaskHandler, + newHandler, + newLibrary, + ), + fx.Invoke(func(l *library, registry 
*chasm.Registry) error { + return registry.Register(l) + }), +) + +var FrontendModule = fx.Module( + "activity-frontend", + fx.Provide(ConfigProvider), + fx.Provide(activitypb.NewActivityServiceLayeredClient), + fx.Provide(NewFrontendHandler), + fx.Provide(resource.SearchAttributeValidatorProvider), + fx.Provide(newComponentOnlyLibrary), + fx.Invoke(func(l *componentOnlyLibrary, registry *chasm.Registry) error { + // Frontend needs to register the component in order to serialize ComponentRefs, but doesn't + // need task handlers. + return registry.Register(l) + }), +) diff --git a/chasm/lib/activity/gen/activitypb/v1/activity_state.go-helpers.pb.go b/chasm/lib/activity/gen/activitypb/v1/activity_state.go-helpers.pb.go new file mode 100644 index 00000000000..a765e208683 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/activity_state.go-helpers.pb.go @@ -0,0 +1,292 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package activitypb + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ActivityState to the protobuf v3 wire format +func (val *ActivityState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityState from the protobuf v3 wire format +func (val *ActivityState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityState + switch t := that.(type) { + case *ActivityState: + that1 = t + case ActivityState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityCancelState to the protobuf v3 wire format +func (val *ActivityCancelState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityCancelState from the protobuf v3 wire format +func (val *ActivityCancelState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityCancelState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityCancelState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityCancelState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityCancelState + switch t := that.(type) { + case *ActivityCancelState: + that1 = t + case ActivityCancelState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityTerminateState to the protobuf v3 wire format +func (val *ActivityTerminateState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityTerminateState from the protobuf v3 wire format +func (val *ActivityTerminateState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityTerminateState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityTerminateState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityTerminateState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityTerminateState + switch t := that.(type) { + case *ActivityTerminateState: + that1 = t + case ActivityTerminateState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityAttemptState to the protobuf v3 wire format +func (val *ActivityAttemptState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityAttemptState from the protobuf v3 wire format +func (val *ActivityAttemptState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityAttemptState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityAttemptState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityAttemptState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityAttemptState + switch t := that.(type) { + case *ActivityAttemptState: + that1 = t + case ActivityAttemptState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityHeartbeatState to the protobuf v3 wire format +func (val *ActivityHeartbeatState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityHeartbeatState from the protobuf v3 wire format +func (val *ActivityHeartbeatState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityHeartbeatState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityHeartbeatState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityHeartbeatState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityHeartbeatState + switch t := that.(type) { + case *ActivityHeartbeatState: + that1 = t + case ActivityHeartbeatState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityRequestData to the protobuf v3 wire format +func (val *ActivityRequestData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityRequestData from the protobuf v3 wire format +func (val *ActivityRequestData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityRequestData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityRequestData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityRequestData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityRequestData + switch t := that.(type) { + case *ActivityRequestData: + that1 = t + case ActivityRequestData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ActivityOutcome to the protobuf v3 wire format +func (val *ActivityOutcome) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityOutcome from the protobuf v3 wire format +func (val *ActivityOutcome) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityOutcome) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityOutcome values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityOutcome) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityOutcome + switch t := that.(type) { + case *ActivityOutcome: + that1 = t + case ActivityOutcome: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +var ( + ActivityExecutionStatus_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Scheduled": 1, + "Started": 2, + "CancelRequested": 3, + "Completed": 4, + "Failed": 5, + "Canceled": 6, + "Terminated": 7, + "TimedOut": 8, + } +) + +// ActivityExecutionStatusFromString parses a ActivityExecutionStatus value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to ActivityExecutionStatus +func ActivityExecutionStatusFromString(s string) (ActivityExecutionStatus, error) { + if v, ok := ActivityExecutionStatus_value[s]; ok { + return ActivityExecutionStatus(v), nil + } else if v, ok := ActivityExecutionStatus_shorthandValue[s]; ok { + return ActivityExecutionStatus(v), nil + } + return ActivityExecutionStatus(0), fmt.Errorf("%s is not a valid ActivityExecutionStatus", s) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/activity_state.pb.go b/chasm/lib/activity/gen/activitypb/v1/activity_state.pb.go new file mode 100644 index 00000000000..3e95ee84f59 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/activity_state.pb.go @@ -0,0 +1,1084 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/activity_state.proto + +package activitypb + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/common/v1" + v12 "go.temporal.io/api/deployment/v1" + v14 "go.temporal.io/api/failure/v1" + v13 "go.temporal.io/api/sdk/v1" + v11 "go.temporal.io/api/taskqueue/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ActivityExecutionStatus int32 + +const ( + ACTIVITY_EXECUTION_STATUS_UNSPECIFIED ActivityExecutionStatus = 0 + // The activity has been scheduled, but a worker has not accepted the task for the current + // attempt. The activity may be backing off between attempts or waiting for a worker to pick it + // up. + ACTIVITY_EXECUTION_STATUS_SCHEDULED ActivityExecutionStatus = 1 + // A worker has accepted a task for the current attempt. + ACTIVITY_EXECUTION_STATUS_STARTED ActivityExecutionStatus = 2 + // A caller has requested cancellation of the activity. + ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED ActivityExecutionStatus = 3 + // The activity completed successfully. + ACTIVITY_EXECUTION_STATUS_COMPLETED ActivityExecutionStatus = 4 + // The activity completed with failure. + ACTIVITY_EXECUTION_STATUS_FAILED ActivityExecutionStatus = 5 + // The activity completed as canceled. + // Requesting to cancel an activity does not automatically transition the activity to canceled status. 
If the worker + // responds to cancel the activity after requesting cancellation, the status will transition to cancelled. If the + // activity completes, fails, times out or terminates after cancel is requested and before the worker responds with + // cancelled. The activity will be stay in the terminal non-cancelled status. + ACTIVITY_EXECUTION_STATUS_CANCELED ActivityExecutionStatus = 6 + // The activity was terminated. Termination does not reach the worker and the activity code cannot react to it. + // A terminated activity may have a running attempt and will be requested to be canceled by the server when it + // heartbeats. + ACTIVITY_EXECUTION_STATUS_TERMINATED ActivityExecutionStatus = 7 + // The activity has timed out by reaching the specified schedule-to-start or schedule-to-close timeouts. + // Additionally, after all retries are exhausted for start-to-close or heartbeat timeouts, the activity will also + // transition to timed out status. + ACTIVITY_EXECUTION_STATUS_TIMED_OUT ActivityExecutionStatus = 8 +) + +// Enum value maps for ActivityExecutionStatus. 
+var ( + ActivityExecutionStatus_name = map[int32]string{ + 0: "ACTIVITY_EXECUTION_STATUS_UNSPECIFIED", + 1: "ACTIVITY_EXECUTION_STATUS_SCHEDULED", + 2: "ACTIVITY_EXECUTION_STATUS_STARTED", + 3: "ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED", + 4: "ACTIVITY_EXECUTION_STATUS_COMPLETED", + 5: "ACTIVITY_EXECUTION_STATUS_FAILED", + 6: "ACTIVITY_EXECUTION_STATUS_CANCELED", + 7: "ACTIVITY_EXECUTION_STATUS_TERMINATED", + 8: "ACTIVITY_EXECUTION_STATUS_TIMED_OUT", + } + ActivityExecutionStatus_value = map[string]int32{ + "ACTIVITY_EXECUTION_STATUS_UNSPECIFIED": 0, + "ACTIVITY_EXECUTION_STATUS_SCHEDULED": 1, + "ACTIVITY_EXECUTION_STATUS_STARTED": 2, + "ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED": 3, + "ACTIVITY_EXECUTION_STATUS_COMPLETED": 4, + "ACTIVITY_EXECUTION_STATUS_FAILED": 5, + "ACTIVITY_EXECUTION_STATUS_CANCELED": 6, + "ACTIVITY_EXECUTION_STATUS_TERMINATED": 7, + "ACTIVITY_EXECUTION_STATUS_TIMED_OUT": 8, + } +) + +func (x ActivityExecutionStatus) Enum() *ActivityExecutionStatus { + p := new(ActivityExecutionStatus) + *p = x + return p +} + +func (x ActivityExecutionStatus) String() string { + switch x { + case ACTIVITY_EXECUTION_STATUS_UNSPECIFIED: + return "Unspecified" + case ACTIVITY_EXECUTION_STATUS_SCHEDULED: + return "Scheduled" + case ACTIVITY_EXECUTION_STATUS_STARTED: + return "Started" + case ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED: + return "CancelRequested" + case ACTIVITY_EXECUTION_STATUS_COMPLETED: + return "Completed" + case ACTIVITY_EXECUTION_STATUS_FAILED: + return "Failed" + case ACTIVITY_EXECUTION_STATUS_CANCELED: + return "Canceled" + case ACTIVITY_EXECUTION_STATUS_TERMINATED: + return "Terminated" + case ACTIVITY_EXECUTION_STATUS_TIMED_OUT: + return "TimedOut" + + // Deprecated: Use ActivityExecutionStatus.Descriptor instead. 
+ default: + return strconv.Itoa(int(x)) + } + +} + +func (ActivityExecutionStatus) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes[0].Descriptor() +} + +func (ActivityExecutionStatus) Type() protoreflect.EnumType { + return &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes[0] +} + +func (x ActivityExecutionStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +func (ActivityExecutionStatus) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{0} +} + +type ActivityState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The type of the activity, a string that maps to a registered activity on a worker. + ActivityType *v1.ActivityType `protobuf:"bytes,1,opt,name=activity_type,json=activityType,proto3" json:"activity_type,omitempty"` + TaskQueue *v11.TaskQueue `protobuf:"bytes,2,opt,name=task_queue,json=taskQueue,proto3" json:"task_queue,omitempty"` + // Indicates how long the caller is willing to wait for an activity completion. Limits how long + // retries will be attempted. Either this or `start_to_close_timeout` must be specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToCloseTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3" json:"schedule_to_close_timeout,omitempty"` + // Limits time an activity task can stay in a task queue before a worker picks it up. This + // timeout is always non retryable, as all a retry would achieve is to put it back into the same + // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not + // specified. 
+ // + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` + // Maximum time an activity is allowed to execute after being picked up by a worker. This + // timeout is always retryable. Either this or `schedule_to_close_timeout` must be + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // + // aip.dev/not-precedent: "to" is used to indicate interval. --) + StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` + // Maximum permitted time between successful worker heartbeats. + HeartbeatTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=heartbeat_timeout,json=heartbeatTimeout,proto3" json:"heartbeat_timeout,omitempty"` + // The retry policy for the activity. Will never exceed `schedule_to_close_timeout`. + RetryPolicy *v1.RetryPolicy `protobuf:"bytes,7,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // All of the possible activity statuses (covers both the public ActivityExecutionStatus and PendingActivityState). + // TODO: consider moving this into ActivityAttemptState and renaming that message. This could save mutating two + // components on each attempt transition. + Status ActivityExecutionStatus `protobuf:"varint,8,opt,name=status,proto3,enum=temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatus" json:"status,omitempty"` + // Time the activity was originally scheduled via a StartActivityExecution request. + ScheduleTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=schedule_time,json=scheduleTime,proto3" json:"schedule_time,omitempty"` + // Priority metadata. 
+ Priority *v1.Priority `protobuf:"bytes,10,opt,name=priority,proto3" json:"priority,omitempty"` + // Set if activity cancellation was requested. + CancelState *ActivityCancelState `protobuf:"bytes,11,opt,name=cancel_state,json=cancelState,proto3" json:"cancel_state,omitempty"` + // Set if the activity was terminated + TerminateState *ActivityTerminateState `protobuf:"bytes,12,opt,name=terminate_state,json=terminateState,proto3" json:"terminate_state,omitempty"` + // Amount of time to wait before dispatching the activity task to the task queue for the first time. If the activity + // has a retry policy, retry attempts will not have start delay applied. + StartDelay *durationpb.Duration `protobuf:"bytes,13,opt,name=start_delay,json=startDelay,proto3" json:"start_delay,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityState) Reset() { + *x = ActivityState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityState) ProtoMessage() {} + +func (x *ActivityState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityState.ProtoReflect.Descriptor instead. 
+func (*ActivityState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{0} +} + +func (x *ActivityState) GetActivityType() *v1.ActivityType { + if x != nil { + return x.ActivityType + } + return nil +} + +func (x *ActivityState) GetTaskQueue() *v11.TaskQueue { + if x != nil { + return x.TaskQueue + } + return nil +} + +func (x *ActivityState) GetScheduleToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToCloseTimeout + } + return nil +} + +func (x *ActivityState) GetScheduleToStartTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToStartTimeout + } + return nil +} + +func (x *ActivityState) GetStartToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.StartToCloseTimeout + } + return nil +} + +func (x *ActivityState) GetHeartbeatTimeout() *durationpb.Duration { + if x != nil { + return x.HeartbeatTimeout + } + return nil +} + +func (x *ActivityState) GetRetryPolicy() *v1.RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *ActivityState) GetStatus() ActivityExecutionStatus { + if x != nil { + return x.Status + } + return ACTIVITY_EXECUTION_STATUS_UNSPECIFIED +} + +func (x *ActivityState) GetScheduleTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduleTime + } + return nil +} + +func (x *ActivityState) GetPriority() *v1.Priority { + if x != nil { + return x.Priority + } + return nil +} + +func (x *ActivityState) GetCancelState() *ActivityCancelState { + if x != nil { + return x.CancelState + } + return nil +} + +func (x *ActivityState) GetTerminateState() *ActivityTerminateState { + if x != nil { + return x.TerminateState + } + return nil +} + +func (x *ActivityState) GetStartDelay() *durationpb.Duration { + if x != nil { + return x.StartDelay + } + return nil +} + +type ActivityCancelState struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string 
`protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + RequestTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=request_time,json=requestTime,proto3" json:"request_time,omitempty"` + Identity string `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityCancelState) Reset() { + *x = ActivityCancelState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityCancelState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityCancelState) ProtoMessage() {} + +func (x *ActivityCancelState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityCancelState.ProtoReflect.Descriptor instead. 
+func (*ActivityCancelState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{1} +} + +func (x *ActivityCancelState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *ActivityCancelState) GetRequestTime() *timestamppb.Timestamp { + if x != nil { + return x.RequestTime + } + return nil +} + +func (x *ActivityCancelState) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *ActivityCancelState) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type ActivityTerminateState struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityTerminateState) Reset() { + *x = ActivityTerminateState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityTerminateState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityTerminateState) ProtoMessage() {} + +func (x *ActivityTerminateState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityTerminateState.ProtoReflect.Descriptor instead. 
+func (*ActivityTerminateState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{2} +} + +func (x *ActivityTerminateState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type ActivityAttemptState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The attempt this activity is currently on. + // Incremented each time a new attempt is scheduled. A newly created activity will immediately be scheduled, and + // the count is set to 1. + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // Time from the last attempt failure to the next activity retry. + // If the activity is currently running, this represents the next retry interval in case the attempt fails. + // If activity is currently backing off between attempt, this represents the current retry interval. + // If there is no next retry allowed, this field will be null. + // This interval is typically calculated from the specified retry policy, but may be modified if an activity fails + // with a retryable application failure specifying a retry delay. + CurrentRetryInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=current_retry_interval,json=currentRetryInterval,proto3" json:"current_retry_interval,omitempty"` + // Time the last attempt was started. + StartedTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + // The time when the last activity attempt completed. If activity has not been completed yet, it will be null. + CompleteTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=complete_time,json=completeTime,proto3" json:"complete_time,omitempty"` + // Details about the last failure. This will only be updated when an activity attempt fails, + // including start-to-close timeout. 
Activity success, termination, schedule-to-start and schedule-to-close timeouts + // will not reset it. + LastFailureDetails *ActivityAttemptState_LastFailureDetails `protobuf:"bytes,5,opt,name=last_failure_details,json=lastFailureDetails,proto3" json:"last_failure_details,omitempty"` + // An incremental version number used to validate tasks. + // Initially this only verifies that a task belong to the current attempt. + // Later on this stamp will be used to also invalidate tasks when the activity is paused, reset, or has its options + // updated. + Stamp int32 `protobuf:"varint,6,opt,name=stamp,proto3" json:"stamp,omitempty"` + LastWorkerIdentity string `protobuf:"bytes,7,opt,name=last_worker_identity,json=lastWorkerIdentity,proto3" json:"last_worker_identity,omitempty"` + // The Worker Deployment Version this activity was dispatched to most recently. + // If nil, the activity has not yet been dispatched or was last dispatched to an unversioned worker. + LastDeploymentVersion *v12.WorkerDeploymentVersion `protobuf:"bytes,8,opt,name=last_deployment_version,json=lastDeploymentVersion,proto3" json:"last_deployment_version,omitempty"` + // The request ID that came from matching's RecordActivityTaskStarted API call. Used to make this API idempotent in + // case of implicit retries. 
+ StartRequestId string `protobuf:"bytes,9,opt,name=start_request_id,json=startRequestId,proto3" json:"start_request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityAttemptState) Reset() { + *x = ActivityAttemptState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityAttemptState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityAttemptState) ProtoMessage() {} + +func (x *ActivityAttemptState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityAttemptState.ProtoReflect.Descriptor instead. 
+func (*ActivityAttemptState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{3} +} + +func (x *ActivityAttemptState) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *ActivityAttemptState) GetCurrentRetryInterval() *durationpb.Duration { + if x != nil { + return x.CurrentRetryInterval + } + return nil +} + +func (x *ActivityAttemptState) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *ActivityAttemptState) GetCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.CompleteTime + } + return nil +} + +func (x *ActivityAttemptState) GetLastFailureDetails() *ActivityAttemptState_LastFailureDetails { + if x != nil { + return x.LastFailureDetails + } + return nil +} + +func (x *ActivityAttemptState) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +func (x *ActivityAttemptState) GetLastWorkerIdentity() string { + if x != nil { + return x.LastWorkerIdentity + } + return "" +} + +func (x *ActivityAttemptState) GetLastDeploymentVersion() *v12.WorkerDeploymentVersion { + if x != nil { + return x.LastDeploymentVersion + } + return nil +} + +func (x *ActivityAttemptState) GetStartRequestId() string { + if x != nil { + return x.StartRequestId + } + return "" +} + +type ActivityHeartbeatState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Details provided in the last recorded activity heartbeat. + Details *v1.Payloads `protobuf:"bytes,1,opt,name=details,proto3" json:"details,omitempty"` + // Time the last heartbeat was recorded. + RecordedTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=recorded_time,json=recordedTime,proto3" json:"recorded_time,omitempty"` + // Total number of heartbeats recorded across all attempts of this activity, including retries. 
+ TotalHeartbeatCount int64 `protobuf:"varint,3,opt,name=total_heartbeat_count,json=totalHeartbeatCount,proto3" json:"total_heartbeat_count,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityHeartbeatState) Reset() { + *x = ActivityHeartbeatState{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityHeartbeatState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityHeartbeatState) ProtoMessage() {} + +func (x *ActivityHeartbeatState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityHeartbeatState.ProtoReflect.Descriptor instead. +func (*ActivityHeartbeatState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{4} +} + +func (x *ActivityHeartbeatState) GetDetails() *v1.Payloads { + if x != nil { + return x.Details + } + return nil +} + +func (x *ActivityHeartbeatState) GetRecordedTime() *timestamppb.Timestamp { + if x != nil { + return x.RecordedTime + } + return nil +} + +func (x *ActivityHeartbeatState) GetTotalHeartbeatCount() int64 { + if x != nil { + return x.TotalHeartbeatCount + } + return 0 +} + +type ActivityRequestData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Serialized activity input, passed as arguments to the activity function. 
+ Input *v1.Payloads `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + Header *v1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header,omitempty"` + // Metadata for use by user interfaces to display the fixed as-of-start summary and details of the activity. + UserMetadata *v13.UserMetadata `protobuf:"bytes,3,opt,name=user_metadata,json=userMetadata,proto3" json:"user_metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityRequestData) Reset() { + *x = ActivityRequestData{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityRequestData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityRequestData) ProtoMessage() {} + +func (x *ActivityRequestData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityRequestData.ProtoReflect.Descriptor instead. 
+func (*ActivityRequestData) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{5} +} + +func (x *ActivityRequestData) GetInput() *v1.Payloads { + if x != nil { + return x.Input + } + return nil +} + +func (x *ActivityRequestData) GetHeader() *v1.Header { + if x != nil { + return x.Header + } + return nil +} + +func (x *ActivityRequestData) GetUserMetadata() *v13.UserMetadata { + if x != nil { + return x.UserMetadata + } + return nil +} + +type ActivityOutcome struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *ActivityOutcome_Successful_ + // *ActivityOutcome_Failed_ + Variant isActivityOutcome_Variant `protobuf_oneof:"variant"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityOutcome) Reset() { + *x = ActivityOutcome{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityOutcome) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityOutcome) ProtoMessage() {} + +func (x *ActivityOutcome) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityOutcome.ProtoReflect.Descriptor instead. 
+func (*ActivityOutcome) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{6} +} + +func (x *ActivityOutcome) GetVariant() isActivityOutcome_Variant { + if x != nil { + return x.Variant + } + return nil +} + +func (x *ActivityOutcome) GetSuccessful() *ActivityOutcome_Successful { + if x != nil { + if x, ok := x.Variant.(*ActivityOutcome_Successful_); ok { + return x.Successful + } + } + return nil +} + +func (x *ActivityOutcome) GetFailed() *ActivityOutcome_Failed { + if x != nil { + if x, ok := x.Variant.(*ActivityOutcome_Failed_); ok { + return x.Failed + } + } + return nil +} + +type isActivityOutcome_Variant interface { + isActivityOutcome_Variant() +} + +type ActivityOutcome_Successful_ struct { + Successful *ActivityOutcome_Successful `protobuf:"bytes,1,opt,name=successful,proto3,oneof"` +} + +type ActivityOutcome_Failed_ struct { + Failed *ActivityOutcome_Failed `protobuf:"bytes,2,opt,name=failed,proto3,oneof"` +} + +func (*ActivityOutcome_Successful_) isActivityOutcome_Variant() {} + +func (*ActivityOutcome_Failed_) isActivityOutcome_Variant() {} + +type ActivityAttemptState_LastFailureDetails struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The last time the activity attempt failed. + Time *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // Failure details from the last failed attempt. 
+ Failure *v14.Failure `protobuf:"bytes,2,opt,name=failure,proto3" json:"failure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityAttemptState_LastFailureDetails) Reset() { + *x = ActivityAttemptState_LastFailureDetails{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityAttemptState_LastFailureDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityAttemptState_LastFailureDetails) ProtoMessage() {} + +func (x *ActivityAttemptState_LastFailureDetails) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityAttemptState_LastFailureDetails.ProtoReflect.Descriptor instead. 
+func (*ActivityAttemptState_LastFailureDetails) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *ActivityAttemptState_LastFailureDetails) GetTime() *timestamppb.Timestamp { + if x != nil { + return x.Time + } + return nil +} + +func (x *ActivityAttemptState_LastFailureDetails) GetFailure() *v14.Failure { + if x != nil { + return x.Failure + } + return nil +} + +type ActivityOutcome_Successful struct { + state protoimpl.MessageState `protogen:"open.v1"` + Output *v1.Payloads `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityOutcome_Successful) Reset() { + *x = ActivityOutcome_Successful{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityOutcome_Successful) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityOutcome_Successful) ProtoMessage() {} + +func (x *ActivityOutcome_Successful) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityOutcome_Successful.ProtoReflect.Descriptor instead. 
+func (*ActivityOutcome_Successful) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *ActivityOutcome_Successful) GetOutput() *v1.Payloads { + if x != nil { + return x.Output + } + return nil +} + +type ActivityOutcome_Failed struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Only filled on schedule-to-start timeouts, schedule-to-close timeouts or terminations. All other attempt + // failures will be recorded in ActivityAttemptState.last_failure_details. + Failure *v14.Failure `protobuf:"bytes,1,opt,name=failure,proto3" json:"failure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityOutcome_Failed) Reset() { + *x = ActivityOutcome_Failed{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityOutcome_Failed) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityOutcome_Failed) ProtoMessage() {} + +func (x *ActivityOutcome_Failed) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityOutcome_Failed.ProtoReflect.Descriptor instead. 
+func (*ActivityOutcome_Failed) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *ActivityOutcome_Failed) GetFailure() *v14.Failure { + if x != nil { + return x.Failure + } + return nil +} + +var File_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc = "" + + "\n" + + "@temporal/server/chasm/lib/activity/proto/v1/activity_state.proto\x12+temporal.server.chasm.lib.activity.proto.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a(temporal/api/deployment/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\x1a'temporal/api/sdk/v1/user_metadata.proto\x1a'temporal/api/taskqueue/v1/message.proto\"\x97\b\n" + + "\rActivityState\x12I\n" + + "\ractivity_type\x18\x01 \x01(\v2$.temporal.api.common.v1.ActivityTypeR\factivityType\x12C\n" + + "\n" + + "task_queue\x18\x02 \x01(\v2$.temporal.api.taskqueue.v1.TaskQueueR\ttaskQueue\x12T\n" + + "\x19schedule_to_close_timeout\x18\x03 \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToCloseTimeout\x12T\n" + + "\x19schedule_to_start_timeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12N\n" + + "\x16start_to_close_timeout\x18\x05 \x01(\v2\x19.google.protobuf.DurationR\x13startToCloseTimeout\x12F\n" + + "\x11heartbeat_timeout\x18\x06 \x01(\v2\x19.google.protobuf.DurationR\x10heartbeatTimeout\x12F\n" + + "\fretry_policy\x18\a \x01(\v2#.temporal.api.common.v1.RetryPolicyR\vretryPolicy\x12\\\n" + + "\x06status\x18\b \x01(\x0e2D.temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatusR\x06status\x12?\n" + + "\rschedule_time\x18\t \x01(\v2\x1a.google.protobuf.TimestampR\fscheduleTime\x12<\n" + + "\bpriority\x18\n" + + " \x01(\v2 .temporal.api.common.v1.PriorityR\bpriority\x12c\n" + + 
"\fcancel_state\x18\v \x01(\v2@.temporal.server.chasm.lib.activity.proto.v1.ActivityCancelStateR\vcancelState\x12l\n" + + "\x0fterminate_state\x18\f \x01(\v2C.temporal.server.chasm.lib.activity.proto.v1.ActivityTerminateStateR\x0eterminateState\x12:\n" + + "\vstart_delay\x18\r \x01(\v2\x19.google.protobuf.DurationR\n" + + "startDelay\"\xa7\x01\n" + + "\x13ActivityCancelState\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\x12=\n" + + "\frequest_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\vrequestTime\x12\x1a\n" + + "\bidentity\x18\x03 \x01(\tR\bidentity\x12\x16\n" + + "\x06reason\x18\x04 \x01(\tR\x06reason\"7\n" + + "\x16ActivityTerminateState\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\"\xe8\x05\n" + + "\x14ActivityAttemptState\x12\x14\n" + + "\x05count\x18\x01 \x01(\x05R\x05count\x12O\n" + + "\x16current_retry_interval\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\x14currentRetryInterval\x12=\n" + + "\fstarted_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12?\n" + + "\rcomplete_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\fcompleteTime\x12\x86\x01\n" + + "\x14last_failure_details\x18\x05 \x01(\v2T.temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetailsR\x12lastFailureDetails\x12\x14\n" + + "\x05stamp\x18\x06 \x01(\x05R\x05stamp\x120\n" + + "\x14last_worker_identity\x18\a \x01(\tR\x12lastWorkerIdentity\x12k\n" + + "\x17last_deployment_version\x18\b \x01(\v23.temporal.api.deployment.v1.WorkerDeploymentVersionR\x15lastDeploymentVersion\x12(\n" + + "\x10start_request_id\x18\t \x01(\tR\x0estartRequestId\x1a\x80\x01\n" + + "\x12LastFailureDetails\x12.\n" + + "\x04time\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\x04time\x12:\n" + + "\afailure\x18\x02 \x01(\v2 .temporal.api.failure.v1.FailureR\afailure\"\xc9\x01\n" + + "\x16ActivityHeartbeatState\x12:\n" + + "\adetails\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\adetails\x12?\n" + + "\rrecorded_time\x18\x02 
\x01(\v2\x1a.google.protobuf.TimestampR\frecordedTime\x122\n" + + "\x15total_heartbeat_count\x18\x03 \x01(\x03R\x13totalHeartbeatCount\"\xcd\x01\n" + + "\x13ActivityRequestData\x126\n" + + "\x05input\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\x05input\x126\n" + + "\x06header\x18\x02 \x01(\v2\x1e.temporal.api.common.v1.HeaderR\x06header\x12F\n" + + "\ruser_metadata\x18\x03 \x01(\v2!.temporal.api.sdk.v1.UserMetadataR\fuserMetadata\"\xf4\x02\n" + + "\x0fActivityOutcome\x12i\n" + + "\n" + + "successful\x18\x01 \x01(\v2G.temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.SuccessfulH\x00R\n" + + "successful\x12]\n" + + "\x06failed\x18\x02 \x01(\v2C.temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.FailedH\x00R\x06failed\x1aF\n" + + "\n" + + "Successful\x128\n" + + "\x06output\x18\x01 \x01(\v2 .temporal.api.common.v1.PayloadsR\x06output\x1aD\n" + + "\x06Failed\x12:\n" + + "\afailure\x18\x01 \x01(\v2 .temporal.api.failure.v1.FailureR\afailureB\t\n" + + "\avariant*\x8e\x03\n" + + "\x17ActivityExecutionStatus\x12)\n" + + "%ACTIVITY_EXECUTION_STATUS_UNSPECIFIED\x10\x00\x12'\n" + + "#ACTIVITY_EXECUTION_STATUS_SCHEDULED\x10\x01\x12%\n" + + "!ACTIVITY_EXECUTION_STATUS_STARTED\x10\x02\x12.\n" + + "*ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED\x10\x03\x12'\n" + + "#ACTIVITY_EXECUTION_STATUS_COMPLETED\x10\x04\x12$\n" + + " ACTIVITY_EXECUTION_STATUS_FAILED\x10\x05\x12&\n" + + "\"ACTIVITY_EXECUTION_STATUS_CANCELED\x10\x06\x12(\n" + + "$ACTIVITY_EXECUTION_STATUS_TERMINATED\x10\a\x12'\n" + + "#ACTIVITY_EXECUTION_STATUS_TIMED_OUT\x10\bBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescGZIP() []byte { + 
file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDescData +} + +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_goTypes = []any{ + (ActivityExecutionStatus)(0), // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatus + (*ActivityState)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.ActivityState + (*ActivityCancelState)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.ActivityCancelState + (*ActivityTerminateState)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.ActivityTerminateState + (*ActivityAttemptState)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState + (*ActivityHeartbeatState)(nil), // 5: temporal.server.chasm.lib.activity.proto.v1.ActivityHeartbeatState + (*ActivityRequestData)(nil), // 6: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData + (*ActivityOutcome)(nil), // 7: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome + (*ActivityAttemptState_LastFailureDetails)(nil), // 8: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails + (*ActivityOutcome_Successful)(nil), // 9: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Successful + (*ActivityOutcome_Failed)(nil), // 10: 
temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Failed + (*v1.ActivityType)(nil), // 11: temporal.api.common.v1.ActivityType + (*v11.TaskQueue)(nil), // 12: temporal.api.taskqueue.v1.TaskQueue + (*durationpb.Duration)(nil), // 13: google.protobuf.Duration + (*v1.RetryPolicy)(nil), // 14: temporal.api.common.v1.RetryPolicy + (*timestamppb.Timestamp)(nil), // 15: google.protobuf.Timestamp + (*v1.Priority)(nil), // 16: temporal.api.common.v1.Priority + (*v12.WorkerDeploymentVersion)(nil), // 17: temporal.api.deployment.v1.WorkerDeploymentVersion + (*v1.Payloads)(nil), // 18: temporal.api.common.v1.Payloads + (*v1.Header)(nil), // 19: temporal.api.common.v1.Header + (*v13.UserMetadata)(nil), // 20: temporal.api.sdk.v1.UserMetadata + (*v14.Failure)(nil), // 21: temporal.api.failure.v1.Failure +} +var file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_depIdxs = []int32{ + 11, // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityState.activity_type:type_name -> temporal.api.common.v1.ActivityType + 12, // 1: temporal.server.chasm.lib.activity.proto.v1.ActivityState.task_queue:type_name -> temporal.api.taskqueue.v1.TaskQueue + 13, // 2: temporal.server.chasm.lib.activity.proto.v1.ActivityState.schedule_to_close_timeout:type_name -> google.protobuf.Duration + 13, // 3: temporal.server.chasm.lib.activity.proto.v1.ActivityState.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 13, // 4: temporal.server.chasm.lib.activity.proto.v1.ActivityState.start_to_close_timeout:type_name -> google.protobuf.Duration + 13, // 5: temporal.server.chasm.lib.activity.proto.v1.ActivityState.heartbeat_timeout:type_name -> google.protobuf.Duration + 14, // 6: temporal.server.chasm.lib.activity.proto.v1.ActivityState.retry_policy:type_name -> temporal.api.common.v1.RetryPolicy + 0, // 7: temporal.server.chasm.lib.activity.proto.v1.ActivityState.status:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityExecutionStatus + 15, // 8: 
temporal.server.chasm.lib.activity.proto.v1.ActivityState.schedule_time:type_name -> google.protobuf.Timestamp + 16, // 9: temporal.server.chasm.lib.activity.proto.v1.ActivityState.priority:type_name -> temporal.api.common.v1.Priority + 2, // 10: temporal.server.chasm.lib.activity.proto.v1.ActivityState.cancel_state:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityCancelState + 3, // 11: temporal.server.chasm.lib.activity.proto.v1.ActivityState.terminate_state:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityTerminateState + 13, // 12: temporal.server.chasm.lib.activity.proto.v1.ActivityState.start_delay:type_name -> google.protobuf.Duration + 15, // 13: temporal.server.chasm.lib.activity.proto.v1.ActivityCancelState.request_time:type_name -> google.protobuf.Timestamp + 13, // 14: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.current_retry_interval:type_name -> google.protobuf.Duration + 15, // 15: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.started_time:type_name -> google.protobuf.Timestamp + 15, // 16: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.complete_time:type_name -> google.protobuf.Timestamp + 8, // 17: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.last_failure_details:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails + 17, // 18: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.last_deployment_version:type_name -> temporal.api.deployment.v1.WorkerDeploymentVersion + 18, // 19: temporal.server.chasm.lib.activity.proto.v1.ActivityHeartbeatState.details:type_name -> temporal.api.common.v1.Payloads + 15, // 20: temporal.server.chasm.lib.activity.proto.v1.ActivityHeartbeatState.recorded_time:type_name -> google.protobuf.Timestamp + 18, // 21: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData.input:type_name -> temporal.api.common.v1.Payloads + 19, // 22: 
temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData.header:type_name -> temporal.api.common.v1.Header + 20, // 23: temporal.server.chasm.lib.activity.proto.v1.ActivityRequestData.user_metadata:type_name -> temporal.api.sdk.v1.UserMetadata + 9, // 24: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.successful:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Successful + 10, // 25: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.failed:type_name -> temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Failed + 15, // 26: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails.time:type_name -> google.protobuf.Timestamp + 21, // 27: temporal.server.chasm.lib.activity.proto.v1.ActivityAttemptState.LastFailureDetails.failure:type_name -> temporal.api.failure.v1.Failure + 18, // 28: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Successful.output:type_name -> temporal.api.common.v1.Payloads + 21, // 29: temporal.server.chasm.lib.activity.proto.v1.ActivityOutcome.Failed.failure:type_name -> temporal.api.failure.v1.Failure + 30, // [30:30] is the sub-list for method output_type + 30, // [30:30] is the sub-list for method input_type + 30, // [30:30] is the sub-list for extension type_name + 30, // [30:30] is the sub-list for extension extendee + 0, // [0:30] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto != nil { + return + } + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes[6].OneofWrappers = []any{ + (*ActivityOutcome_Successful_)(nil), + (*ActivityOutcome_Failed_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_rawDesc)), + NumEnums: 1, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_depIdxs, + EnumInfos: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_enumTypes, + MessageInfos: file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_activity_state_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/gen/activitypb/v1/request_response.go-helpers.pb.go b/chasm/lib/activity/gen/activitypb/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..517287dc3e2 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/request_response.go-helpers.pb.go @@ -0,0 +1,450 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package activitypb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type StartActivityExecutionRequest to the protobuf v3 wire format +func (val *StartActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartActivityExecutionRequest from the protobuf v3 wire format +func (val *StartActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartActivityExecutionRequest + switch t := that.(type) { + case *StartActivityExecutionRequest: + that1 = t + case StartActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartActivityExecutionResponse to the protobuf v3 wire format +func (val *StartActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartActivityExecutionResponse from the protobuf v3 wire format +func (val *StartActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartActivityExecutionResponse + switch t := that.(type) { + case *StartActivityExecutionResponse: + that1 = t + case StartActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeActivityExecutionRequest to the protobuf v3 wire format +func (val *DescribeActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeActivityExecutionRequest from the protobuf v3 wire format +func (val *DescribeActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeActivityExecutionRequest + switch t := that.(type) { + case *DescribeActivityExecutionRequest: + that1 = t + case DescribeActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeActivityExecutionResponse to the protobuf v3 wire format +func (val *DescribeActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeActivityExecutionResponse from the protobuf v3 wire format +func (val *DescribeActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeActivityExecutionResponse + switch t := that.(type) { + case *DescribeActivityExecutionResponse: + that1 = t + case DescribeActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollActivityExecutionRequest to the protobuf v3 wire format +func (val *PollActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollActivityExecutionRequest from the protobuf v3 wire format +func (val *PollActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollActivityExecutionRequest + switch t := that.(type) { + case *PollActivityExecutionRequest: + that1 = t + case PollActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollActivityExecutionResponse to the protobuf v3 wire format +func (val *PollActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollActivityExecutionResponse from the protobuf v3 wire format +func (val *PollActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollActivityExecutionResponse + switch t := that.(type) { + case *PollActivityExecutionResponse: + that1 = t + case PollActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TerminateActivityExecutionRequest to the protobuf v3 wire format +func (val *TerminateActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TerminateActivityExecutionRequest from the protobuf v3 wire format +func (val *TerminateActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TerminateActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TerminateActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TerminateActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TerminateActivityExecutionRequest + switch t := that.(type) { + case *TerminateActivityExecutionRequest: + that1 = t + case TerminateActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TerminateActivityExecutionResponse to the protobuf v3 wire format +func (val *TerminateActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TerminateActivityExecutionResponse from the protobuf v3 wire format +func (val *TerminateActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TerminateActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TerminateActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TerminateActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TerminateActivityExecutionResponse + switch t := that.(type) { + case *TerminateActivityExecutionResponse: + that1 = t + case TerminateActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RequestCancelActivityExecutionRequest to the protobuf v3 wire format +func (val *RequestCancelActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestCancelActivityExecutionRequest from the protobuf v3 wire format +func (val *RequestCancelActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestCancelActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestCancelActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestCancelActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestCancelActivityExecutionRequest + switch t := that.(type) { + case *RequestCancelActivityExecutionRequest: + that1 = t + case RequestCancelActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RequestCancelActivityExecutionResponse to the protobuf v3 wire format +func (val *RequestCancelActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestCancelActivityExecutionResponse from the protobuf v3 wire format +func (val *RequestCancelActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestCancelActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestCancelActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestCancelActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestCancelActivityExecutionResponse + switch t := that.(type) { + case *RequestCancelActivityExecutionResponse: + that1 = t + case RequestCancelActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteActivityExecutionRequest to the protobuf v3 wire format +func (val *DeleteActivityExecutionRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteActivityExecutionRequest from the protobuf v3 wire format +func (val *DeleteActivityExecutionRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteActivityExecutionRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteActivityExecutionRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteActivityExecutionRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteActivityExecutionRequest + switch t := that.(type) { + case *DeleteActivityExecutionRequest: + that1 = t + case DeleteActivityExecutionRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteActivityExecutionResponse to the protobuf v3 wire format +func (val *DeleteActivityExecutionResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteActivityExecutionResponse from the protobuf v3 wire format +func (val *DeleteActivityExecutionResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteActivityExecutionResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteActivityExecutionResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteActivityExecutionResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteActivityExecutionResponse + switch t := that.(type) { + case *DeleteActivityExecutionResponse: + that1 = t + case DeleteActivityExecutionResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/request_response.pb.go b/chasm/lib/activity/gen/activitypb/v1/request_response.pb.go new file mode 100644 index 00000000000..0407199486e --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/request_response.pb.go @@ -0,0 +1,686 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/request_response.proto + +package activitypb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/workflowservice/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StartActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.StartActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartActivityExecutionRequest) Reset() { + *x = StartActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartActivityExecutionRequest) ProtoMessage() {} + +func (x *StartActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*StartActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *StartActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *StartActivityExecutionRequest) GetFrontendRequest() *v1.StartActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type StartActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.StartActivityExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartActivityExecutionResponse) Reset() { + *x = StartActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartActivityExecutionResponse) ProtoMessage() {} + +func (x *StartActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*StartActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *StartActivityExecutionResponse) GetFrontendResponse() *v1.StartActivityExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type DescribeActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DescribeActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeActivityExecutionRequest) Reset() { + *x = DescribeActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeActivityExecutionRequest) ProtoMessage() {} + +func (x *DescribeActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *DescribeActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DescribeActivityExecutionRequest) GetFrontendRequest() *v1.DescribeActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DescribeActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.DescribeActivityExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeActivityExecutionResponse) Reset() { + *x = DescribeActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeActivityExecutionResponse) ProtoMessage() {} + +func (x *DescribeActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*DescribeActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{3} +} + +func (x *DescribeActivityExecutionResponse) GetFrontendResponse() *v1.DescribeActivityExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type PollActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.PollActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollActivityExecutionRequest) Reset() { + *x = PollActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollActivityExecutionRequest) ProtoMessage() {} + +func (x *PollActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*PollActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{4} +} + +func (x *PollActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *PollActivityExecutionRequest) GetFrontendRequest() *v1.PollActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type PollActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.PollActivityExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollActivityExecutionResponse) Reset() { + *x = PollActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollActivityExecutionResponse) ProtoMessage() {} + +func (x *PollActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*PollActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{5} +} + +func (x *PollActivityExecutionResponse) GetFrontendResponse() *v1.PollActivityExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type TerminateActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.TerminateActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TerminateActivityExecutionRequest) Reset() { + *x = TerminateActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TerminateActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateActivityExecutionRequest) ProtoMessage() {} + +func (x *TerminateActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*TerminateActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{6} +} + +func (x *TerminateActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *TerminateActivityExecutionRequest) GetFrontendRequest() *v1.TerminateActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type TerminateActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TerminateActivityExecutionResponse) Reset() { + *x = TerminateActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TerminateActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateActivityExecutionResponse) ProtoMessage() {} + +func (x *TerminateActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*TerminateActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{7} +} + +type RequestCancelActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.RequestCancelActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelActivityExecutionRequest) Reset() { + *x = RequestCancelActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelActivityExecutionRequest) ProtoMessage() {} + +func (x *RequestCancelActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*RequestCancelActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{8} +} + +func (x *RequestCancelActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RequestCancelActivityExecutionRequest) GetFrontendRequest() *v1.RequestCancelActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type RequestCancelActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelActivityExecutionResponse) Reset() { + *x = RequestCancelActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelActivityExecutionResponse) ProtoMessage() {} + +func (x *RequestCancelActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*RequestCancelActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{9} +} + +type DeleteActivityExecutionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DeleteActivityExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteActivityExecutionRequest) Reset() { + *x = DeleteActivityExecutionRequest{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteActivityExecutionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteActivityExecutionRequest) ProtoMessage() {} + +func (x *DeleteActivityExecutionRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteActivityExecutionRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteActivityExecutionRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{10} +} + +func (x *DeleteActivityExecutionRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DeleteActivityExecutionRequest) GetFrontendRequest() *v1.DeleteActivityExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DeleteActivityExecutionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteActivityExecutionResponse) Reset() { + *x = DeleteActivityExecutionResponse{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteActivityExecutionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteActivityExecutionResponse) ProtoMessage() {} + +func (x *DeleteActivityExecutionResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteActivityExecutionResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteActivityExecutionResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP(), []int{11} +} + +var File_temporal_server_chasm_lib_activity_proto_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc = "" + + "\n" + + "Btemporal/server/chasm/lib/activity/proto/v1/request_response.proto\x12+temporal.server.chasm.lib.activity.proto.v1\x1a6temporal/api/workflowservice/v1/request_response.proto\"\xad\x01\n" + + "\x1dStartActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12i\n" + + "\x10frontend_request\x18\x02 \x01(\v2>.temporal.api.workflowservice.v1.StartActivityExecutionRequestR\x0ffrontendRequest\"\x8e\x01\n" + + "\x1eStartActivityExecutionResponse\x12l\n" + + "\x11frontend_response\x18\x01 \x01(\v2?.temporal.api.workflowservice.v1.StartActivityExecutionResponseR\x10frontendResponse\"\xb3\x01\n" + + " DescribeActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12l\n" + + "\x10frontend_request\x18\x02 \x01(\v2A.temporal.api.workflowservice.v1.DescribeActivityExecutionRequestR\x0ffrontendRequest\"\x94\x01\n" + + "!DescribeActivityExecutionResponse\x12o\n" + + "\x11frontend_response\x18\x01 \x01(\v2B.temporal.api.workflowservice.v1.DescribeActivityExecutionResponseR\x10frontendResponse\"\xab\x01\n" + + "\x1cPollActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12h\n" + + "\x10frontend_request\x18\x02 \x01(\v2=.temporal.api.workflowservice.v1.PollActivityExecutionRequestR\x0ffrontendRequest\"\x8c\x01\n" + + "\x1dPollActivityExecutionResponse\x12k\n" + + "\x11frontend_response\x18\x01 \x01(\v2>.temporal.api.workflowservice.v1.PollActivityExecutionResponseR\x10frontendResponse\"\xb5\x01\n" + + "!TerminateActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12m\n" + + 
"\x10frontend_request\x18\x02 \x01(\v2B.temporal.api.workflowservice.v1.TerminateActivityExecutionRequestR\x0ffrontendRequest\"$\n" + + "\"TerminateActivityExecutionResponse\"\xbd\x01\n" + + "%RequestCancelActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12q\n" + + "\x10frontend_request\x18\x02 \x01(\v2F.temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequestR\x0ffrontendRequest\"(\n" + + "&RequestCancelActivityExecutionResponse\"\xaf\x01\n" + + "\x1eDeleteActivityExecutionRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12j\n" + + "\x10frontend_request\x18\x02 \x01(\v2?.temporal.api.workflowservice.v1.DeleteActivityExecutionRequestR\x0ffrontendRequest\"!\n" + + "\x1fDeleteActivityExecutionResponseBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDescData +} + +var file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_goTypes = []any{ + (*StartActivityExecutionRequest)(nil), // 0: 
temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest + (*StartActivityExecutionResponse)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse + (*DescribeActivityExecutionRequest)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest + (*DescribeActivityExecutionResponse)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse + (*PollActivityExecutionRequest)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest + (*PollActivityExecutionResponse)(nil), // 5: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse + (*TerminateActivityExecutionRequest)(nil), // 6: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest + (*TerminateActivityExecutionResponse)(nil), // 7: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse + (*RequestCancelActivityExecutionRequest)(nil), // 8: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest + (*RequestCancelActivityExecutionResponse)(nil), // 9: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse + (*DeleteActivityExecutionRequest)(nil), // 10: temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionRequest + (*DeleteActivityExecutionResponse)(nil), // 11: temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionResponse + (*v1.StartActivityExecutionRequest)(nil), // 12: temporal.api.workflowservice.v1.StartActivityExecutionRequest + (*v1.StartActivityExecutionResponse)(nil), // 13: temporal.api.workflowservice.v1.StartActivityExecutionResponse + (*v1.DescribeActivityExecutionRequest)(nil), // 14: temporal.api.workflowservice.v1.DescribeActivityExecutionRequest + (*v1.DescribeActivityExecutionResponse)(nil), // 15: temporal.api.workflowservice.v1.DescribeActivityExecutionResponse + (*v1.PollActivityExecutionRequest)(nil), // 16: 
temporal.api.workflowservice.v1.PollActivityExecutionRequest + (*v1.PollActivityExecutionResponse)(nil), // 17: temporal.api.workflowservice.v1.PollActivityExecutionResponse + (*v1.TerminateActivityExecutionRequest)(nil), // 18: temporal.api.workflowservice.v1.TerminateActivityExecutionRequest + (*v1.RequestCancelActivityExecutionRequest)(nil), // 19: temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequest + (*v1.DeleteActivityExecutionRequest)(nil), // 20: temporal.api.workflowservice.v1.DeleteActivityExecutionRequest +} +var file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_depIdxs = []int32{ + 12, // 0: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.StartActivityExecutionRequest + 13, // 1: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.StartActivityExecutionResponse + 14, // 2: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DescribeActivityExecutionRequest + 15, // 3: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.DescribeActivityExecutionResponse + 16, // 4: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.PollActivityExecutionRequest + 17, // 5: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.PollActivityExecutionResponse + 18, // 6: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.TerminateActivityExecutionRequest + 19, // 7: 
temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequest + 20, // 8: temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DeleteActivityExecutionRequest + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_request_response_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_request_response_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/gen/activitypb/v1/service.pb.go 
b/chasm/lib/activity/gen/activitypb/v1/service.pb.go new file mode 100644 index 00000000000..2bca0c6046c --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/service.pb.go @@ -0,0 +1,96 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/service.proto + +package activitypb + +import ( + reflect "reflect" + unsafe "unsafe" + + _ "go.temporal.io/server/api/common/v1" + _ "go.temporal.io/server/api/routing/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_chasm_lib_activity_proto_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_service_proto_rawDesc = "" + + "\n" + + "9temporal/server/chasm/lib/activity/proto/v1/service.proto\x12+temporal.server.chasm.lib.activity.proto.v1\x1aBtemporal/server/chasm/lib/activity/proto/v1/request_response.proto\x1a0temporal/server/api/common/v1/api_category.proto\x1a.temporal/server/api/routing/v1/extension.proto2\xf2\n" + + "\n" + + "\x0fActivityService\x12\xdb\x01\n" + + "\x16StartActivityExecution\x12J.temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest\x1aK.temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.activity_id\x12\xe4\x01\n" + + 
"\x19DescribeActivityExecution\x12M.temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest\x1aN.temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.activity_id\x12\xd8\x01\n" + + "\x15PollActivityExecution\x12I.temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest\x1aJ.temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse\"(\x8a\xb5\x18\x02\b\x02\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.activity_id\x12\xe7\x01\n" + + "\x1aTerminateActivityExecution\x12N.temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest\x1aO.temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.activity_id\x12\xf3\x01\n" + + "\x1eRequestCancelActivityExecution\x12R.temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest\x1aS.temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.activity_id\x12\xde\x01\n" + + "\x17DeleteActivityExecution\x12K.temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionRequest\x1aL.temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.activity_idBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var file_temporal_server_chasm_lib_activity_proto_v1_service_proto_goTypes = []any{ + (*StartActivityExecutionRequest)(nil), // 0: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest + (*DescribeActivityExecutionRequest)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest + (*PollActivityExecutionRequest)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest + 
(*TerminateActivityExecutionRequest)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest + (*RequestCancelActivityExecutionRequest)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest + (*DeleteActivityExecutionRequest)(nil), // 5: temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionRequest + (*StartActivityExecutionResponse)(nil), // 6: temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse + (*DescribeActivityExecutionResponse)(nil), // 7: temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse + (*PollActivityExecutionResponse)(nil), // 8: temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse + (*TerminateActivityExecutionResponse)(nil), // 9: temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse + (*RequestCancelActivityExecutionResponse)(nil), // 10: temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse + (*DeleteActivityExecutionResponse)(nil), // 11: temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionResponse +} +var file_temporal_server_chasm_lib_activity_proto_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityService.StartActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionRequest + 1, // 1: temporal.server.chasm.lib.activity.proto.v1.ActivityService.DescribeActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionRequest + 2, // 2: temporal.server.chasm.lib.activity.proto.v1.ActivityService.PollActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionRequest + 3, // 3: temporal.server.chasm.lib.activity.proto.v1.ActivityService.TerminateActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionRequest + 4, // 4: 
temporal.server.chasm.lib.activity.proto.v1.ActivityService.RequestCancelActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionRequest + 5, // 5: temporal.server.chasm.lib.activity.proto.v1.ActivityService.DeleteActivityExecution:input_type -> temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionRequest + 6, // 6: temporal.server.chasm.lib.activity.proto.v1.ActivityService.StartActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.StartActivityExecutionResponse + 7, // 7: temporal.server.chasm.lib.activity.proto.v1.ActivityService.DescribeActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.DescribeActivityExecutionResponse + 8, // 8: temporal.server.chasm.lib.activity.proto.v1.ActivityService.PollActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.PollActivityExecutionResponse + 9, // 9: temporal.server.chasm.lib.activity.proto.v1.ActivityService.TerminateActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.TerminateActivityExecutionResponse + 10, // 10: temporal.server.chasm.lib.activity.proto.v1.ActivityService.RequestCancelActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.RequestCancelActivityExecutionResponse + 11, // 11: temporal.server.chasm.lib.activity.proto.v1.ActivityService.DeleteActivityExecution:output_type -> temporal.server.chasm.lib.activity.proto.v1.DeleteActivityExecutionResponse + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_activity_proto_v1_service_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_service_proto_init() { + if 
File_temporal_server_chasm_lib_activity_proto_v1_service_proto != nil { + return + } + file_temporal_server_chasm_lib_activity_proto_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_service_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_service_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_service_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_service_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/gen/activitypb/v1/service_client.pb.go b/chasm/lib/activity/gen/activitypb/v1/service_client.pb.go new file mode 100644 index 00000000000..b1d80f018f1 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/service_client.pb.go @@ -0,0 +1,318 @@ +// Code generated by protoc-gen-go-chasm. DO NOT EDIT. +package activitypb + +import ( + "context" + "time" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "google.golang.org/grpc" +) + +// ActivityServiceLayeredClient is a client for ActivityService. 
+type ActivityServiceLayeredClient struct { + metricsHandler metrics.Handler + numShards int32 + redirector history.Redirector[ActivityServiceClient] + retryPolicy backoff.RetryPolicy +} + +// NewActivityServiceLayeredClient initializes a new ActivityServiceLayeredClient. +func NewActivityServiceLayeredClient( + dc *dynamicconfig.Collection, + rpcFactory common.RPCFactory, + monitor membership.Monitor, + config *config.Persistence, + logger log.Logger, + metricsHandler metrics.Handler, +) (ActivityServiceClient, error) { + resolver, err := monitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + connections := history.NewConnectionPool(resolver, rpcFactory, NewActivityServiceClient) + var redirector history.Redirector[ActivityServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { + redirector = history.NewCachingRedirector( + connections, + resolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) + } else { + redirector = history.NewBasicRedirector(connections, resolver) + } + return &ActivityServiceLayeredClient{ + metricsHandler: metricsHandler, + redirector: redirector, + numShards: config.NumHistoryShards, + retryPolicy: common.CreateHistoryClientRetryPolicy(), + }, nil +} +func (c *ActivityServiceLayeredClient) callStartActivityExecutionNoRetry( + ctx context.Context, + request *StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*StartActivityExecutionResponse, error) { + var response *StartActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.StartActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.StartActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) StartActivityExecution( + ctx context.Context, + request *StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*StartActivityExecutionResponse, error) { + call := func(ctx context.Context) (*StartActivityExecutionResponse, error) { + return c.callStartActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callDescribeActivityExecutionNoRetry( + ctx context.Context, + request *DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*DescribeActivityExecutionResponse, error) { + var response *DescribeActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.DescribeActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DescribeActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) DescribeActivityExecution( + ctx context.Context, + request *DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*DescribeActivityExecutionResponse, error) { + call := func(ctx context.Context) (*DescribeActivityExecutionResponse, error) { + return c.callDescribeActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callPollActivityExecutionNoRetry( + ctx context.Context, + request *PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*PollActivityExecutionResponse, error) { + var response *PollActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.PollActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.PollActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) PollActivityExecution( + ctx context.Context, + request *PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*PollActivityExecutionResponse, error) { + call := func(ctx context.Context) (*PollActivityExecutionResponse, error) { + return c.callPollActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callTerminateActivityExecutionNoRetry( + ctx context.Context, + request *TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*TerminateActivityExecutionResponse, error) { + var response *TerminateActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.TerminateActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.TerminateActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) TerminateActivityExecution( + ctx context.Context, + request *TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*TerminateActivityExecutionResponse, error) { + call := func(ctx context.Context) (*TerminateActivityExecutionResponse, error) { + return c.callTerminateActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callRequestCancelActivityExecutionNoRetry( + ctx context.Context, + request *RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*RequestCancelActivityExecutionResponse, error) { + var response *RequestCancelActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.RequestCancelActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.RequestCancelActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) RequestCancelActivityExecution( + ctx context.Context, + request *RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*RequestCancelActivityExecutionResponse, error) { + call := func(ctx context.Context) (*RequestCancelActivityExecutionResponse, error) { + return c.callRequestCancelActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *ActivityServiceLayeredClient) callDeleteActivityExecutionNoRetry( + ctx context.Context, + request *DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (*DeleteActivityExecutionResponse, error) { + var response *DeleteActivityExecutionResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("ActivityService.DeleteActivityExecution"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetActivityId(), c.numShards) + op := func(ctx context.Context, client ActivityServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DeleteActivityExecution(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *ActivityServiceLayeredClient) DeleteActivityExecution( + ctx context.Context, + request *DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (*DeleteActivityExecutionResponse, error) { + call := func(ctx context.Context) (*DeleteActivityExecutionResponse, error) { + return c.callDeleteActivityExecutionNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/service_grpc.pb.go b/chasm/lib/activity/gen/activitypb/v1/service_grpc.pb.go new file mode 100644 index 00000000000..f02184fbd40 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/service_grpc.pb.go @@ -0,0 +1,295 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/chasm/lib/activity/proto/v1/service.proto + +package activitypb + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + ActivityService_StartActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/StartActivityExecution" + ActivityService_DescribeActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/DescribeActivityExecution" + ActivityService_PollActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/PollActivityExecution" + ActivityService_TerminateActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/TerminateActivityExecution" + ActivityService_RequestCancelActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/RequestCancelActivityExecution" + ActivityService_DeleteActivityExecution_FullMethodName = "/temporal.server.chasm.lib.activity.proto.v1.ActivityService/DeleteActivityExecution" +) + +// ActivityServiceClient is the client API for ActivityService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ActivityServiceClient interface { + StartActivityExecution(ctx context.Context, in *StartActivityExecutionRequest, opts ...grpc.CallOption) (*StartActivityExecutionResponse, error) + DescribeActivityExecution(ctx context.Context, in *DescribeActivityExecutionRequest, opts ...grpc.CallOption) (*DescribeActivityExecutionResponse, error) + PollActivityExecution(ctx context.Context, in *PollActivityExecutionRequest, opts ...grpc.CallOption) (*PollActivityExecutionResponse, error) + TerminateActivityExecution(ctx context.Context, in *TerminateActivityExecutionRequest, opts ...grpc.CallOption) (*TerminateActivityExecutionResponse, error) + RequestCancelActivityExecution(ctx context.Context, in *RequestCancelActivityExecutionRequest, opts ...grpc.CallOption) (*RequestCancelActivityExecutionResponse, error) + DeleteActivityExecution(ctx context.Context, in *DeleteActivityExecutionRequest, opts ...grpc.CallOption) (*DeleteActivityExecutionResponse, error) +} + +type activityServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewActivityServiceClient(cc grpc.ClientConnInterface) ActivityServiceClient { + return &activityServiceClient{cc} +} + +func (c *activityServiceClient) StartActivityExecution(ctx context.Context, in *StartActivityExecutionRequest, opts ...grpc.CallOption) (*StartActivityExecutionResponse, error) { + out := new(StartActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_StartActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) DescribeActivityExecution(ctx context.Context, in *DescribeActivityExecutionRequest, opts ...grpc.CallOption) (*DescribeActivityExecutionResponse, error) { + out := new(DescribeActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_DescribeActivityExecution_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) PollActivityExecution(ctx context.Context, in *PollActivityExecutionRequest, opts ...grpc.CallOption) (*PollActivityExecutionResponse, error) { + out := new(PollActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_PollActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) TerminateActivityExecution(ctx context.Context, in *TerminateActivityExecutionRequest, opts ...grpc.CallOption) (*TerminateActivityExecutionResponse, error) { + out := new(TerminateActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_TerminateActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) RequestCancelActivityExecution(ctx context.Context, in *RequestCancelActivityExecutionRequest, opts ...grpc.CallOption) (*RequestCancelActivityExecutionResponse, error) { + out := new(RequestCancelActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_RequestCancelActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *activityServiceClient) DeleteActivityExecution(ctx context.Context, in *DeleteActivityExecutionRequest, opts ...grpc.CallOption) (*DeleteActivityExecutionResponse, error) { + out := new(DeleteActivityExecutionResponse) + err := c.cc.Invoke(ctx, ActivityService_DeleteActivityExecution_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ActivityServiceServer is the server API for ActivityService service. 
+// All implementations must embed UnimplementedActivityServiceServer +// for forward compatibility +type ActivityServiceServer interface { + StartActivityExecution(context.Context, *StartActivityExecutionRequest) (*StartActivityExecutionResponse, error) + DescribeActivityExecution(context.Context, *DescribeActivityExecutionRequest) (*DescribeActivityExecutionResponse, error) + PollActivityExecution(context.Context, *PollActivityExecutionRequest) (*PollActivityExecutionResponse, error) + TerminateActivityExecution(context.Context, *TerminateActivityExecutionRequest) (*TerminateActivityExecutionResponse, error) + RequestCancelActivityExecution(context.Context, *RequestCancelActivityExecutionRequest) (*RequestCancelActivityExecutionResponse, error) + DeleteActivityExecution(context.Context, *DeleteActivityExecutionRequest) (*DeleteActivityExecutionResponse, error) + mustEmbedUnimplementedActivityServiceServer() +} + +// UnimplementedActivityServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedActivityServiceServer struct { +} + +func (UnimplementedActivityServiceServer) StartActivityExecution(context.Context, *StartActivityExecutionRequest) (*StartActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) DescribeActivityExecution(context.Context, *DescribeActivityExecutionRequest) (*DescribeActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) PollActivityExecution(context.Context, *PollActivityExecutionRequest) (*PollActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PollActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) TerminateActivityExecution(context.Context, *TerminateActivityExecutionRequest) (*TerminateActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TerminateActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) RequestCancelActivityExecution(context.Context, *RequestCancelActivityExecutionRequest) (*RequestCancelActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestCancelActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) DeleteActivityExecution(context.Context, *DeleteActivityExecutionRequest) (*DeleteActivityExecutionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteActivityExecution not implemented") +} +func (UnimplementedActivityServiceServer) mustEmbedUnimplementedActivityServiceServer() {} + +// UnsafeActivityServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ActivityServiceServer will +// result in compilation errors. 
+type UnsafeActivityServiceServer interface { + mustEmbedUnimplementedActivityServiceServer() +} + +func RegisterActivityServiceServer(s grpc.ServiceRegistrar, srv ActivityServiceServer) { + s.RegisterService(&ActivityService_ServiceDesc, srv) +} + +func _ActivityService_StartActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).StartActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_StartActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).StartActivityExecution(ctx, req.(*StartActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_DescribeActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).DescribeActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_DescribeActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).DescribeActivityExecution(ctx, req.(*DescribeActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_PollActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PollActivityExecutionRequest) + if err := dec(in); err 
!= nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).PollActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_PollActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).PollActivityExecution(ctx, req.(*PollActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_TerminateActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TerminateActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).TerminateActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_TerminateActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).TerminateActivityExecution(ctx, req.(*TerminateActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ActivityService_RequestCancelActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCancelActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).RequestCancelActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_RequestCancelActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).RequestCancelActivityExecution(ctx, req.(*RequestCancelActivityExecutionRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _ActivityService_DeleteActivityExecution_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteActivityExecutionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ActivityServiceServer).DeleteActivityExecution(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: ActivityService_DeleteActivityExecution_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ActivityServiceServer).DeleteActivityExecution(ctx, req.(*DeleteActivityExecutionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ActivityService_ServiceDesc is the grpc.ServiceDesc for ActivityService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ActivityService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.chasm.lib.activity.proto.v1.ActivityService", + HandlerType: (*ActivityServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "StartActivityExecution", + Handler: _ActivityService_StartActivityExecution_Handler, + }, + { + MethodName: "DescribeActivityExecution", + Handler: _ActivityService_DescribeActivityExecution_Handler, + }, + { + MethodName: "PollActivityExecution", + Handler: _ActivityService_PollActivityExecution_Handler, + }, + { + MethodName: "TerminateActivityExecution", + Handler: _ActivityService_TerminateActivityExecution_Handler, + }, + { + MethodName: "RequestCancelActivityExecution", + Handler: _ActivityService_RequestCancelActivityExecution_Handler, + }, + { + MethodName: "DeleteActivityExecution", + Handler: _ActivityService_DeleteActivityExecution_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/chasm/lib/activity/proto/v1/service.proto", 
+} diff --git a/chasm/lib/activity/gen/activitypb/v1/tasks.go-helpers.pb.go b/chasm/lib/activity/gen/activitypb/v1/tasks.go-helpers.pb.go new file mode 100644 index 00000000000..d7628a6e9e6 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/tasks.go-helpers.pb.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package activitypb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ActivityDispatchTask to the protobuf v3 wire format +func (val *ActivityDispatchTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ActivityDispatchTask from the protobuf v3 wire format +func (val *ActivityDispatchTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ActivityDispatchTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ActivityDispatchTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ActivityDispatchTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ActivityDispatchTask + switch t := that.(type) { + case *ActivityDispatchTask: + that1 = t + case ActivityDispatchTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ScheduleToStartTimeoutTask to the protobuf v3 wire format +func (val *ScheduleToStartTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ScheduleToStartTimeoutTask from the protobuf v3 wire format +func (val *ScheduleToStartTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ScheduleToStartTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ScheduleToStartTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ScheduleToStartTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ScheduleToStartTimeoutTask + switch t := that.(type) { + case *ScheduleToStartTimeoutTask: + that1 = t + case ScheduleToStartTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ScheduleToCloseTimeoutTask to the protobuf v3 wire format +func (val *ScheduleToCloseTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ScheduleToCloseTimeoutTask from the protobuf v3 wire format +func (val *ScheduleToCloseTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ScheduleToCloseTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ScheduleToCloseTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ScheduleToCloseTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ScheduleToCloseTimeoutTask + switch t := that.(type) { + case *ScheduleToCloseTimeoutTask: + that1 = t + case ScheduleToCloseTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartToCloseTimeoutTask to the protobuf v3 wire format +func (val *StartToCloseTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartToCloseTimeoutTask from the protobuf v3 wire format +func (val *StartToCloseTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartToCloseTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartToCloseTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartToCloseTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartToCloseTimeoutTask + switch t := that.(type) { + case *StartToCloseTimeoutTask: + that1 = t + case StartToCloseTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type HeartbeatTimeoutTask to the protobuf v3 wire format +func (val *HeartbeatTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type HeartbeatTimeoutTask from the protobuf v3 wire format +func (val *HeartbeatTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *HeartbeatTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two HeartbeatTimeoutTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *HeartbeatTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *HeartbeatTimeoutTask + switch t := that.(type) { + case *HeartbeatTimeoutTask: + that1 = t + case HeartbeatTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/activity/gen/activitypb/v1/tasks.pb.go b/chasm/lib/activity/gen/activitypb/v1/tasks.pb.go new file mode 100644 index 00000000000..796574e7db2 --- /dev/null +++ b/chasm/lib/activity/gen/activitypb/v1/tasks.pb.go @@ -0,0 +1,307 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/activity/proto/v1/tasks.proto + +package activitypb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ActivityDispatchTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. + Stamp int32 `protobuf:"varint,1,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ActivityDispatchTask) Reset() { + *x = ActivityDispatchTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ActivityDispatchTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ActivityDispatchTask) ProtoMessage() {} + +func (x *ActivityDispatchTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ActivityDispatchTask.ProtoReflect.Descriptor instead. 
+func (*ActivityDispatchTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *ActivityDispatchTask) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +type ScheduleToStartTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. + Stamp int32 `protobuf:"varint,1,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScheduleToStartTimeoutTask) Reset() { + *x = ScheduleToStartTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScheduleToStartTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleToStartTimeoutTask) ProtoMessage() {} + +func (x *ScheduleToStartTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleToStartTimeoutTask.ProtoReflect.Descriptor instead. 
+func (*ScheduleToStartTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +func (x *ScheduleToStartTimeoutTask) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +type ScheduleToCloseTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScheduleToCloseTimeoutTask) Reset() { + *x = ScheduleToCloseTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScheduleToCloseTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleToCloseTimeoutTask) ProtoMessage() {} + +func (x *ScheduleToCloseTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleToCloseTimeoutTask.ProtoReflect.Descriptor instead. +func (*ScheduleToCloseTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{2} +} + +type StartToCloseTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. 
+ Stamp int32 `protobuf:"varint,1,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartToCloseTimeoutTask) Reset() { + *x = StartToCloseTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartToCloseTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartToCloseTimeoutTask) ProtoMessage() {} + +func (x *StartToCloseTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartToCloseTimeoutTask.ProtoReflect.Descriptor instead. +func (*StartToCloseTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +func (x *StartToCloseTimeoutTask) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +// HeartbeatTimeoutTask is a pure task that enforces heartbeat timeouts. +type HeartbeatTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. 
+ Stamp int32 `protobuf:"varint,1,opt,name=stamp,proto3" json:"stamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HeartbeatTimeoutTask) Reset() { + *x = HeartbeatTimeoutTask{} + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HeartbeatTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeartbeatTimeoutTask) ProtoMessage() {} + +func (x *HeartbeatTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeartbeatTimeoutTask.ProtoReflect.Descriptor instead. +func (*HeartbeatTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +func (x *HeartbeatTimeoutTask) GetStamp() int32 { + if x != nil { + return x.Stamp + } + return 0 +} + +var File_temporal_server_chasm_lib_activity_proto_v1_tasks_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc = "" + + "\n" + + "7temporal/server/chasm/lib/activity/proto/v1/tasks.proto\x12+temporal.server.chasm.lib.activity.proto.v1\",\n" + + "\x14ActivityDispatchTask\x12\x14\n" + + "\x05stamp\x18\x01 \x01(\x05R\x05stamp\"2\n" + + "\x1aScheduleToStartTimeoutTask\x12\x14\n" + + "\x05stamp\x18\x01 \x01(\x05R\x05stamp\"\x1c\n" + + "\x1aScheduleToCloseTimeoutTask\"/\n" + + "\x17StartToCloseTimeoutTask\x12\x14\n" + + "\x05stamp\x18\x01 \x01(\x05R\x05stamp\",\n" + + "\x14HeartbeatTimeoutTask\x12\x14\n" + + "\x05stamp\x18\x01 
\x01(\x05R\x05stampBDZBgo.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDescData +} + +var file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_goTypes = []any{ + (*ActivityDispatchTask)(nil), // 0: temporal.server.chasm.lib.activity.proto.v1.ActivityDispatchTask + (*ScheduleToStartTimeoutTask)(nil), // 1: temporal.server.chasm.lib.activity.proto.v1.ScheduleToStartTimeoutTask + (*ScheduleToCloseTimeoutTask)(nil), // 2: temporal.server.chasm.lib.activity.proto.v1.ScheduleToCloseTimeoutTask + (*StartToCloseTimeoutTask)(nil), // 3: temporal.server.chasm.lib.activity.proto.v1.StartToCloseTimeoutTask + (*HeartbeatTimeoutTask)(nil), // 4: temporal.server.chasm.lib.activity.proto.v1.HeartbeatTimeoutTask +} +var file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { 
file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_init() } +func file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_init() { + if File_temporal_server_chasm_lib_activity_proto_v1_tasks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_rawDesc)), + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_activity_proto_v1_tasks_proto = out.File + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_goTypes = nil + file_temporal_server_chasm_lib_activity_proto_v1_tasks_proto_depIdxs = nil +} diff --git a/chasm/lib/activity/handler.go b/chasm/lib/activity/handler.go new file mode 100644 index 00000000000..378776ebca4 --- /dev/null +++ b/chasm/lib/activity/handler.go @@ -0,0 +1,334 @@ +package activity + +import ( + "context" + "errors" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/contextutil" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" +) + +var ( + businessIDReusePolicyMap = map[enumspb.ActivityIdReusePolicy]chasm.BusinessIDReusePolicy{ + enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE: 
chasm.BusinessIDReusePolicyAllowDuplicate, + enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY: chasm.BusinessIDReusePolicyAllowDuplicateFailedOnly, + enumspb.ACTIVITY_ID_REUSE_POLICY_REJECT_DUPLICATE: chasm.BusinessIDReusePolicyRejectDuplicate, + } + + businessIDConflictPolicyMap = map[enumspb.ActivityIdConflictPolicy]chasm.BusinessIDConflictPolicy{ + enumspb.ACTIVITY_ID_CONFLICT_POLICY_FAIL: chasm.BusinessIDConflictPolicyFail, + enumspb.ACTIVITY_ID_CONFLICT_POLICY_USE_EXISTING: chasm.BusinessIDConflictPolicyUseExisting, + } +) + +type handler struct { + activitypb.UnimplementedActivityServiceServer + config *Config + logger log.Logger + metricsHandler metrics.Handler + namespaceRegistry namespace.Registry +} + +func newHandler(config *Config, metricsHandler metrics.Handler, logger log.Logger, namespaceRegistry namespace.Registry) *handler { + return &handler{ + config: config, + logger: logger, + metricsHandler: metricsHandler, + namespaceRegistry: namespaceRegistry, + } +} + +// StartActivityExecution schedules an activity execution. Note that while external callers refer to +// this as "start", the start transition in fact happens later, in response to the activity task in +// matching being delivered to a worker poll request. 
+func (h *handler) StartActivityExecution(ctx context.Context, req *activitypb.StartActivityExecutionRequest) (*activitypb.StartActivityExecutionResponse, error) { + frontendReq := req.GetFrontendRequest() + + reusePolicy, ok := businessIDReusePolicyMap[frontendReq.GetIdReusePolicy()] + if !ok { + return nil, serviceerror.NewInvalidArgumentf("unsupported ID reuse policy: %v", frontendReq.GetIdReusePolicy()) + } + + conflictPolicy, ok := businessIDConflictPolicyMap[frontendReq.GetIdConflictPolicy()] + if !ok { + return nil, serviceerror.NewInvalidArgumentf("unsupported ID conflict policy: %v", frontendReq.GetIdConflictPolicy()) + } + + maxCallbacks := h.config.MaxCallbacksPerExecution(frontendReq.GetNamespace()) + + result, err := chasm.StartExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetActivityId(), + }, + func(mutableContext chasm.MutableContext, request *workflowservice.StartActivityExecutionRequest) (*Activity, error) { + newActivity, err := NewStandaloneActivity(mutableContext, request) + if err != nil { + return nil, err + } + + if cbs := request.GetCompletionCallbacks(); len(cbs) > 0 { + if err := newActivity.addCompletionCallbacks(mutableContext, request.GetRequestId(), cbs, maxCallbacks); err != nil { + return nil, err + } + } + + err = TransitionScheduled.Apply(newActivity, mutableContext, nil) + if err != nil { + return nil, err + } + + return newActivity, nil + }, + frontendReq, + chasm.WithRequestID(frontendReq.GetRequestId()), + chasm.WithBusinessIDPolicy(reusePolicy, conflictPolicy), + ) + + if err != nil { + var alreadyStartedErr *chasm.ExecutionAlreadyStartedError + if errors.As(err, &alreadyStartedErr) { + return nil, serviceerror.NewActivityExecutionAlreadyStarted("activity execution already started", alreadyStartedErr.CurrentRequestID, alreadyStartedErr.CurrentRunID) + } + + return nil, err + } + + // Attach callbacks to an existing activity when 
on_conflict_options.attach_completion_callbacks is set. + // TODO: Use chasm.UpdateWithStartExecution to avoid a second transaction once the engine supports BusinessIDConflictPolicyFail in the updateFn path. + cbs := frontendReq.GetCompletionCallbacks() + if !result.Created && frontendReq.GetOnConflictOptions().GetAttachCompletionCallbacks() && len(cbs) > 0 { + requestID := frontendReq.GetRequestId() + ref := chasm.NewComponentRef[*Activity](result.ExecutionKey) + _, _, err := chasm.UpdateComponent( + ctx, + ref, + func(a *Activity, ctx chasm.MutableContext, _ any) (any, error) { + return nil, a.addCompletionCallbacks(ctx, requestID, cbs, maxCallbacks) + }, + nil, + ) + if err != nil { + return nil, err + } + } + + return &activitypb.StartActivityExecutionResponse{ + FrontendResponse: &workflowservice.StartActivityExecutionResponse{ + RunId: result.ExecutionKey.RunID, + Started: result.Created, + Link: &commonpb.Link{ + Variant: &commonpb.Link_Activity_{ + Activity: &commonpb.Link_Activity{ + Namespace: frontendReq.GetNamespace(), + ActivityId: frontendReq.GetActivityId(), + RunId: result.ExecutionKey.RunID, + }, + }, + }, + // EagerTask: TODO when supported, need to call the same code that would handle the HandleStarted API + }, + }, nil +} + +// DescribeActivityExecution queries current activity state, optionally as a long-poll that waits +// for any state change. When used to long-poll, it returns an empty non-error response on context +// deadline expiry, to indicate that the state being waited for was not reached. Callers should +// interpret this as an invitation to resubmit their long-poll request. This response is sent before +// the caller's deadline (see chasm.activity.longPollBuffer) so that it is likely that the caller +// does indeed receive the non-error response. 
+func (h *handler) DescribeActivityExecution( + ctx context.Context, + req *activitypb.DescribeActivityExecutionRequest, +) (response *activitypb.DescribeActivityExecutionResponse, err error) { + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetActivityId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + + token := req.GetFrontendRequest().GetLongPollToken() + if len(token) == 0 { + return chasm.ReadComponent(ctx, ref, (*Activity).buildDescribeActivityExecutionResponse, req) + } + + // Below, we send an empty non-error response on context deadline expiry. Here we compute a + // deadline that causes us to send that response before the caller's own deadline (see + // chasm.activity.longPollBuffer). We also cap the caller's deadline at + // chasm.activity.longPollTimeout. + ns := req.GetFrontendRequest().GetNamespace() + ctx, cancel := contextutil.WithDeadlineBuffer( + ctx, + h.config.LongPollTimeout(ns), + h.config.LongPollBuffer(ns), + ) + defer cancel() + + response, _, err = chasm.PollComponent(ctx, ref, func( + a *Activity, + ctx chasm.Context, + req *activitypb.DescribeActivityExecutionRequest, + ) (*activitypb.DescribeActivityExecutionResponse, bool, error) { + changed, err := chasm.ExecutionStateChanged(a, ctx, token) + if err != nil { + if errors.Is(err, chasm.ErrMalformedComponentRef) { + return nil, false, serviceerror.NewInvalidArgument("invalid long poll token") + } + if errors.Is(err, chasm.ErrInvalidComponentRef) { + return nil, false, serviceerror.NewInvalidArgument("long poll token does not match execution") + } + return nil, false, err + } + if changed { + response, err := a.buildDescribeActivityExecutionResponse(ctx, req) + return response, true, err + } + return nil, false, nil + }, req) + + if err != nil && ctx.Err() != nil { + // Send empty non-error response on deadline expiry: caller should continue long-polling. 
+ return &activitypb.DescribeActivityExecutionResponse{ + FrontendResponse: &workflowservice.DescribeActivityExecutionResponse{}, + }, nil + } + return response, err +} + +// PollActivityExecution long-polls for activity outcome. It returns an empty non-error response on +// context deadline expiry, to indicate that the state being waited for was not reached. Callers +// should interpret this as an invitation to resubmit their long-poll request. This response is sent +// before the caller's deadline (see chasm.activity.longPollBuffer) so that it is likely that the +// caller does indeed receive the non-error response. +func (h *handler) PollActivityExecution( + ctx context.Context, + req *activitypb.PollActivityExecutionRequest, +) (response *activitypb.PollActivityExecutionResponse, err error) { + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetActivityId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + + // Below, we send an empty non-error response on context deadline expiry. Here we compute a + // deadline that causes us to send that response before the caller's own deadline (see + // chasm.activity.longPollBuffer). We also cap the caller's deadline at + // chasm.activity.longPollTimeout. 
+ ns := req.GetFrontendRequest().GetNamespace() + ctx, cancel := contextutil.WithDeadlineBuffer( + ctx, + h.config.LongPollTimeout(ns), + h.config.LongPollBuffer(ns), + ) + defer cancel() + + response, _, err = chasm.PollComponent(ctx, ref, func( + a *Activity, + ctx chasm.Context, + req *activitypb.PollActivityExecutionRequest, + ) (*activitypb.PollActivityExecutionResponse, bool, error) { + if a.LifecycleState(ctx) != chasm.LifecycleStateRunning { + response := a.buildPollActivityExecutionResponse(ctx) + return response, true, nil + } + return nil, false, nil + }, req) + + if err != nil && ctx.Err() != nil { + // Send an empty non-error response as an invitation to resubmit the long-poll. + return &activitypb.PollActivityExecutionResponse{ + FrontendResponse: &workflowservice.PollActivityExecutionResponse{}, + }, nil + } + return response, err +} + +// DeleteActivityExecution terminates the activity if running, then schedules it for deletion. +func (h *handler) DeleteActivityExecution( + ctx context.Context, + req *activitypb.DeleteActivityExecutionRequest, +) (*activitypb.DeleteActivityExecutionResponse, error) { + frontendReq := req.GetFrontendRequest() + + key := chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetActivityId(), + RunID: frontendReq.GetRunId(), + } + + if err := chasm.DeleteExecution[*Activity](ctx, key, chasm.DeleteExecutionRequest{ + TerminateComponentRequest: chasm.TerminateComponentRequest{ + Reason: "Delete activity execution", + }, + }); err != nil { + return nil, err + } + + return &activitypb.DeleteActivityExecutionResponse{}, nil +} + +// TerminateActivityExecution terminates an activity execution. 
+func (h *handler) TerminateActivityExecution( + ctx context.Context, + req *activitypb.TerminateActivityExecutionRequest, +) (*activitypb.TerminateActivityExecutionResponse, error) { + frontendReq := req.GetFrontendRequest() + + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetActivityId(), + RunID: frontendReq.GetRunId(), + }) + + _, _, err := chasm.UpdateComponent( + ctx, + ref, + (*Activity).Terminate, + chasm.TerminateComponentRequest{ + Reason: frontendReq.GetReason(), + Identity: frontendReq.GetIdentity(), + RequestID: frontendReq.GetRequestId(), + }, + ) + + if err != nil { + return nil, err + } + + return &activitypb.TerminateActivityExecutionResponse{}, nil +} + +// RequestCancelActivityExecution requests cancellation of an activity execution. +func (h *handler) RequestCancelActivityExecution( + ctx context.Context, + req *activitypb.RequestCancelActivityExecutionRequest, +) (response *activitypb.RequestCancelActivityExecutionResponse, err error) { + frontendReq := req.GetFrontendRequest() + + ref := chasm.NewComponentRef[*Activity](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetActivityId(), + RunID: frontendReq.GetRunId(), + }) + + response, _, err = chasm.UpdateComponent( + ctx, + ref, + (*Activity).handleCancellationRequested, + req, + ) + if err != nil { + return nil, err + } + + return response, nil +} diff --git a/chasm/lib/activity/library.go b/chasm/lib/activity/library.go new file mode 100644 index 00000000000..83e3d9067af --- /dev/null +++ b/chasm/lib/activity/library.go @@ -0,0 +1,136 @@ +package activity + +import ( + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/namespace" + "google.golang.org/grpc" +) + +type ctxKeyActivityContextType struct{} + +var ctxKeyActivityContext = ctxKeyActivityContextType{} + +// activityContext holds dependencies 
injected into the chasm.Context for use by Activity methods. +type activityContext struct { + config *Config + namespaceRegistry namespace.Registry +} + +// activityContextFromChasm extracts the activityContext from a chasm.Context. +// Panics if the context value is missing, which indicates a library registration bug. +func activityContextFromChasm(ctx chasm.Context) *activityContext { + //nolint:revive // unchecked-type-assertion: intentional panic on missing context value + return ctx.Value(ctxKeyActivityContext).(*activityContext) +} + +const ( + libraryName = "activity" + componentName = "activity" +) + +var ( + Archetype = chasm.FullyQualifiedName(libraryName, componentName) + ArchetypeID = chasm.GenerateTypeID(Archetype) +) + +type componentOnlyLibrary struct { + chasm.UnimplementedLibrary + config *Config + namespaceRegistry namespace.Registry +} + +func newComponentOnlyLibrary( + config *Config, + namespaceRegistry namespace.Registry, +) *componentOnlyLibrary { + return &componentOnlyLibrary{ + config: config, + namespaceRegistry: namespaceRegistry, + } +} + +func (l *componentOnlyLibrary) Name() string { + return libraryName +} + +func (l *componentOnlyLibrary) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*Activity]( + componentName, + chasm.WithSearchAttributes( + TypeSearchAttribute, + StatusSearchAttribute, + chasm.SearchAttributeTaskQueue, + ), + chasm.WithBusinessIDAlias("ActivityId"), + chasm.WithContextValues(map[any]any{ + ctxKeyActivityContext: &activityContext{ + config: l.config, + namespaceRegistry: l.namespaceRegistry, + }, + }), + ), + } +} + +type library struct { + componentOnlyLibrary + + handler *handler + activityDispatchTaskHandler *activityDispatchTaskHandler + scheduleToStartTimeoutTaskHandler *scheduleToStartTimeoutTaskHandler + scheduleToCloseTimeoutTaskHandler *scheduleToCloseTimeoutTaskHandler + startToCloseTimeoutTaskHandler *startToCloseTimeoutTaskHandler + 
heartbeatTimeoutTaskHandler *heartbeatTimeoutTaskHandler +} + +func newLibrary( + handler *handler, + activityDispatchTaskHandler *activityDispatchTaskHandler, + scheduleToStartTimeoutTaskHandler *scheduleToStartTimeoutTaskHandler, + scheduleToCloseTimeoutTaskHandler *scheduleToCloseTimeoutTaskHandler, + startToCloseTimeoutTaskHandler *startToCloseTimeoutTaskHandler, + heartbeatTimeoutTaskHandler *heartbeatTimeoutTaskHandler, + config *Config, + namespaceRegistry namespace.Registry, +) *library { + return &library{ + componentOnlyLibrary: *newComponentOnlyLibrary(config, namespaceRegistry), + handler: handler, + activityDispatchTaskHandler: activityDispatchTaskHandler, + scheduleToStartTimeoutTaskHandler: scheduleToStartTimeoutTaskHandler, + scheduleToCloseTimeoutTaskHandler: scheduleToCloseTimeoutTaskHandler, + startToCloseTimeoutTaskHandler: startToCloseTimeoutTaskHandler, + heartbeatTimeoutTaskHandler: heartbeatTimeoutTaskHandler, + } +} + +func (l *library) RegisterServices(server *grpc.Server) { + server.RegisterService(&activitypb.ActivityService_ServiceDesc, l.handler) +} + +func (l *library) Tasks() []*chasm.RegistrableTask { + return []*chasm.RegistrableTask{ + chasm.NewRegistrableSideEffectTask( + "dispatch", + l.activityDispatchTaskHandler, + ), + chasm.NewRegistrablePureTask( + "scheduleToStartTimer", + l.scheduleToStartTimeoutTaskHandler, + ), + chasm.NewRegistrablePureTask( + "scheduleToCloseTimer", + l.scheduleToCloseTimeoutTaskHandler, + ), + chasm.NewRegistrablePureTask( + "startToCloseTimer", + l.startToCloseTimeoutTaskHandler, + ), + chasm.NewRegistrablePureTask( + "heartbeatTimer", + l.heartbeatTimeoutTaskHandler, + ), + } +} diff --git a/chasm/lib/activity/proto/v1/activity_state.proto b/chasm/lib/activity/proto/v1/activity_state.proto new file mode 100644 index 00000000000..931afb0b881 --- /dev/null +++ b/chasm/lib/activity/proto/v1/activity_state.proto @@ -0,0 +1,193 @@ +syntax = "proto3"; + +package 
temporal.server.chasm.lib.activity.proto.v1; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/deployment/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/sdk/v1/user_metadata.proto"; +import "temporal/api/taskqueue/v1/message.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +enum ActivityExecutionStatus { + ACTIVITY_EXECUTION_STATUS_UNSPECIFIED = 0; + // The activity has been scheduled, but a worker has not accepted the task for the current + // attempt. The activity may be backing off between attempts or waiting for a worker to pick it + // up. + ACTIVITY_EXECUTION_STATUS_SCHEDULED = 1; + // A worker has accepted a task for the current attempt. + ACTIVITY_EXECUTION_STATUS_STARTED = 2; + // A caller has requested cancellation of the activity. + ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED = 3; + // The activity completed successfully. + ACTIVITY_EXECUTION_STATUS_COMPLETED = 4; + // The activity completed with failure. + ACTIVITY_EXECUTION_STATUS_FAILED = 5; + // The activity completed as canceled. + // Requesting to cancel an activity does not automatically transition the activity to canceled status. If the worker + // responds to cancel the activity after requesting cancellation, the status will transition to cancelled. If the + // activity completes, fails, times out or terminates after cancel is requested and before the worker responds with + // cancelled, the activity will stay in the terminal non-cancelled status. + ACTIVITY_EXECUTION_STATUS_CANCELED = 6; + // The activity was terminated. Termination does not reach the worker and the activity code cannot react to it. + // A terminated activity may have a running attempt and will be requested to be canceled by the server when it + // heartbeats. 
+ ACTIVITY_EXECUTION_STATUS_TERMINATED = 7; + // The activity has timed out by reaching the specified schedule-to-start or schedule-to-close timeouts. + // Additionally, after all retries are exhausted for start-to-close or heartbeat timeouts, the activity will also + // transition to timed out status. + ACTIVITY_EXECUTION_STATUS_TIMED_OUT = 8; +} + +message ActivityState { + // The type of the activity, a string that maps to a registered activity on a worker. + temporal.api.common.v1.ActivityType activity_type = 1; + + temporal.api.taskqueue.v1.TaskQueue task_queue = 2; + + // Indicates how long the caller is willing to wait for an activity completion. Limits how long + // retries will be attempted. Either this or `start_to_close_timeout` must be specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_close_timeout = 3; + // Limits time an activity task can stay in a task queue before a worker picks it up. This + // timeout is always non retryable, as all a retry would achieve is to put it back into the same + // queue. Defaults to `schedule_to_close_timeout` or workflow execution timeout if not + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration schedule_to_start_timeout = 4; + // Maximum time an activity is allowed to execute after being picked up by a worker. This + // timeout is always retryable. Either this or `schedule_to_close_timeout` must be + // specified. + // + // (-- api-linter: core::0140::prepositions=disabled + // aip.dev/not-precedent: "to" is used to indicate interval. --) + google.protobuf.Duration start_to_close_timeout = 5; + // Maximum permitted time between successful worker heartbeats. + google.protobuf.Duration heartbeat_timeout = 6; + // The retry policy for the activity. 
Will never exceed `schedule_to_close_timeout`. + temporal.api.common.v1.RetryPolicy retry_policy = 7; + + // All of the possible activity statuses (covers both the public ActivityExecutionStatus and PendingActivityState). + // TODO: consider moving this into ActivityAttemptState and renaming that message. This could save mutating two + // components on each attempt transition. + ActivityExecutionStatus status = 8; + + // Time the activity was originally scheduled via a StartActivityExecution request. + google.protobuf.Timestamp schedule_time = 9; + + // Priority metadata. + temporal.api.common.v1.Priority priority = 10; + + // Set if activity cancellation was requested. + ActivityCancelState cancel_state = 11; + + // Set if the activity was terminated. + ActivityTerminateState terminate_state = 12; + + // Amount of time to wait before dispatching the activity task to the task queue for the first time. If the activity + // has a retry policy, retry attempts will not have start delay applied. + google.protobuf.Duration start_delay = 13; +} + +message ActivityCancelState { + string request_id = 1; + google.protobuf.Timestamp request_time = 2; + string identity = 3; + string reason = 4; +} + +message ActivityTerminateState { + string request_id = 1; +} + +message ActivityAttemptState { + // The attempt this activity is currently on. + // Incremented each time a new attempt is scheduled. A newly created activity will immediately be scheduled, and + // the count is set to 1. + int32 count = 1; + + // Time from the last attempt failure to the next activity retry. + // If the activity is currently running, this represents the next retry interval in case the attempt fails. + // If activity is currently backing off between attempts, this represents the current retry interval. + // If there is no next retry allowed, this field will be null. 
+ // This interval is typically calculated from the specified retry policy, but may be modified if an activity fails + // with a retryable application failure specifying a retry delay. + google.protobuf.Duration current_retry_interval = 2; + + // Time the last attempt was started. + google.protobuf.Timestamp started_time = 3; + + // The time when the last activity attempt completed. If activity has not been completed yet, it will be null. + google.protobuf.Timestamp complete_time = 4; + + message LastFailureDetails { + // The last time the activity attempt failed. + google.protobuf.Timestamp time = 1; + + // Failure details from the last failed attempt. + temporal.api.failure.v1.Failure failure = 2; + } + + // Details about the last failure. This will only be updated when an activity attempt fails, + // including start-to-close timeout. Activity success, termination, schedule-to-start and schedule-to-close timeouts + // will not reset it. + LastFailureDetails last_failure_details = 5; + + // An incremental version number used to validate tasks. + // Initially this only verifies that a task belongs to the current attempt. + // Later on this stamp will be used to also invalidate tasks when the activity is paused, reset, or has its options + // updated. + int32 stamp = 6; + + string last_worker_identity = 7; + + // The Worker Deployment Version this activity was dispatched to most recently. + // If nil, the activity has not yet been dispatched or was last dispatched to an unversioned worker. + temporal.api.deployment.v1.WorkerDeploymentVersion last_deployment_version = 8; + + // The request ID that came from matching's RecordActivityTaskStarted API call. Used to make this API idempotent in + // case of implicit retries. + string start_request_id = 9; +} + +message ActivityHeartbeatState { + // Details provided in the last recorded activity heartbeat. + temporal.api.common.v1.Payloads details = 1; + // Time the last heartbeat was recorded. 
+ google.protobuf.Timestamp recorded_time = 2; + // Total number of heartbeats recorded across all attempts of this activity, including retries. + int64 total_heartbeat_count = 3; +} + +message ActivityRequestData { + // Serialized activity input, passed as arguments to the activity function. + temporal.api.common.v1.Payloads input = 1; + temporal.api.common.v1.Header header = 2; + + // Metadata for use by user interfaces to display the fixed as-of-start summary and details of the activity. + temporal.api.sdk.v1.UserMetadata user_metadata = 3; +} + +message ActivityOutcome { + message Successful { + temporal.api.common.v1.Payloads output = 1; + } + + message Failed { + // Only filled on schedule-to-start timeouts, schedule-to-close timeouts or terminations. All other attempt + // failures will be recorded in ActivityAttemptState.last_failure_details. + temporal.api.failure.v1.Failure failure = 1; + } + + oneof variant { + Successful successful = 1; + Failed failed = 2; + } +} diff --git a/chasm/lib/activity/proto/v1/request_response.proto b/chasm/lib/activity/proto/v1/request_response.proto new file mode 100644 index 00000000000..918c6f4de31 --- /dev/null +++ b/chasm/lib/activity/proto/v1/request_response.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +import "temporal/api/workflowservice/v1/request_response.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +message StartActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.StartActivityExecutionRequest frontend_request = 2; +} + +message StartActivityExecutionResponse { + temporal.api.workflowservice.v1.StartActivityExecutionResponse frontend_response = 1; +} + +message DescribeActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.DescribeActivityExecutionRequest frontend_request = 2; +} + +message DescribeActivityExecutionResponse { + 
temporal.api.workflowservice.v1.DescribeActivityExecutionResponse frontend_response = 1; +} + +message PollActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.PollActivityExecutionRequest frontend_request = 2; +} + +message PollActivityExecutionResponse { + temporal.api.workflowservice.v1.PollActivityExecutionResponse frontend_response = 1; +} + +message TerminateActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.TerminateActivityExecutionRequest frontend_request = 2; +} + +message TerminateActivityExecutionResponse {} + +message RequestCancelActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.RequestCancelActivityExecutionRequest frontend_request = 2; +} + +message RequestCancelActivityExecutionResponse {} + +message DeleteActivityExecutionRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.DeleteActivityExecutionRequest frontend_request = 2; +} + +message DeleteActivityExecutionResponse {} diff --git a/chasm/lib/activity/proto/v1/service.proto b/chasm/lib/activity/proto/v1/service.proto new file mode 100644 index 00000000000..69810bee55c --- /dev/null +++ b/chasm/lib/activity/proto/v1/service.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +import "chasm/lib/activity/proto/v1/request_response.proto"; +import "temporal/server/api/common/v1/api_category.proto"; +import "temporal/server/api/routing/v1/extension.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +service ActivityService { + rpc StartActivityExecution(StartActivityExecutionRequest) returns (StartActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc 
DescribeActivityExecution(DescribeActivityExecutionRequest) returns (DescribeActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc PollActivityExecution(PollActivityExecutionRequest) returns (PollActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_LONG_POLL; + } + + rpc TerminateActivityExecution(TerminateActivityExecutionRequest) returns (TerminateActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc RequestCancelActivityExecution(RequestCancelActivityExecutionRequest) returns (RequestCancelActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc DeleteActivityExecution(DeleteActivityExecutionRequest) returns (DeleteActivityExecutionResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.activity_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } +} diff --git a/chasm/lib/activity/proto/v1/tasks.proto b/chasm/lib/activity/proto/v1/tasks.proto new file mode 100644 index 00000000000..9a1996e3dd2 --- /dev/null +++ b/chasm/lib/activity/proto/v1/tasks.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.activity.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/activity/gen/activitypb;activitypb"; + +message ActivityDispatchTask { + // The current stamp for this activity execution. 
Used for task validation. See also [ActivityAttemptState]. + int32 stamp = 1; +} + +message ScheduleToStartTimeoutTask { + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. + int32 stamp = 1; +} + +message ScheduleToCloseTimeoutTask {} + +message StartToCloseTimeoutTask { + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. + int32 stamp = 1; +} + +// HeartbeatTimeoutTask is a pure task that enforces heartbeat timeouts. +message HeartbeatTimeoutTask { + // The current stamp for this activity execution. Used for task validation. See also [ActivityAttemptState]. + int32 stamp = 1; +} diff --git a/chasm/lib/activity/statemachine.go b/chasm/lib/activity/statemachine.go new file mode 100644 index 00000000000..b594e56a6d1 --- /dev/null +++ b/chasm/lib/activity/statemachine.go @@ -0,0 +1,387 @@ +package activity + +import ( + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + deploymentpb "go.temporal.io/api/deployment/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1" + "go.temporal.io/server/common/metrics" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Ensure that Activity implements chasm.StateMachine interface +var _ chasm.StateMachine[activitypb.ActivityExecutionStatus] = (*Activity)(nil) + +// StateMachineState returns the current status of the activity. +func (a *Activity) StateMachineState() activitypb.ActivityExecutionStatus { + if a.ActivityState == nil { + return activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED + } + return a.Status +} + +// SetStateMachineState sets the status of the activity. 
+func (a *Activity) SetStateMachineState(state activitypb.ActivityExecutionStatus) { + a.Status = state +} + +// TransitionScheduled transitions to Scheduled status. This is only called on the initial +// scheduling of the activity. +var TransitionScheduled = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + func(a *Activity, ctx chasm.MutableContext, _ any) error { + attempt := a.LastAttempt.Get(ctx) + currentTime := ctx.Now(a) + attempt.Count++ + attempt.Stamp++ + + // Start delay defers the dispatch and extends ScheduleToClose and ScheduleToStart timeouts. StartToClose and + // Heartbeat timeouts are unaffected as they only start when a worker picks up the task. + startDelay := a.GetStartDelay().AsDuration() + startDelayEnd := currentTime.Add(startDelay) + + if timeout := a.GetScheduleToStartTimeout().AsDuration(); timeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: startDelayEnd.Add(timeout), + }, + &activitypb.ScheduleToStartTimeoutTask{ + Stamp: attempt.GetStamp(), + }) + } + + if timeout := a.GetScheduleToCloseTimeout().AsDuration(); timeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: startDelayEnd.Add(timeout), + }, + &activitypb.ScheduleToCloseTimeoutTask{}) + } + + dispatchAttrs := chasm.TaskAttributes{} + if startDelay > 0 { + dispatchAttrs.ScheduledTime = startDelayEnd + } + ctx.AddTask( + a, + dispatchAttrs, + &activitypb.ActivityDispatchTask{ + Stamp: attempt.GetStamp(), + }) + + return nil + }, +) + +type rescheduleEvent struct { + retryInterval time.Duration + failure *failurepb.Failure + timeoutType enumspb.TimeoutType +} + +// TransitionRescheduled transitions to Scheduled from Started, which happens on retries. The event +// to pass in is the failure to be recorded from the previously failed attempt. 
+var TransitionRescheduled = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, // For retries the activity will be in started status + }, + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + func(a *Activity, ctx chasm.MutableContext, event rescheduleEvent) error { + attempt := a.LastAttempt.Get(ctx) + currentTime := ctx.Now(a) + attempt.Count++ + attempt.Stamp++ + + err := a.recordFailedAttempt(ctx, event.retryInterval, event.failure, currentTime, false) + if err != nil { + return err + } + + retryScheduledTime := attemptScheduleTimeForRetry(attempt).AsTime() + + if timeout := a.GetScheduleToStartTimeout().AsDuration(); timeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: retryScheduledTime.Add(timeout), + }, + &activitypb.ScheduleToStartTimeoutTask{ + Stamp: attempt.GetStamp(), + }) + } + + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: retryScheduledTime, + }, + &activitypb.ActivityDispatchTask{ + Stamp: attempt.GetStamp(), + }) + + return nil + }, +) + +// TransitionStarted transitions to Started status. 
+var TransitionStarted = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + func(a *Activity, ctx chasm.MutableContext, request *historyservice.RecordActivityTaskStartedRequest) error { + attempt := a.LastAttempt.Get(ctx) + attempt.StartedTime = timestamppb.New(ctx.Now(a)) + attempt.StartRequestId = request.GetRequestId() + attempt.LastWorkerIdentity = request.GetPollRequest().GetIdentity() + if versionDirective := request.GetVersionDirective().GetDeploymentVersion(); versionDirective != nil { + attempt.LastDeploymentVersion = &deploymentpb.WorkerDeploymentVersion{ + BuildId: versionDirective.GetBuildId(), + DeploymentName: versionDirective.GetDeploymentName(), + } + } + startTime := attempt.GetStartedTime().AsTime() + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: startTime.Add(a.GetStartToCloseTimeout().AsDuration()), + }, + &activitypb.StartToCloseTimeoutTask{ + Stamp: a.LastAttempt.Get(ctx).GetStamp(), + }) + + if heartbeatTimeout := a.GetHeartbeatTimeout().AsDuration(); heartbeatTimeout > 0 { + ctx.AddTask( + a, + chasm.TaskAttributes{ + ScheduledTime: startTime.Add(heartbeatTimeout), + }, + &activitypb.HeartbeatTimeoutTask{ + Stamp: attempt.GetStamp(), + }) + } + + return nil + }, +) + +type completeEvent struct { + req *historyservice.RespondActivityTaskCompletedRequest + metricsHandler metrics.Handler +} + +// TransitionCompleted transitions to Completed status. 
+var TransitionCompleted = chasm.NewTransition( + []activitypb.ActivityExecutionStatus{ + activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + }, + activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED, + func(a *Activity, ctx chasm.MutableContext, event completeEvent) error { + return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error { + req := event.req.GetCompleteRequest() + + attempt := a.LastAttempt.Get(ctx) + attempt.CompleteTime = timestamppb.New(ctx.Now(a)) + attempt.LastWorkerIdentity = req.GetIdentity() + outcome := a.Outcome.Get(ctx) + outcome.Variant = &activitypb.ActivityOutcome_Successful_{ + Successful: &activitypb.ActivityOutcome_Successful{ + Output: req.GetResult(), + }, + } + + a.emitOnCompletedMetrics(ctx, event.metricsHandler) + + return nil + }) + }, +) + +type failedEvent struct { + req *historyservice.RespondActivityTaskFailedRequest + metricsHandler metrics.Handler +} + +// TransitionFailed transitions to Failed status. 
+var TransitionFailed = chasm.NewTransition(
+	[]activitypb.ActivityExecutionStatus{
+		activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED,
+	},
+	activitypb.ACTIVITY_EXECUTION_STATUS_FAILED,
+	func(a *Activity, ctx chasm.MutableContext, event failedEvent) error {
+		return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error {
+			req := event.req.GetFailedRequest()
+
+			// Persist the final heartbeat payload, if the worker attached one,
+			// before recording the failed attempt.
+			if details := req.GetLastHeartbeatDetails(); details != nil {
+				heartbeat := a.getOrCreateLastHeartbeat(ctx)
+				heartbeat.Details = details
+				heartbeat.RecordedTime = timestamppb.New(ctx.Now(a))
+			}
+			attempt := a.LastAttempt.Get(ctx)
+			attempt.LastWorkerIdentity = req.GetIdentity()
+
+			// NOTE(review): recordFailedAttempt args appear to be
+			// (ctx, attemptDelta=0, failure, now, final=true) — the trailing true
+			// presumably marks this a terminal (non-retried) attempt; confirm
+			// against the helper's definition.
+			if err := a.recordFailedAttempt(ctx, 0, req.GetFailure(), ctx.Now(a), true); err != nil {
+				return err
+			}
+
+			a.emitOnFailedMetrics(ctx, event.metricsHandler)
+
+			return nil
+		})
+	},
+)
+
+// terminateEvent carries the terminate request, the metrics handler for
+// termination metrics, and the status the activity is transitioning from.
+type terminateEvent struct {
+	request        chasm.TerminateComponentRequest
+	metricsHandler metrics.Handler
+	fromStatus     activitypb.ActivityExecutionStatus
+}
+
+// TransitionTerminated transitions to Terminated status.
+var TransitionTerminated = chasm.NewTransition(
+	[]activitypb.ActivityExecutionStatus{
+		activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED,
+	},
+	activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED,
+	func(a *Activity, ctx chasm.MutableContext, event terminateEvent) error {
+		return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error {
+			a.TerminateState = &activitypb.ActivityTerminateState{
+				RequestId: event.request.RequestID,
+			}
+			// Termination is stored as a Failed outcome whose failure carries
+			// TerminatedFailureInfo with the terminator's identity and reason.
+			outcome := a.Outcome.Get(ctx)
+			failure := &failurepb.Failure{
+				Message: event.request.Reason,
+				FailureInfo: &failurepb.Failure_TerminatedFailureInfo{
+					TerminatedFailureInfo: &failurepb.TerminatedFailureInfo{
+						Identity: event.request.Identity,
+					},
+				},
+			}
+			outcome.Variant = &activitypb.ActivityOutcome_Failed_{
+				Failed: &activitypb.ActivityOutcome_Failed{
+					Failure: failure,
+				},
+			}
+
+			a.emitOnTerminatedMetrics(event.metricsHandler)
+
+			return nil
+		})
+	},
+)
+
+// TransitionCancelRequested transitions to CancelRequested status.
+// CANCEL_REQUESTED is itself a valid source status, so a repeated cancel
+// request re-applies (overwrites) the stored CancelState rather than failing.
+var TransitionCancelRequested = chasm.NewTransition(
+	[]activitypb.ActivityExecutionStatus{
+		activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED,
+	},
+	activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED,
+	func(a *Activity, ctx chasm.MutableContext, req *workflowservice.RequestCancelActivityExecutionRequest) error {
+		a.CancelState = &activitypb.ActivityCancelState{
+			Identity:    req.GetIdentity(),
+			RequestId:   req.GetRequestId(),
+			Reason:      req.GetReason(),
+			RequestTime: timestamppb.New(ctx.Now(a)),
+		}
+
+		return nil
+	},
+)
+
+// cancelEvent carries the worker-supplied cancellation details, the metrics
+// handler, and the status the activity is transitioning from.
+type cancelEvent struct {
+	details    *commonpb.Payloads
+	handler    metrics.Handler
+	fromStatus activitypb.ActivityExecutionStatus
+}
+
+// TransitionCanceled transitions to Canceled status.
+var TransitionCanceled = chasm.NewTransition(
+	[]activitypb.ActivityExecutionStatus{
+		activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED,
+	},
+	activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED,
+	func(a *Activity, ctx chasm.MutableContext, event cancelEvent) error {
+		return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error {
+			// Cancellation is stored as a Failed outcome whose failure carries
+			// CanceledFailureInfo. The identity comes from the earlier cancel
+			// request (CancelState), not from the worker reporting cancellation.
+			outcome := a.Outcome.Get(ctx)
+			failure := &failurepb.Failure{
+				Message: "Activity canceled",
+				FailureInfo: &failurepb.Failure_CanceledFailureInfo{
+					CanceledFailureInfo: &failurepb.CanceledFailureInfo{
+						Details:  event.details,
+						Identity: a.GetCancelState().GetIdentity(),
+					},
+				},
+			}
+			outcome.Variant = &activitypb.ActivityOutcome_Failed_{
+				Failed: &activitypb.ActivityOutcome_Failed{
+					Failure: failure,
+				},
+			}
+
+			a.emitOnCanceledMetrics(ctx, event.handler, event.fromStatus)
+
+			return nil
+		})
+	},
+)
+
+// timeoutEvent carries the timeout type that fired, the metrics handler, and
+// the status the activity is transitioning from.
+type timeoutEvent struct {
+	metricsHandler metrics.Handler
+	timeoutType    enumspb.TimeoutType
+	fromStatus     activitypb.ActivityExecutionStatus
+}
+
+// TransitionTimedOut transitions to TimedOut status.
+var TransitionTimedOut = chasm.NewTransition(
+	[]activitypb.ActivityExecutionStatus{
+		activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
+		activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED,
+	},
+	activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT,
+	func(a *Activity, ctx chasm.MutableContext, event timeoutEvent) error {
+		timeoutType := event.timeoutType
+
+		return a.StoreOrSelf(ctx).RecordCompleted(ctx, func(ctx chasm.MutableContext) error {
+			var err error
+			switch timeoutType {
+			// Schedule-based timeouts are recorded via a dedicated helper
+			// (the attempt never started / the whole execution expired).
+			case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START,
+				enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE:
+				err = a.recordScheduleToStartOrCloseTimeoutFailure(ctx, timeoutType)
+			// Attempt-based timeouts are recorded as a failed (final) attempt.
+			case enumspb.TIMEOUT_TYPE_START_TO_CLOSE:
+				failure := createStartToCloseTimeoutFailure()
+				err = a.recordFailedAttempt(ctx, 0, failure, ctx.Now(a), true)
+			case enumspb.TIMEOUT_TYPE_HEARTBEAT:
+				failure := createHeartbeatTimeoutFailure()
+				err = a.recordFailedAttempt(ctx, 0, failure, ctx.Now(a), true)
+			default:
+				err = fmt.Errorf("unhandled activity timeout: %v", timeoutType)
+			}
+			if err != nil {
+				return err
+			}
+
+			a.emitOnTimedOutMetrics(ctx, event.metricsHandler, timeoutType, event.fromStatus)
+
+			return nil
+		})
+	},
+)
diff --git a/chasm/lib/activity/statemachine_test.go b/chasm/lib/activity/statemachine_test.go
new file mode 100644
index 00000000000..8e412b09bd4
--- /dev/null
+++ b/chasm/lib/activity/statemachine_test.go
@@ -0,0 +1,717 @@
+package activity
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	commonpb "go.temporal.io/api/common/v1"
+	enumspb "go.temporal.io/api/enums/v1"
+	failurepb "go.temporal.io/api/failure/v1"
+	taskqueuepb "go.temporal.io/api/taskqueue/v1"
+	"go.temporal.io/api/workflowservice/v1"
+	"go.temporal.io/server/api/historyservice/v1"
+	"go.temporal.io/server/chasm"
+	"go.temporal.io/server/chasm/lib/activity/gen/activitypb/v1"
+	"go.temporal.io/server/common/metrics"
+	"go.temporal.io/server/common/payloads"
"go.temporal.io/server/common/testing/protorequire" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + defaultTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + defaultRetryPolicy = &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + BackoffCoefficient: 2.0, + MaximumAttempts: 5, + MaximumInterval: durationpb.New(100 * time.Second), + } + defaultScheduleToCloseTimeout = 10 * time.Minute + defaultScheduleToStartTimeout = 2 * time.Minute + defaultStartToCloseTimeout = 3 * time.Minute +) + +func TestTransitionScheduled(t *testing.T) { + testCases := []struct { + name string + startingAttemptCount int32 + expectedTasks []chasm.MockTask + scheduleToStartTimeout time.Duration + scheduleToCloseTimeout time.Duration + }{ + { + name: "all timeouts set", + startingAttemptCount: 0, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ScheduleToCloseTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + scheduleToCloseTimeout: defaultScheduleToCloseTimeout, + }, + { + name: "schedule to start timeout not set", + startingAttemptCount: 0, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToCloseTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + scheduleToStartTimeout: 0, + scheduleToCloseTimeout: defaultScheduleToCloseTimeout, + }, + { + name: "schedule to close timeout not set", + startingAttemptCount: 0, + expectedTasks: []chasm.MockTask{ + {Payload: &activitypb.ScheduleToStartTimeoutTask{}}, + {Payload: &activitypb.ActivityDispatchTask{}}, + }, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + scheduleToCloseTimeout: 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + 
HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + attemptState := &activitypb.ActivityAttemptState{Count: tc.startingAttemptCount} + outcome := &activitypb.ActivityOutcome{} + input := payloads.EncodeString("test-input") + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(tc.scheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(tc.scheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_UNSPECIFIED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + RequestData: chasm.NewDataField(ctx, &activitypb.ActivityRequestData{ + Input: input, + }), + } + + err := TransitionScheduled.Apply(activity, ctx, nil) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + + // Verify added tasks + require.Len(t, ctx.Tasks, len(tc.expectedTasks)) + for i, expectedTask := range tc.expectedTasks { + actualTask := ctx.Tasks[i] + + require.IsType(t, expectedTask.Payload, actualTask.Payload, "expected %T at index %d, got %T", + expectedTask.Payload, i, actualTask.Payload) + + switch expectedTask.Payload.(type) { + case *activitypb.ActivityDispatchTask: + require.Empty(t, actualTask.Attributes.ScheduledTime) + case *activitypb.ScheduleToStartTimeoutTask: + require.Equal(t, defaultTime.Add(tc.scheduleToStartTimeout), actualTask.Attributes.ScheduledTime) + case *activitypb.ScheduleToCloseTimeoutTask: + require.Equal(t, defaultTime.Add(tc.scheduleToCloseTimeout), actualTask.Attributes.ScheduledTime) + default: + t.Fatalf("unexpected task payload type at index %d: %T", i, actualTask.Payload) + } + 
+			}
+		})
+	}
+}
+
+// TestTransitionRescheduled verifies that a retried attempt increments the
+// attempt count, records the retry interval and failure details without
+// touching the final outcome, and re-schedules dispatch (plus the
+// schedule-to-start timer when configured), offset by the retry interval.
+func TestTransitionRescheduled(t *testing.T) {
+	testCases := []struct {
+		name                   string
+		startingAttemptCount   int32
+		expectedTasks          []chasm.MockTask
+		expectedRetryInterval  time.Duration
+		scheduleToStartTimeout time.Duration
+		timeoutType            enumspb.TimeoutType
+	}{
+		{
+			name:                 "second attempt - timeout recorded",
+			startingAttemptCount: 1,
+			expectedTasks: []chasm.MockTask{
+				{Payload: &activitypb.ScheduleToStartTimeoutTask{}},
+				{Payload: &activitypb.ActivityDispatchTask{}},
+			},
+			expectedRetryInterval:  2 * time.Second,
+			scheduleToStartTimeout: defaultScheduleToStartTimeout,
+			timeoutType:            enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
+		},
+		{
+			name:                 "third attempt - timeout recorded",
+			startingAttemptCount: 2,
+			expectedTasks: []chasm.MockTask{
+				{Payload: &activitypb.ScheduleToStartTimeoutTask{}},
+				{Payload: &activitypb.ActivityDispatchTask{}},
+			},
+			expectedRetryInterval:  4 * time.Second,
+			scheduleToStartTimeout: defaultScheduleToStartTimeout,
+			timeoutType:            enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
+		},
+		{
+			name:                 "no schedule to start timeout",
+			startingAttemptCount: 1,
+			expectedTasks: []chasm.MockTask{
+				{Payload: &activitypb.ActivityDispatchTask{}},
+			},
+			expectedRetryInterval:  2 * time.Second,
+			scheduleToStartTimeout: 0,
+			timeoutType:            enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
+		},
+		{
+			name:                 "heartbeat timeout - timeout recorded",
+			startingAttemptCount: 1,
+			expectedTasks: []chasm.MockTask{
+				{Payload: &activitypb.ScheduleToStartTimeoutTask{}},
+				{Payload: &activitypb.ActivityDispatchTask{}},
+			},
+			expectedRetryInterval:  2 * time.Second,
+			scheduleToStartTimeout: defaultScheduleToStartTimeout,
+			timeoutType:            enumspb.TIMEOUT_TYPE_HEARTBEAT,
+		},
+		{
+			name:                 "reschedule from failure",
+			startingAttemptCount: 1,
+			expectedTasks: []chasm.MockTask{
+				{Payload: &activitypb.ScheduleToStartTimeoutTask{}},
+				{Payload: &activitypb.ActivityDispatchTask{}},
+			},
+			expectedRetryInterval:  2 * time.Second,
+			scheduleToStartTimeout: defaultScheduleToStartTimeout,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx := &chasm.MockMutableContext{}
+			ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime }
+			attemptState := &activitypb.ActivityAttemptState{Count: tc.startingAttemptCount}
+			outcome := &activitypb.ActivityOutcome{}
+
+			activity := &Activity{
+				ActivityState: &activitypb.ActivityState{
+					ActivityType:           &commonpb.ActivityType{Name: "test-activity-type"},
+					RetryPolicy:            defaultRetryPolicy,
+					ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout),
+					ScheduleToStartTimeout: durationpb.New(tc.scheduleToStartTimeout),
+					StartToCloseTimeout:    durationpb.New(defaultStartToCloseTimeout),
+					Status:                 activitypb.ACTIVITY_EXECUTION_STATUS_STARTED,
+					TaskQueue:              &taskqueuepb.TaskQueue{Name: "test-task-queue"},
+				},
+				LastAttempt: chasm.NewDataField(ctx, attemptState),
+				Outcome:     chasm.NewDataField(ctx, outcome),
+			}
+
+			event := rescheduleEvent{
+				retryInterval: tc.expectedRetryInterval,
+				failure:       createStartToCloseTimeoutFailure(),
+				timeoutType:   tc.timeoutType,
+			}
+
+			err := TransitionRescheduled.Apply(activity, ctx, event)
+ require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, activity.Status) + require.Equal(t, tc.startingAttemptCount+1, attemptState.Count) + protorequire.ProtoEqual(t, durationpb.New(tc.expectedRetryInterval), attemptState.GetCurrentRetryInterval()) + + // Verify attempt state failure details updated correctly + lastFailureDetails := attemptState.GetLastFailureDetails() + require.NotNil(t, lastFailureDetails.GetFailure()) + require.Equal(t, lastFailureDetails.GetTime(), attemptState.GetCompleteTime()) + // This should remain nil on intermediate retry attempts. The final attempt goes directly via TransitionTimedOut. + require.Nil(t, outcome.GetVariant()) + + // Verify added tasks + require.Len(t, ctx.Tasks, len(tc.expectedTasks)) + for i, expectedTask := range tc.expectedTasks { + actualTask := ctx.Tasks[i] + + switch expectedTask.Payload.(type) { + case *activitypb.ActivityDispatchTask: + _, ok := actualTask.Payload.(*activitypb.ActivityDispatchTask) + require.True(t, ok, "expected ActivityDispatchTask at index %d", i) + require.Equal(t, defaultTime.Add(tc.expectedRetryInterval), actualTask.Attributes.ScheduledTime) + case *activitypb.ScheduleToStartTimeoutTask: + _, ok := actualTask.Payload.(*activitypb.ScheduleToStartTimeoutTask) + require.True(t, ok, "expected ScheduleToStartTimeoutTask at index %d", i) + require.Equal(t, defaultTime.Add(tc.scheduleToStartTimeout).Add(tc.expectedRetryInterval), actualTask.Attributes.ScheduledTime) + default: + t.Fatalf("unexpected task payload type at index %d: %T", i, actualTask.Payload) + } + + } + }) + } +} + +func TestTransitionStarted(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{ + Count: 1, + StartedTime: timestamppb.New(defaultTime), + } + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + 
RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + err := TransitionStarted.Apply(activity, ctx, &historyservice.RecordActivityTaskStartedRequest{ + PollRequest: &workflowservice.PollActivityTaskQueueRequest{ + Identity: "test-worker", + }, + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, defaultTime, attemptState.StartedTime.AsTime()) + require.Equal(t, "test-worker", attemptState.LastWorkerIdentity) + + // Verify added tasks + require.Len(t, ctx.Tasks, 1) + _, ok := ctx.Tasks[0].Payload.(*activitypb.StartToCloseTimeoutTask) + require.True(t, ok, "expected ScheduleToStartTimeoutTask") + require.Equal(t, defaultTime.Add(defaultStartToCloseTimeout), ctx.Tasks[0].Attributes.ScheduledTime) +} + +func TestTransitionTimedout(t *testing.T) { + testCases := []struct { + name string + startStatus activitypb.ActivityExecutionStatus + timeoutType enumspb.TimeoutType + attemptCount int32 + }{ + { + name: "schedule to start timeout", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START, + attemptCount: 2, + }, + { + name: "schedule to close timeout from scheduled status", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_SCHEDULED, + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + attemptCount: 3, + }, + { + name: "schedule to close timeout from started status", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + timeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + attemptCount: 4, + }, + { + name: "start 
to close timeout", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + timeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + attemptCount: 5, + }, + { + name: "heartbeat timeout", + startStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + timeoutType: enumspb.TIMEOUT_TYPE_HEARTBEAT, + attemptCount: 2, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + attemptState := &activitypb.ActivityAttemptState{Count: tc.attemptCount} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: tc.startStatus, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + timeoutTag := metrics.StringTag("timeout_type", tc.timeoutType.String()) + + counterTimeout := metrics.NewMockCounterIface(controller) + counterTimeout.EXPECT().Record(int64(1), timeoutTag).Times(1) + 
metricsHandler.EXPECT().Counter(metrics.ActivityTimeout.Name()).Return(counterTimeout)
+
+			counterTaskTimeout := metrics.NewMockCounterIface(controller)
+			counterTaskTimeout.EXPECT().Record(int64(1), timeoutTag).Times(1)
+			metricsHandler.EXPECT().Counter(metrics.ActivityTaskTimeout.Name()).Return(counterTaskTimeout)
+
+			event := timeoutEvent{
+				timeoutType:    tc.timeoutType,
+				metricsHandler: metricsHandler,
+			}
+
+			err := TransitionTimedOut.Apply(activity, ctx, event)
+			require.NoError(t, err)
+			require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_TIMED_OUT, activity.Status)
+			require.Equal(t, tc.attemptCount, attemptState.Count)
+
+			switch tc.timeoutType {
+			case enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START,
+				enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE:
+				// Timeout failure is recorded in outcome but not attempt state
+				require.Nil(t, attemptState.GetLastFailureDetails())
+				require.Nil(t, attemptState.GetCompleteTime())
+				require.NotNil(t, outcome.GetFailed().GetFailure())
+			case enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
+				enumspb.TIMEOUT_TYPE_HEARTBEAT:
+				// Timeout failure is recorded in attempt state only. TransitionTimedOut should only be called when there
+				// are no more retries. Retries go through TransitionRescheduled. 
+ require.NotNil(t, attemptState.GetLastFailureDetails().GetFailure()) + require.NotNil(t, attemptState.GetLastFailureDetails().GetTime()) + require.NotNil(t, attemptState.GetCompleteTime()) + require.Nil(t, attemptState.GetCurrentRetryInterval()) + require.Nil(t, outcome.GetVariant()) + + default: + t.Fatalf("unexpected timeout type: %v", tc.timeoutType) + } + + require.Empty(t, ctx.Tasks) + }) + } +} + +func TestTransitionCompleted(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + payload := payloads.EncodeString("Done") + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + counterSuccess := metrics.NewMockCounterIface(controller) + 
counterSuccess.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivitySuccess.Name()).Return(counterSuccess) + + req := &historyservice.RespondActivityTaskCompletedRequest{ + CompleteRequest: &workflowservice.RespondActivityTaskCompletedRequest{ + Result: payload, + Identity: "worker", + }, + } + + err := TransitionCompleted.Apply(activity, ctx, completeEvent{ + req: req, + metricsHandler: metricsHandler, + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_COMPLETED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, "worker", attemptState.GetLastWorkerIdentity()) + require.NotNil(t, attemptState.GetCompleteTime()) + protorequire.ProtoEqual(t, payload, outcome.GetSuccessful().GetOutput()) +} + +func TestTransitionFailed(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + heartbeatState := &activitypb.ActivityHeartbeatState{} + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + LastHeartbeat: chasm.NewDataField(ctx, heartbeatState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + heartbeatDetails := payloads.EncodeString("Heartbeat") + failure := &failurepb.Failure{ + Message: "Failed Activity", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ApplicationFailureInfo: 
&failurepb.ApplicationFailureInfo{ + Type: "Test", + NonRetryable: true, + }}, + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + counterFail := metrics.NewMockCounterIface(controller) + counterFail.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityFail.Name()).Return(counterFail) + + counterTaskFail := metrics.NewMockCounterIface(controller) + counterTaskFail.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityTaskFail.Name()).Return(counterTaskFail) + + req := &historyservice.RespondActivityTaskFailedRequest{ + FailedRequest: &workflowservice.RespondActivityTaskFailedRequest{ + Failure: failure, + LastHeartbeatDetails: heartbeatDetails, + Identity: "worker", + }, + } + + err := TransitionFailed.Apply(activity, ctx, failedEvent{ + req: req, + metricsHandler: metricsHandler, + }) + + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_FAILED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, "worker", attemptState.GetLastWorkerIdentity()) + require.NotNil(t, attemptState.GetCompleteTime()) + protorequire.ProtoEqual(t, heartbeatDetails, heartbeatState.GetDetails()) + require.NotNil(t, heartbeatState.GetRecordedTime()) + protorequire.ProtoEqual(t, failure, attemptState.GetLastFailureDetails().GetFailure()) + require.NotNil(t, attemptState.GetLastFailureDetails().GetTime()) + require.Nil(t, 
outcome.GetFailed()) +} + +func TestTransitionTerminated(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{ + Count: 1, + LastWorkerIdentity: "worker", + } + outcome := &activitypb.ActivityOutcome{} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + counterTerminate := metrics.NewMockCounterIface(controller) + counterTerminate.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityTerminate.Name()).Return(counterTerminate) + + identity := "terminator" + req := chasm.TerminateComponentRequest{ + Reason: "Test Termination", + Identity: identity, + RequestID: "test-request-id", + } + + err := TransitionTerminated.Apply(activity, ctx, terminateEvent{ + request: req, + metricsHandler: metricsHandler, + fromStatus: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_TERMINATED, activity.Status) + require.EqualValues(t, 1, attemptState.Count) + require.Equal(t, "worker", attemptState.GetLastWorkerIdentity()) + require.Equal(t, "test-request-id", activity.GetTerminateState().RequestId) + + expectedFailure := &failurepb.Failure{ + Message: "Test Termination", + FailureInfo: 
&failurepb.Failure_TerminatedFailureInfo{ + TerminatedFailureInfo: &failurepb.TerminatedFailureInfo{ + Identity: identity, + }, + }, + } + protorequire.ProtoEqual(t, expectedFailure, outcome.GetFailed().GetFailure()) +} + +func TestTransitionCancelRequested(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_STARTED, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + } + + err := TransitionCancelRequested.Apply(activity, ctx, &workflowservice.RequestCancelActivityExecutionRequest{ + RequestId: "cancel-request", + Reason: "Test Cancel Requested", + Identity: "worker", + }) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, activity.Status) + + cancelState := activity.CancelState + + require.Equal(t, "cancel-request", cancelState.GetRequestId()) + require.Equal(t, "worker", cancelState.GetIdentity()) + require.Equal(t, "Test Cancel Requested", cancelState.GetReason()) + require.NotNil(t, cancelState.GetRequestTime()) +} + +func TestTransitionCanceled(t *testing.T) { + ctx := &chasm.MockMutableContext{} + ctx.HandleNow = func(chasm.Component) time.Time { return defaultTime } + attemptState := &activitypb.ActivityAttemptState{Count: 1} + outcome := &activitypb.ActivityOutcome{} + identity := "canceler" + + activity := &Activity{ + ActivityState: &activitypb.ActivityState{ + ActivityType: &commonpb.ActivityType{Name: "test-activity-type"}, + RetryPolicy: defaultRetryPolicy, + ScheduleToCloseTimeout: 
durationpb.New(defaultScheduleToCloseTimeout), + ScheduleToStartTimeout: durationpb.New(defaultScheduleToStartTimeout), + StartToCloseTimeout: durationpb.New(defaultStartToCloseTimeout), + Status: activitypb.ACTIVITY_EXECUTION_STATUS_CANCEL_REQUESTED, + TaskQueue: &taskqueuepb.TaskQueue{Name: "test-task-queue"}, + CancelState: &activitypb.ActivityCancelState{ + Identity: identity, + }, + }, + LastAttempt: chasm.NewDataField(ctx, attemptState), + Outcome: chasm.NewDataField(ctx, outcome), + } + + controller := gomock.NewController(t) + metricsHandler := metrics.NewMockHandler(controller) + + timerStartToCloseLatency := metrics.NewMockTimerIface(controller) + timerStartToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityStartToCloseLatency.Name()).Return(timerStartToCloseLatency) + + timerScheduleToCloseLatency := metrics.NewMockTimerIface(controller) + timerScheduleToCloseLatency.EXPECT().Record(gomock.Any()).Times(1) + metricsHandler.EXPECT().Timer(metrics.ActivityScheduleToCloseLatency.Name()).Return(timerScheduleToCloseLatency) + + counterCancel := metrics.NewMockCounterIface(controller) + counterCancel.EXPECT().Record(int64(1)).Times(1) + metricsHandler.EXPECT().Counter(metrics.ActivityCancel.Name()).Return(counterCancel) + + event := cancelEvent{ + details: payloads.EncodeString("Details"), + handler: metricsHandler, + } + + err := TransitionCanceled.Apply(activity, ctx, event) + require.NoError(t, err) + require.Equal(t, activitypb.ACTIVITY_EXECUTION_STATUS_CANCELED, activity.Status) + + expectedFailure := &failurepb.Failure{ + Message: "Activity canceled", + FailureInfo: &failurepb.Failure_CanceledFailureInfo{ + CanceledFailureInfo: &failurepb.CanceledFailureInfo{ + Details: payloads.EncodeString("Details"), + Identity: identity, + }, + }, + } + protorequire.ProtoEqual(t, expectedFailure, outcome.GetFailed().GetFailure()) +} diff --git a/chasm/lib/activity/validator.go b/chasm/lib/activity/validator.go new 
file mode 100644 index 00000000000..e1c1ddb9797 --- /dev/null +++ b/chasm/lib/activity/validator.go @@ -0,0 +1,495 @@ +package activity + +import ( + "github.com/google/uuid" + activitypb "go.temporal.io/api/activity/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/priorities" + "go.temporal.io/server/common/retrypolicy" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/tqid" + "google.golang.org/protobuf/types/known/durationpb" +) + +// ValidateAndNormalizeStandaloneActivity validates and normalizes the attributes for a standalone activity. +func ValidateAndNormalizeStandaloneActivity( + activityID string, + activityType string, + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings], + maxIDLengthLimit int, + namespaceID namespace.ID, + options *activitypb.ActivityOptions, + priority *commonpb.Priority, + runTimeout *durationpb.Duration, +) error { + // Standalone activities always use user defined task queues, so we can enforce user defined task queue validation + if err := tqid.NormalizeAndValidateUserDefined(options.TaskQueue, "", "", maxIDLengthLimit); err != nil { + return err + } + + return validateAndNormalizeActivityAttributes( + activityID, + activityType, + getDefaultActivityRetrySettings, + maxIDLengthLimit, + namespaceID, + options, + priority, + runTimeout) +} + +// ValidateAndNormalizeEmbeddedActivity validates and normalizes the attributes for an embedded activity. 
+func ValidateAndNormalizeEmbeddedActivity( + activityID string, + activityType string, + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings], + maxIDLengthLimit int, + namespaceID namespace.ID, + options *activitypb.ActivityOptions, + priority *commonpb.Priority, + runTimeout *durationpb.Duration, + workflowTaskQueueName string, +) error { + if err := tqid.NormalizeAndValidateUserDefined(options.TaskQueue, "", workflowTaskQueueName, maxIDLengthLimit); err != nil { + return err + } + + return validateAndNormalizeActivityAttributes( + activityID, + activityType, + getDefaultActivityRetrySettings, + maxIDLengthLimit, + namespaceID, + options, + priority, + runTimeout) +} + +// ValidateAndNormalizeActivityAttributes validates and normalizes the common activity request attributes. +// This validation is shared by both standalone and embedded activities. +// IMPORTANT: this method mutates the input params; in cases where it's critical to maintain immutability +// (i.e., when incoming request can potentially be retried), clone the params first before passing it in. +// +// The timeout normalization logic is as follows: +// 1. If ScheduleToClose is set, fill in missing ScheduleToStart and StartToClose from ScheduleToClose +// 2. If StartToClose is set but ScheduleToClose is not set, set ScheduleToClose to runTimeout, and fill in missing ScheduleToStart from runTimeout +// 3. If neither ScheduleToClose nor StartToClose is set, return error +// 4. Ensure all timeouts do not exceed runTimeout if runTimeout is set (>0) +// 5. 
Ensure HeartbeatTimeout does not exceed StartToClose +func validateAndNormalizeActivityAttributes( + activityID string, + activityType string, + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings], + maxIDLengthLimit int, + namespaceID namespace.ID, + options *activitypb.ActivityOptions, + priority *commonpb.Priority, + runTimeout *durationpb.Duration, +) error { + if activityID == "" { + return serviceerror.NewInvalidArgument("activityId is not set") + } + if activityType == "" { + return serviceerror.NewInvalidArgument("activityType is not set") + } + + if err := validateActivityRetryPolicy(namespaceID, options.RetryPolicy, getDefaultActivityRetrySettings); err != nil { + return err + } + + if len(activityID) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activityId exceeds length limit. Length=%d Limit=%d", + len(activityID), maxIDLengthLimit) + } + if len(activityType) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activityType exceeds length limit. 
Length=%d Limit=%d", + len(activityType), maxIDLengthLimit) + } + + if err := priorities.Validate(priority); err != nil { + return serviceerror.NewInvalidArgumentf("invalid priorities: %v", err) + } + + return validateAndNormalizeTimeouts(activityID, + activityType, + runTimeout, + options) +} + +func validateStartDelay(startDelay *durationpb.Duration) error { + if err := timestamp.ValidateAndCapProtoDuration(startDelay); err != nil { + return serviceerror.NewInvalidArgumentf("invalid StartDelay: %v", err) + } + return nil +} + +func validateActivityRetryPolicy( + namespaceID namespace.ID, + retryPolicy *commonpb.RetryPolicy, + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings], +) error { + if retryPolicy == nil { + return nil + } + // TODO(saa-preview): this is a namespace setting, not a namespace id setting + defaultActivityRetrySettings := getDefaultActivityRetrySettings(namespaceID.String()) + retrypolicy.EnsureDefaults(retryPolicy, defaultActivityRetrySettings) + return retrypolicy.Validate(retryPolicy) +} + +func validateAndNormalizeTimeouts( + activityID string, + activityType string, + runTimeout *durationpb.Duration, + options *activitypb.ActivityOptions, +) error { + // Only attempt to deduce and fill in unspecified timeouts only when all timeouts are non-negative. 
+ if err := timestamp.ValidateAndCapProtoDuration(options.GetScheduleToCloseTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("invalid ScheduleToCloseTimeout: %v", err) + } + if err := timestamp.ValidateAndCapProtoDuration(options.GetScheduleToStartTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("invalid ScheduleToStartTimeout: %v", err) + } + if err := timestamp.ValidateAndCapProtoDuration(options.GetStartToCloseTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("invalid StartToCloseTimeout: %v", err) + } + if err := timestamp.ValidateAndCapProtoDuration(options.GetHeartbeatTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("invalid HeartbeatTimeout: %v", err) + } + + scheduleToCloseSet := options.GetScheduleToCloseTimeout().AsDuration() > 0 + scheduleToStartSet := options.GetScheduleToStartTimeout().AsDuration() > 0 + startToCloseSet := options.GetStartToCloseTimeout().AsDuration() > 0 + + if scheduleToCloseSet { + if scheduleToStartSet { + options.ScheduleToStartTimeout = timestamp.MinDurationPtr(options.ScheduleToStartTimeout, options.ScheduleToCloseTimeout) + } else { + options.ScheduleToStartTimeout = options.ScheduleToCloseTimeout + } + if startToCloseSet { + options.StartToCloseTimeout = timestamp.MinDurationPtr(options.StartToCloseTimeout, options.ScheduleToCloseTimeout) + } else { + options.StartToCloseTimeout = options.ScheduleToCloseTimeout + } + } else if startToCloseSet { + // We are in !validScheduleToClose due to the first if above + options.ScheduleToCloseTimeout = runTimeout + if !scheduleToStartSet { + options.ScheduleToStartTimeout = runTimeout + } + } else { + // Deduction failed as there's not enough information to fill in missing timeouts. + return serviceerror.NewInvalidArgumentf("a valid StartToClose or ScheduleToCloseTimeout is not set on ScheduleActivityTaskCommand. 
ActivityId=%s ActivityType=%s", + activityID, activityType) + } + // ensure activity timeout never larger than workflow timeout + if runTimeout.AsDuration() > 0 { + runTimeoutDur := runTimeout.AsDuration() + if options.ScheduleToCloseTimeout.AsDuration() > runTimeoutDur { + options.ScheduleToCloseTimeout = runTimeout + } + if options.ScheduleToStartTimeout.AsDuration() > runTimeoutDur { + options.ScheduleToStartTimeout = runTimeout + } + if options.StartToCloseTimeout.AsDuration() > runTimeoutDur { + options.StartToCloseTimeout = runTimeout + } + if options.HeartbeatTimeout.AsDuration() > runTimeoutDur { + options.HeartbeatTimeout = runTimeout + } + } + + options.HeartbeatTimeout = timestamp.MinDurationPtr(options.HeartbeatTimeout, options.StartToCloseTimeout) + + return nil +} + +func validateAndNormalizeIDPolicy(req *workflowservice.StartActivityExecutionRequest) error { + if req.GetIdReusePolicy() == enumspb.ACTIVITY_ID_REUSE_POLICY_UNSPECIFIED { + req.IdReusePolicy = enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE + } + + if req.GetIdConflictPolicy() == enumspb.ACTIVITY_ID_CONFLICT_POLICY_UNSPECIFIED { + req.IdConflictPolicy = enumspb.ACTIVITY_ID_CONFLICT_POLICY_FAIL + } + + return nil +} + +func validateBlobSize( + activityID string, + blobSizeViolationTagValue string, + blobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter, + blobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter, + blobSize int, + logger log.Logger, + namespaceName string, +) error { + sizeWarnLimit := blobSizeLimitWarn(namespaceName) + sizeErrorLimit := blobSizeLimitError(namespaceName) + + if blobSize > sizeWarnLimit { + logger.Warn("Activity blob size exceeds the warning limit.", + tag.WorkflowNamespace(namespaceName), + tag.ActivityID(activityID), + tag.ActivitySize(int64(blobSize)), + tag.BlobSizeViolationOperation(blobSizeViolationTagValue)) + } + + if blobSize > sizeErrorLimit { + return common.ErrBlobSizeExceedsLimit + } + + return nil +} + +func 
validateAndNormalizeSearchAttributes( + req *workflowservice.StartActivityExecutionRequest, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) error { + namespaceName := req.GetNamespace() + + // Unalias search attributes for validation. + saToValidate := req.SearchAttributes + if saMapperProvider != nil && saToValidate != nil { + var err error + saToValidate, err = searchattribute.UnaliasFields(saMapperProvider, saToValidate, namespaceName) + if err != nil { + return err + } + } + + if err := saValidator.Validate(saToValidate, namespaceName); err != nil { + return err + } + + return saValidator.ValidateSize(saToValidate, namespaceName) +} + +func validateDescribeActivityExecutionRequest( + req *workflowservice.DescribeActivityExecutionRequest, + maxIDLengthLimit int, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + hasRunID := req.GetRunId() != "" + hasLongPollToken := len(req.GetLongPollToken()) > 0 + + if hasLongPollToken && !hasRunID { + return serviceerror.NewInvalidArgument("run id is required when long poll token is provided") + } + if hasRunID { + _, err := uuid.Parse(req.GetRunId()) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + return nil +} + +func validatePollActivityExecutionRequest( + req *workflowservice.PollActivityExecutionRequest, + maxIDLengthLimit int, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. 
Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + if runID := req.GetRunId(); runID != "" { + _, err := uuid.Parse(runID) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + return nil +} + +func validateAndNormalizeStartRequest( + req *workflowservice.StartActivityExecutionRequest, + maxIDLengthLimit int, + blobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter, + blobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter, + logger log.Logger, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) error { + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } else if len(req.GetRequestId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("request ID exceeds length limit. Length=%d Limit=%d", + len(req.GetRequestId()), maxIDLengthLimit) + } + + if len(req.GetIdentity()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("identity exceeds length limit. 
Length=%d Limit=%d", + len(req.GetIdentity()), maxIDLengthLimit) + } + + if err := validateAndNormalizeIDPolicy(req); err != nil { + return err + } + + if err := validateBlobSize( + req.GetActivityId(), + "StartActivityExecution", + blobSizeLimitError, + blobSizeLimitWarn, + req.Input.Size(), + logger, + req.GetNamespace()); err != nil { + return serviceerror.NewInvalidArgument("input exceeds length limit") + } + + if req.GetSearchAttributes() != nil { + if err := validateAndNormalizeSearchAttributes( + req, + saMapperProvider, + saValidator); err != nil { + return err + } + } + + return nil +} + +func validateAndNormalizeCancelRequest( + req *workflowservice.RequestCancelActivityExecutionRequest, + maxIDLengthLimit int, + blobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter, + blobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter, + logger log.Logger, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } else if len(req.GetRequestId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("request ID exceeds length limit. Length=%d Limit=%d", + len(req.GetRequestId()), maxIDLengthLimit) + } + + if len(req.GetIdentity()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("identity exceeds length limit. 
Length=%d Limit=%d", + len(req.GetIdentity()), maxIDLengthLimit) + } + + if runID := req.GetRunId(); runID != "" { + _, err := uuid.Parse(runID) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + + err := validateBlobSize( + req.GetActivityId(), + "RequestCancelActivityExecution", + blobSizeLimitError, + blobSizeLimitWarn, + len(req.GetReason()), + logger, + req.GetNamespace()) + if err != nil { + return serviceerror.NewInvalidArgument("reason exceeds length limit") + } + + return nil +} + +func validateAndNormalizeDeleteRequest( + req *workflowservice.DeleteActivityExecutionRequest, + maxIDLengthLimit int, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + + if runID := req.GetRunId(); runID != "" { + _, err := uuid.Parse(runID) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + + return nil +} + +func validateAndNormalizeTerminateRequest( + req *workflowservice.TerminateActivityExecutionRequest, + maxIDLengthLimit int, + blobSizeLimitError dynamicconfig.IntPropertyFnWithNamespaceFilter, + blobSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter, + logger log.Logger, +) error { + if req.GetActivityId() == "" { + return serviceerror.NewInvalidArgument("activity ID is required") + } + + if len(req.GetActivityId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("activity ID exceeds length limit. Length=%d Limit=%d", + len(req.GetActivityId()), maxIDLengthLimit) + } + + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } else if len(req.GetRequestId()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("request ID exceeds length limit. 
Length=%d Limit=%d", + len(req.GetRequestId()), maxIDLengthLimit) + } + + if len(req.GetIdentity()) > maxIDLengthLimit { + return serviceerror.NewInvalidArgumentf("identity exceeds length limit. Length=%d Limit=%d", + len(req.GetIdentity()), maxIDLengthLimit) + } + + if runID := req.GetRunId(); runID != "" { + _, err := uuid.Parse(runID) + if err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + + err := validateBlobSize( + req.GetActivityId(), + "TerminateActivityExecution", + blobSizeLimitError, + blobSizeLimitWarn, + len(req.GetReason()), + logger, + req.GetNamespace()) + if err != nil { + return serviceerror.NewInvalidArgument("reason exceeds length limit") + } + + return nil +} diff --git a/chasm/lib/activity/validator_test.go b/chasm/lib/activity/validator_test.go new file mode 100644 index 00000000000..7ebc7415223 --- /dev/null +++ b/chasm/lib/activity/validator_test.go @@ -0,0 +1,722 @@ +package activity + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" + activitypb "go.temporal.io/api/activity/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/retrypolicy" + "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + defaultActivityID = "test-activity-id" + defaultActivityType = "test-activity-type" + defaultTaskQueue = "test-task-queue" + defaultMaxIDLengthLimit = 1000 + defaultNamespaceID = "default" +) + +var ( + defaultActivityOptions = activitypb.ActivityOptions{ + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + 
ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + } + + defaultPriority = commonpb.Priority{FairnessKey: "normal"} + + defaultBlobSizeLimitError = func(ns string) int { + return 64 + } + defaultBlobSizeLimitWarn = func(ns string) int { + return 32 + } +) + +func TestValidateSuccess(t *testing.T) { + t.Run("StandaloneActivitySuccess", func(t *testing.T) { + err := ValidateAndNormalizeStandaloneActivity( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + &defaultActivityOptions, + &defaultPriority, + durationpb.New(0)) + require.NoError(t, err) + }) + + t.Run("EmbeddedActivitySuccess", func(t *testing.T) { + err := ValidateAndNormalizeEmbeddedActivity( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + &defaultActivityOptions, + &defaultPriority, + durationpb.New(0), + defaultTaskQueue) + require.NoError(t, err) + }) +} + +func TestValidateAllActivityFailures(t *testing.T) { + cases := []struct { + name string + activityID string + activityType string + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings] + maxIDLengthLimit int + namespaceID namespace.ID + options *activitypb.ActivityOptions + priority *commonpb.Priority + runTimeout *durationpb.Duration + expectedErrMessage string + }{ + { + name: "Empty ActivityId", + activityID: "", + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "activityId is not set", + }, + { + name: "Empty ActivityType", + activityID: defaultActivityID, + activityType: "", + getDefaultActivityRetrySettings: getDefaultRetrySettings, + 
maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "activityType is not set", + }, + { + name: "ActivityId exceeds length limit", + activityID: string(make([]byte, 1001)), + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: fmt.Sprintf("activityId exceeds length limit. Length=%d Limit=%d", 1001, defaultMaxIDLengthLimit), + }, + { + name: "ActivityType exceeds length limit", + activityID: defaultActivityID, + activityType: string(make([]byte, 1001)), + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: fmt.Sprintf("activityType exceeds length limit. 
Length=%d Limit=%d", 1001, defaultMaxIDLengthLimit), + }, + { + name: "Negative ScheduleToCloseTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "invalid ScheduleToCloseTimeout", + }, + { + name: "Negative ScheduleToStartTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + ScheduleToStartTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "invalid ScheduleToStartTimeout", + }, + { + name: "Negative StartToCloseTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + StartToCloseTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "invalid StartToCloseTimeout", + }, + { + name: "Negative HeartbeatTimeout", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ 
+ TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + HeartbeatTimeout: durationpb.New(-1 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "invalid HeartbeatTimeout", + }, + { + name: "Invalid Priority", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &defaultActivityOptions, + priority: &commonpb.Priority{FairnessKey: string(make([]byte, 1001))}, + runTimeout: nil, + expectedErrMessage: "invalid priorities", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := validateAndNormalizeActivityAttributes( + tc.activityID, + tc.activityType, + tc.getDefaultActivityRetrySettings, + tc.maxIDLengthLimit, + tc.namespaceID, + tc.options, + tc.priority, + durationpb.New(0)) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + if tc.expectedErrMessage != "" { + require.Contains(t, invalidArgErr.Error(), tc.expectedErrMessage) + } + }) + } +} + +func TestStandaloneActivityTaskQueueValidations(t *testing.T) { + cases := []struct { + name string + activityID string + activityType string + getDefaultActivityRetrySettings dynamicconfig.TypedPropertyFnWithNamespaceFilter[retrypolicy.DefaultRetrySettings] + maxIDLengthLimit int + namespaceID namespace.ID + options *activitypb.ActivityOptions + priority *commonpb.Priority + runTimeout *durationpb.Duration + expectedErrMessage string + }{ + { + name: "Disallow PerNSWorkerTaskQueue TaskQueue", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: 
primitives.PerNSWorkerTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: fmt.Sprintf("cannot use internal per-namespace task queue:%s", primitives.PerNSWorkerTaskQueue), + }, + { + name: "Disallow Internal TaskQueue Prefix", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: "/_sys/my-task-queue"}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "task queue name cannot start with reserved prefix /_sys/", + }, + { + name: "Disallow Empty TaskQueue", + activityID: defaultActivityID, + activityType: defaultActivityType, + getDefaultActivityRetrySettings: getDefaultRetrySettings, + maxIDLengthLimit: defaultMaxIDLengthLimit, + namespaceID: defaultNamespaceID, + options: &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: ""}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + }, + priority: &defaultPriority, + runTimeout: nil, + expectedErrMessage: "missing task queue name", + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + err := ValidateAndNormalizeStandaloneActivity( + tc.activityID, + tc.activityType, + tc.getDefaultActivityRetrySettings, + tc.maxIDLengthLimit, + tc.namespaceID, + tc.options, + tc.priority, + durationpb.New(0)) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Error(), tc.expectedErrMessage) + }) + } +} + +func TestEmbeddedActivityTaskQueueValidations(t *testing.T) { + t.Run("Allow PerNSWorkerTaskQueue TaskQueue", func(t *testing.T) { + options := &activitypb.ActivityOptions{ + TaskQueue: 
&taskqueuepb.TaskQueue{Name: primitives.PerNSWorkerTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + } + + err := ValidateAndNormalizeEmbeddedActivity( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + options, + &defaultPriority, + durationpb.New(0), + primitives.PerNSWorkerTaskQueue) + require.NoError(t, err) + }) + + t.Run("Disallow PerNSWorkerTaskQueue TaskQueue", func(t *testing.T) { + options := &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: primitives.PerNSWorkerTaskQueue}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + } + + err := ValidateAndNormalizeEmbeddedActivity( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + options, + &defaultPriority, + durationpb.New(0), + defaultTaskQueue) + + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Error(), "cannot use internal per-namespace task queue") + }) + + t.Run("Disallow Internal TaskQueue Prefix", func(t *testing.T) { + options := &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: "/_sys/my-task-queue"}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + } + + err := ValidateAndNormalizeEmbeddedActivity( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + options, + &defaultPriority, + durationpb.New(0), + defaultTaskQueue) + + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Error(), "task queue name cannot start with reserved prefix /_sys/") + }) + + t.Run("Disallow Empty TaskQueue", func(t *testing.T) { + options := &activitypb.ActivityOptions{ + TaskQueue: &taskqueuepb.TaskQueue{Name: ""}, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + 
} + + err := ValidateAndNormalizeEmbeddedActivity( + defaultActivityID, + defaultActivityType, + getDefaultRetrySettings, + defaultMaxIDLengthLimit, + defaultNamespaceID, + options, + &defaultPriority, + durationpb.New(0), + defaultTaskQueue) + + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Error(), "missing task queue name") + }) +} + +func newTestFrontendHandler( + blobSizeLimitError func(string) int, + blobSizeLimitWarn func(string) int, + maxIDLengthLimit int, +) *frontendHandler { + return &frontendHandler{ + config: &Config{ + BlobSizeLimitError: blobSizeLimitError, + BlobSizeLimitWarn: blobSizeLimitWarn, + MaxIDLengthLimit: func() int { return maxIDLengthLimit }, + }, + logger: log.NewNoopLogger(), + } +} + +func TestValidateStandAloneRequestIDTooLong(t *testing.T) { + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: string(make([]byte, 1001)), + Input: payloads.EncodeString("test-input"), + } + + h := newTestFrontendHandler(defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, defaultMaxIDLengthLimit) + err := validateAndNormalizeStartRequest(req, h.config.MaxIDLengthLimit(), h.config.BlobSizeLimitError, h.config.BlobSizeLimitWarn, h.logger, h.saMapperProvider, h.saValidator) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) +} + +func TestValidateStandAloneInputTooLarge(t *testing.T) { + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + 
InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: "test-request-id", + Input: payloads.EncodeString(string(make([]byte, 1000))), + } + + h := newTestFrontendHandler(defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, defaultMaxIDLengthLimit) + err := validateAndNormalizeStartRequest(req, h.config.MaxIDLengthLimit(), h.config.BlobSizeLimitError, h.config.BlobSizeLimitWarn, h.logger, h.saMapperProvider, h.saValidator) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) +} + +func TestValidateStandAloneInputWarningSizeShouldSucceed(t *testing.T) { + payload := payloads.EncodeString("test-input") + payloadSize := payload.Size() + + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + ScheduleToCloseTimeout: durationpb.New(10 * time.Second), + TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue}, + Namespace: "default", + RequestId: "test-request-id", + Input: payload, + } + + h := newTestFrontendHandler( + func(ns string) int { return payloadSize + 1 }, + func(ns string) int { return payloadSize }, + defaultMaxIDLengthLimit, + ) + err := validateAndNormalizeStartRequest(req, h.config.MaxIDLengthLimit(), h.config.BlobSizeLimitError, h.config.BlobSizeLimitWarn, h.logger, h.saMapperProvider, h.saValidator) + require.NoError(t, err) +} + +func TestValidateStandAlone_IDPolicyShouldDefault(t *testing.T) { + req := &workflowservice.StartActivityExecutionRequest{ + ActivityId: defaultActivityID, + ActivityType: &commonpb.ActivityType{Name: defaultActivityType}, + RetryPolicy: &commonpb.RetryPolicy{ + InitialInterval: durationpb.New(1 * time.Second), + }, + 
// TestValidateStandAlone_IDPolicyShouldDefault verifies that validation fills
// in default ID reuse/conflict policies when the request leaves them unset.
func TestValidateStandAlone_IDPolicyShouldDefault(t *testing.T) {
	req := &workflowservice.StartActivityExecutionRequest{
		ActivityId:   defaultActivityID,
		ActivityType: &commonpb.ActivityType{Name: defaultActivityType},
		RetryPolicy: &commonpb.RetryPolicy{
			InitialInterval: durationpb.New(1 * time.Second),
		},
		ScheduleToCloseTimeout: durationpb.New(10 * time.Second),
		TaskQueue:              &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
		Namespace:              "default",
		RequestId:              "test-request-id",
	}

	h := newTestFrontendHandler(defaultBlobSizeLimitError, defaultBlobSizeLimitWarn, defaultMaxIDLengthLimit)
	err := validateAndNormalizeStartRequest(req, h.config.MaxIDLengthLimit(), h.config.BlobSizeLimitError, h.config.BlobSizeLimitWarn, h.logger, h.saMapperProvider, h.saValidator)

	require.NoError(t, err)
	// Unset policies must default to ALLOW_DUPLICATE / FAIL respectively.
	require.Equal(t, enumspb.ACTIVITY_ID_REUSE_POLICY_ALLOW_DUPLICATE, req.IdReusePolicy)
	require.Equal(t, enumspb.ACTIVITY_ID_CONFLICT_POLICY_FAIL, req.IdConflictPolicy)
}

// TestModifiedActivityTimeouts exercises the timeout-normalization rules:
// missing timeouts are derived from ScheduleToClose or the run timeout, and
// each timeout is capped by the next-larger one (and ultimately by runTimeout).
func TestModifiedActivityTimeouts(t *testing.T) {
	cases := []struct {
		name       string
		options    *activitypb.ActivityOptions
		runTimeout *durationpb.Duration
		isErr      bool
		// validate inspects the normalized options after a successful call.
		validate func(t *testing.T, options *activitypb.ActivityOptions)
	}{
		{
			name: "ScheduleToClose set - fills in missing timeouts",
			options: &activitypb.ActivityOptions{
				TaskQueue:              &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
				ScheduleToCloseTimeout: durationpb.New(10 * time.Second),
			},
			runTimeout: durationpb.New(0),
			isErr:      false,
			validate: func(t *testing.T, options *activitypb.ActivityOptions) {
				require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration())
				require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration())
			},
		},
		{
			name: "StartToClose set but not ScheduleToClose - fills from runTimeout",
			options: &activitypb.ActivityOptions{
				TaskQueue:           &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
				StartToCloseTimeout: durationpb.New(5 * time.Second),
			},
			runTimeout: durationpb.New(20 * time.Second),
			isErr:      false,
			validate: func(t *testing.T, options *activitypb.ActivityOptions) {
				require.Equal(t, 20*time.Second, options.ScheduleToCloseTimeout.AsDuration())
				require.Equal(t, 20*time.Second, options.ScheduleToStartTimeout.AsDuration())
				require.Equal(t, 5*time.Second, options.StartToCloseTimeout.AsDuration())
				require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration())
			},
		},
		{
			// At least one of ScheduleToClose / StartToClose is required.
			name: "Neither ScheduleToClose nor StartToClose set - returns error",
			options: &activitypb.ActivityOptions{
				TaskQueue: &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
			},
			runTimeout: durationpb.New(0),
			isErr:      true,
			validate:   func(t *testing.T, options *activitypb.ActivityOptions) {},
		},
		{
			name: "ScheduleToClose and StartToClose set - StartToClose capped by ScheduleToClose",
			options: &activitypb.ActivityOptions{
				TaskQueue:              &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
				ScheduleToCloseTimeout: durationpb.New(10 * time.Second),
				StartToCloseTimeout:    durationpb.New(15 * time.Second),
			},
			runTimeout: durationpb.New(0),
			isErr:      false,
			validate: func(t *testing.T, options *activitypb.ActivityOptions) {
				require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration())
				require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration())
			},
		},
		{
			name: "ScheduleToStart capped by ScheduleToClose",
			options: &activitypb.ActivityOptions{
				TaskQueue:              &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
				ScheduleToCloseTimeout: durationpb.New(10 * time.Second),
				ScheduleToStartTimeout: durationpb.New(20 * time.Second),
			},
			runTimeout: durationpb.New(0),
			isErr:      false,
			validate: func(t *testing.T, options *activitypb.ActivityOptions) {
				require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration())
				require.Equal(t, 0*time.Second, options.HeartbeatTimeout.AsDuration())
			},
		},
		{
			name: "HeartbeatTimeout capped by StartToClose",
			options: &activitypb.ActivityOptions{
				TaskQueue:              &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
				ScheduleToCloseTimeout: durationpb.New(20 * time.Second),
				StartToCloseTimeout:    durationpb.New(10 * time.Second),
				HeartbeatTimeout:       durationpb.New(15 * time.Second),
			},
			runTimeout: durationpb.New(0),
			isErr:      false,
			validate: func(t *testing.T, options *activitypb.ActivityOptions) {
				require.Equal(t, 20*time.Second, options.ScheduleToCloseTimeout.AsDuration())
				require.Equal(t, 20*time.Second, options.ScheduleToStartTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.HeartbeatTimeout.AsDuration())
			},
		},
		{
			name: "All timeouts capped by runTimeout",
			options: &activitypb.ActivityOptions{
				TaskQueue:              &taskqueuepb.TaskQueue{Name: defaultTaskQueue},
				ScheduleToCloseTimeout: durationpb.New(30 * time.Second),
				ScheduleToStartTimeout: durationpb.New(25 * time.Second),
				StartToCloseTimeout:    durationpb.New(20 * time.Second),
				HeartbeatTimeout:       durationpb.New(15 * time.Second),
			},
			runTimeout: durationpb.New(10 * time.Second),
			isErr:      false,
			validate: func(t *testing.T, options *activitypb.ActivityOptions) {
				require.Equal(t, 10*time.Second, options.ScheduleToCloseTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.ScheduleToStartTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.StartToCloseTimeout.AsDuration())
				require.Equal(t, 10*time.Second, options.HeartbeatTimeout.AsDuration())
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			// Normalization mutates tc.options in place; validate inspects it after.
			err := validateAndNormalizeActivityAttributes(
				defaultActivityID,
				defaultActivityType,
				getDefaultRetrySettings,
				defaultMaxIDLengthLimit,
				defaultNamespaceID,
				tc.options,
				&defaultPriority,
				tc.runTimeout)

			if tc.isErr {
				var invalidArgErr *serviceerror.InvalidArgument
				require.ErrorAs(t, err, &invalidArgErr)
				return
			}

			require.NoError(t, err)
			tc.validate(t, tc.options)
		})
	}
}

// TestValidateDeleteActivityExecutionRequest covers delete-request validation:
// activity ID presence and length, and optional run ID format.
func TestValidateDeleteActivityExecutionRequest(t *testing.T) {
	t.Run("Success", func(t *testing.T) {
		req := &workflowservice.DeleteActivityExecutionRequest{
			ActivityId: defaultActivityID,
		}
		err := validateAndNormalizeDeleteRequest(req, defaultMaxIDLengthLimit)
		require.NoError(t, err)
	})

	t.Run("SuccessWithRunID", func(t *testing.T) {
		req := &workflowservice.DeleteActivityExecutionRequest{
			ActivityId: defaultActivityID,
			RunId:      "f47ac10b-58cc-4372-a567-0e02b2c3d479",
		}
		err := validateAndNormalizeDeleteRequest(req, defaultMaxIDLengthLimit)
		require.NoError(t, err)
	})

	t.Run("EmptyActivityID", func(t *testing.T) {
		req := &workflowservice.DeleteActivityExecutionRequest{
			ActivityId: "",
		}
		err := validateAndNormalizeDeleteRequest(req, defaultMaxIDLengthLimit)
		var invalidArgErr *serviceerror.InvalidArgument
		require.ErrorAs(t, err, &invalidArgErr)
	})

	t.Run("ActivityIDTooLong", func(t *testing.T) {
		req := &workflowservice.DeleteActivityExecutionRequest{
			ActivityId: string(make([]byte, defaultMaxIDLengthLimit+1)),
		}
		err := validateAndNormalizeDeleteRequest(req, defaultMaxIDLengthLimit)
		var invalidArgErr *serviceerror.InvalidArgument
		require.ErrorAs(t, err, &invalidArgErr)
	})

	t.Run("InvalidRunID", func(t *testing.T) {
		req := &workflowservice.DeleteActivityExecutionRequest{
			ActivityId: defaultActivityID,
			RunId:      "not-a-valid-uuid",
		}
		err := validateAndNormalizeDeleteRequest(req, defaultMaxIDLengthLimit)
		var invalidArgErr *serviceerror.InvalidArgument
		require.ErrorAs(t, err, &invalidArgErr)
	})
}
validateStartDelay(durationpb.New(0)) + require.NoError(t, err) + }) + + t.Run("ValidDuration", func(t *testing.T) { + err := validateStartDelay(durationpb.New(5 * time.Second)) + require.NoError(t, err) + }) + + t.Run("NegativeDuration", func(t *testing.T) { + err := validateStartDelay(durationpb.New(-1 * time.Second)) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, invalidArgErr.Message, "invalid StartDelay") + }) +} + +func getDefaultRetrySettings(_ string) retrypolicy.DefaultRetrySettings { + return retrypolicy.DefaultRetrySettings{ + InitialInterval: time.Second, + MaximumIntervalCoefficient: 100.0, + BackoffCoefficient: 2.0, + MaximumAttempts: 0, + } +} diff --git a/chasm/lib/buf.yaml b/chasm/lib/buf.yaml new file mode 100644 index 00000000000..e4e6ba71070 --- /dev/null +++ b/chasm/lib/buf.yaml @@ -0,0 +1,12 @@ +# buf rules for CHASM protos +version: v1 +deps: + - buf.build/googleapis/googleapis +breaking: + use: + - WIRE +lint: + use: + - DEFAULT + except: + - PACKAGE_DIRECTORY_MATCH diff --git a/chasm/lib/callback/component.go b/chasm/lib/callback/component.go new file mode 100644 index 00000000000..c017a7d2f30 --- /dev/null +++ b/chasm/lib/callback/component.go @@ -0,0 +1,181 @@ +package callback + +import ( + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/chasm" + callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/nexus/nexusrpc" + queueserrors "go.temporal.io/server/service/history/queues/errors" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type CompletionSource interface { + GetNexusCompletion(ctx chasm.Context, requestID string) (nexusrpc.CompleteOperationOptions, error) +} + +var _ chasm.Component = (*Callback)(nil) +var _ chasm.StateMachine[callbackspb.CallbackStatus] = (*Callback)(nil) + +// Callback 
represents a callback component in CHASM. +type Callback struct { + chasm.UnimplementedComponent + + // Persisted internal state + *callbackspb.CallbackState + + // Interface to retrieve Nexus operation completion data + CompletionSource chasm.ParentPtr[CompletionSource] +} + +func NewCallback( + requestID string, + registrationTime *timestamppb.Timestamp, + state *callbackspb.CallbackState, + cb *callbackspb.Callback, +) *Callback { + return &Callback{ + CallbackState: &callbackspb.CallbackState{ + RequestId: requestID, + RegistrationTime: registrationTime, + Callback: cb, + Status: callbackspb.CALLBACK_STATUS_STANDBY, + }, + } +} + +func (c *Callback) LifecycleState(_ chasm.Context) chasm.LifecycleState { + switch c.Status { + case callbackspb.CALLBACK_STATUS_SUCCEEDED: + return chasm.LifecycleStateCompleted + case callbackspb.CALLBACK_STATUS_FAILED: + return chasm.LifecycleStateFailed + default: + return chasm.LifecycleStateRunning + } +} + +func (c *Callback) StateMachineState() callbackspb.CallbackStatus { + return c.Status +} + +func (c *Callback) SetStateMachineState(status callbackspb.CallbackStatus) { + c.Status = status +} + +func (c *Callback) recordAttempt(ts time.Time) { + c.Attempt++ + c.LastAttemptCompleteTime = timestamppb.New(ts) +} + +//nolint:revive // context.Context is an input parameter for chasm.ReadComponent, not a function parameter +func (c *Callback) loadInvocationArgs( + ctx chasm.Context, + _ chasm.NoValue, +) (invocable, error) { + target := c.CompletionSource.Get(ctx) + + completion, err := target.GetNexusCompletion(ctx, c.RequestId) + if err != nil { + return nil, err + } + + callback := c.GetCallback().GetNexus() + if callback == nil { + return nil, queueserrors.NewUnprocessableTaskError( + fmt.Sprintf("unprocessable callback variant: %v", callback), + ) + } + + if callback.Url == chasm.NexusCompletionHandlerURL { + return invocableInternal{ + callback: callback, + attempt: c.Attempt, + completion: completion, + requestID: 
c.RequestId, + }, nil + } + return invocableOutbound{ + callback: callback, + completion: completion, + workflowID: ctx.ExecutionKey().BusinessID, + runID: ctx.ExecutionKey().RunID, + attempt: c.Attempt, + }, nil +} + +type saveResultInput struct { + result invocationResult + retryPolicy backoff.RetryPolicy +} + +func (c *Callback) saveResult( + ctx chasm.MutableContext, + input saveResultInput, +) (chasm.NoValue, error) { + switch r := input.result.(type) { + case invocationResultOK: + err := TransitionSucceeded.Apply(c, ctx, EventSucceeded{Time: ctx.Now(c)}) + return nil, err + case invocationResultRetry: + err := TransitionAttemptFailed.Apply(c, ctx, EventAttemptFailed{ + Time: ctx.Now(c), + Err: r.err, + RetryPolicy: input.retryPolicy, + }) + return nil, err + case invocationResultFail: + err := TransitionFailed.Apply(c, ctx, EventFailed{ + Time: ctx.Now(c), + Err: r.err, + }) + return nil, err + default: + return nil, queueserrors.NewUnprocessableTaskError( + fmt.Sprintf("unrecognized callback result %v", input.result), + ) + } +} + +// ToAPICallback converts a CHASM callback to API callback proto. +func (c *Callback) ToAPICallback() (*commonpb.Callback, error) { + // Convert CHASM callback proto to API callback proto + chasmCB := c.GetCallback() + res := &commonpb.Callback{ + Links: chasmCB.GetLinks(), + } + + // CHASM currently only supports Nexus callbacks + if variant, ok := chasmCB.Variant.(*callbackspb.Callback_Nexus_); ok { + res.Variant = &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: variant.Nexus.GetUrl(), + Header: variant.Nexus.GetHeader(), + }, + } + return res, nil + } + + // This should not happen as CHASM only supports Nexus callbacks currently + return nil, serviceerror.NewInternal("unsupported CHASM callback type") +} + +// ScheduleStandbyCallbacks transitions all STANDBY callbacks to SCHEDULED state, +// triggering their invocation. 
// ScheduleStandbyCallbacks transitions all STANDBY callbacks to SCHEDULED state,
// triggering their invocation. Used by both workflows and standalone activities
// when the execution reaches a terminal state.
func ScheduleStandbyCallbacks(ctx chasm.MutableContext, callbacks chasm.Map[string, *Callback]) error {
	for _, field := range callbacks {
		cb := field.Get(ctx)
		// Only STANDBY callbacks are eligible; callbacks already scheduled,
		// backing off, or in a terminal state are left untouched.
		if cb.Status != callbackspb.CALLBACK_STATUS_STANDBY {
			continue
		}
		if err := TransitionScheduled.Apply(cb, ctx, EventScheduled{}); err != nil {
			return err
		}
	}
	return nil
}

// MaxPerExecution caps how many callbacks may be attached to one execution.
var MaxPerExecution = dynamicconfig.NewNamespaceIntSetting(
	"callback.maxPerExecution",
	2000,
	`MaxPerExecution is the maximum number of callbacks that can be attached to an execution (workflow or standalone activity).`,
)

// RequestTimeout bounds a single callback HTTP request, per destination.
var RequestTimeout = dynamicconfig.NewDestinationDurationSetting(
	"callback.request.timeout",
	time.Second*10,
	`RequestTimeout is the timeout for executing a single callback request.`,
)

// RetryPolicyInitialInterval is the starting backoff between attempts.
var RetryPolicyInitialInterval = dynamicconfig.NewGlobalDurationSetting(
	"callback.retryPolicy.initialInterval",
	time.Second,
	`The initial backoff interval between every callback request attempt for a given callback.`,
)

// RetryPolicyMaximumInterval is the backoff ceiling between attempts.
var RetryPolicyMaximumInterval = dynamicconfig.NewGlobalDurationSetting(
	"callback.retryPolicy.maxInterval",
	time.Hour,
	`The maximum backoff interval between every callback request attempt for a given callback.`,
)

// Config bundles the dynamic configuration consumed by the callback task
// handlers: the per-destination request timeout and the retry policy factory.
type Config struct {
	RequestTimeout dynamicconfig.DurationPropertyFnWithDestinationFilter
	RetryPolicy    func() backoff.RetryPolicy
}
*dynamicconfig.Collection) *Config { + return &Config{ + RequestTimeout: RequestTimeout.Get(dc), + RetryPolicy: func() backoff.RetryPolicy { + return backoff.NewExponentialRetryPolicy( + RetryPolicyInitialInterval.Get(dc)(), + ).WithMaximumInterval( + RetryPolicyMaximumInterval.Get(dc)(), + ).WithExpirationInterval( + backoff.NoInterval, + ) + }, + } +} + +var AllowedAddresses = dynamicconfig.NewNamespaceTypedSettingWithConverter( + "callback.allowedAddresses", + allowedAddressConverter, + AddressMatchRules{}, + `The per-namespace list of addresses that are allowed for callbacks and whether secure connections (https) are required. +URLs: "temporal://system" and "temporal://internal" are always allowed. The default is no address rules. +URLs are checked against each in order when starting a workflow or activitiy with attached callbacks or a standalone +callback and only need to match one to pass validation. This configuration is required for external endpoint targets; +any invalid entries are ignored. Each entry is a map with possible values: + - "Pattern":string (required) the host:port pattern to which this config applies. + Wildcards, '*', are supported and can match any number of characters (e.g. '*' matches everything, + 'prefix.*.domain' matches 'prefix.a.domain' as well as 'prefix.a.b.domain'). 
+ - "AllowInsecure":bool (optional, default=false) indicates whether https is required`) + +type AddressMatchRules struct { + Rules []AddressMatchRule +} + +func (a AddressMatchRules) Validate(rawURL string) error { + // Exact match only; no path, query, or fragment allowed for system URL + if rawURL == nexus.SystemCallbackURL || rawURL == chasm.NexusCompletionHandlerURL { + return nil + } + u, err := url.Parse(rawURL) + if err != nil { + return status.Errorf(codes.InvalidArgument, "invalid callback url: %v", err) + } + if u.Scheme != "http" && u.Scheme != "https" { + return status.Errorf(codes.InvalidArgument, "invalid url: unknown scheme: %v", u) + } + if u.Host == "" { + return status.Errorf(codes.InvalidArgument, "invalid url: missing host") + } + for _, rule := range a.Rules { + allow, err := rule.Allow(u) + if err != nil { + return err + } + if allow { + return nil + } + } + return status.Errorf(codes.InvalidArgument, "invalid url: url does not match any configured callback address: %v", u) +} + +type AddressMatchRule struct { + Regexp *regexp.Regexp + AllowInsecure bool +} + +// Allow validates the URL by: +// 1. true, nil if the provided url matches the rule and passed validation +// for the given rule. +// 2. false, nil if the URL does not match the rule. +// 3. 
// Allow validates the URL against this rule:
//  1. (true, nil) if the URL's host matches the rule and the URL passes the
//     secure-connection requirement.
//  2. (false, nil) if the URL's host does not match the rule.
//  3. (false, error) if the host matches but the URL fails validation, i.e.
//     the scheme is not https and the rule does not allow insecure connections.
func (a AddressMatchRule) Allow(u *url.URL) (bool, error) {
	if !a.Regexp.MatchString(u.Host) {
		return false, nil
	}
	if a.AllowInsecure {
		return true, nil
	}
	if u.Scheme != "https" {
		return false,
			status.Errorf(codes.InvalidArgument,
				"invalid url: callback address does not allow insecure connections: %v", u)
	}
	return true, nil
}

// allowedAddressConverter converts the raw dynamic-config value (a list of
// {Pattern, AllowInsecure} maps) into compiled AddressMatchRules. Entries with
// a missing or uncompilable Pattern are silently skipped, per the setting's
// documented "any invalid entries are ignored" behavior.
func allowedAddressConverter(val any) (AddressMatchRules, error) {
	type entry struct {
		Pattern       string
		AllowInsecure bool
	}
	intermediate, err := dynamicconfig.ConvertStructure[[]entry](nil)(val)
	if err != nil {
		return AddressMatchRules{}, err
	}

	configs := []AddressMatchRule{}
	for _, e := range intermediate {
		if e.Pattern == "" {
			// Skip configs with missing / unparsable Pattern.
			continue
		}
		re, err := regexp.Compile(addressPatternToRegexp(e.Pattern))
		if err != nil {
			// Skip configs with malformed Pattern.
			continue
		}
		configs = append(configs, AddressMatchRule{
			Regexp:        re,
			AllowInsecure: e.AllowInsecure,
		})
	}
	return AddressMatchRules{Rules: configs}, nil
}

// addressPatternToRegexp converts a host pattern with '*' wildcards into an
// anchored regular expression: literal segments are quoted and each '*'
// becomes '.*' (so '*' can span any number of characters, including dots).
func addressPatternToRegexp(pattern string) string {
	var result strings.Builder
	result.WriteString("^")
	first := true
	for literal := range strings.SplitSeq(pattern, "*") {
		if !first {
			// Replace * with .*
			result.WriteString(".*")
		}
		result.WriteString(regexp.QuoteMeta(literal))
		first = false
	}
	result.WriteString("$")
	return result.String()
}
string + pattern string + want string + }{ + {name: "empty", pattern: "", want: "^$"}, + {name: "no_wildcard", pattern: "foo", want: "^foo$"}, + {name: "single_wildcard_only", pattern: "*", want: "^.*$"}, + {name: "leading_wildcard", pattern: "*foo", want: "^.*foo$"}, + {name: "trailing_wildcard", pattern: "foo*", want: "^foo.*$"}, + {name: "surrounded_wildcard", pattern: "*foo*", want: "^.*foo.*$"}, + {name: "middle_wildcard", pattern: "foo*bar", want: "^foo.*bar$"}, + {name: "literal_dots_around_wildcard", pattern: "foo.*bar", want: "^foo\\..*bar$"}, + {name: "prefix_subdomain", pattern: "prefix.*.domain", want: "^prefix\\..*\\.domain$"}, + {name: "leading_any_subdomain", pattern: "*.example.com", want: "^.*\\.example\\.com$"}, + {name: "host_with_port", pattern: "api.example.com:8080", want: "^api\\.example\\.com:8080$"}, + {name: "consecutive_wildcards", pattern: "a**b", want: "^a.*.*b$"}, + {name: "triple_wildcards", pattern: "a***b", want: "^a.*.*.*b$"}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got := addressPatternToRegexp(test.pattern) + require.Equal(t, test.want, got) + _, err := regexp.Compile(got) + require.NoError(t, err) + }) + } +} + +func TestAddressMatchRules_Validate(t *testing.T) { + type args struct { + rawURL string + rules []AddressMatchRule + } + tests := []struct { + name string + args args + validateErr func(t *testing.T, err error) + }{ + { + name: "happy path, default config: just temporal", + args: args{ + rawURL: nexus.SystemCallbackURL, + rules: []AddressMatchRule{}, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "sad path incorrect scheme, default config: just temporal", + args: args{ + rawURL: "https://system", + rules: []AddressMatchRule{}, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: url does not match any 
configured callback address") + }, + }, + { + name: "sad path incorrect host, default config: just temporal", + args: args{ + rawURL: "temporal://somehost.com", + rules: []AddressMatchRule{}, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: unknown scheme") + }, + }, + { + name: "sad path http, default config: just temporal", + args: args{ + rawURL: "http://localhost", + rules: []AddressMatchRule{}, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: url does not match any configured callback address") + }, + }, + { + name: "sad path invalid url, default config: just temporal", + args: args{ + rawURL: "blblbblblb", + rules: []AddressMatchRule{}, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: unknown scheme") + }, + }, + { + name: "secure only passes with https", + args: args{ + rawURL: "https://api.example.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("api.example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: false} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "secure only fails with http", + args: args{ + rawURL: "http://api.example.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("api.example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: false} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: callback address 
does not allow insecure connections") + }, + }, + { + name: "allow insecure passes with http", + args: args{ + rawURL: "http://a.example.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("*.example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "allow insecure passes with https", + args: args{ + rawURL: "https://a.example.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("*.example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "port must match", + args: args{ + rawURL: "https://api.example.com:8080", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("api.example.com:8080")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "port mismatch fails", + args: args{ + rawURL: "https://api.example.com:9090", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("api.example.com:8080")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: url does not match any configured callback address") + }, + }, + { + name: "middle wildcard matches", + args: args{ + rawURL: "https://foozbar.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("foo*bar.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), 
+ }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "prefix subdomain matches single level", + args: args{ + rawURL: "https://prefix.a.domain", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("prefix.*.domain")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "prefix subdomain matches multiple levels", + args: args{ + rawURL: "https://prefix.a.b.domain", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("prefix.*.domain")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "multiple rules, second matches", + args: args{ + rawURL: "http://a.ok.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("no-match.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("*.ok.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + { + name: "unknown scheme fails", + args: args{ + rawURL: "ftp://example.com", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: unknown scheme") + }, + }, + { + name: "invalid url", + args: args{ + rawURL: "../..///../", + rules: []AddressMatchRule{ + func() 
AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: unknown scheme") + }, + }, + { + name: "invalid url", + args: args{ + rawURL: "http://", + rules: []AddressMatchRule{ + func() AddressMatchRule { + re := regexp.MustCompile(addressPatternToRegexp("example.com")) + return AddressMatchRule{Regexp: re, AllowInsecure: true} + }(), + }, + }, + validateErr: func(t *testing.T, err error) { + require.Error(t, err) + require.Equal(t, codes.InvalidArgument, status.Code(err)) + require.ErrorContains(t, err, "invalid url: missing host") + }, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + rules := AddressMatchRules{Rules: tt.args.rules} + tt.validateErr(t, rules.Validate(tt.args.rawURL)) + }) + } +} diff --git a/chasm/lib/callback/fx.go b/chasm/lib/callback/fx.go new file mode 100644 index 00000000000..1da518d8368 --- /dev/null +++ b/chasm/lib/callback/fx.go @@ -0,0 +1,64 @@ +package callback + +import ( + "fmt" + "net/http" + + "go.temporal.io/server/chasm" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + queuescommon "go.temporal.io/server/service/history/queues/common" + "go.uber.org/fx" +) + +func register( + registry *chasm.Registry, + library *Library, +) error { + return registry.Register(library) +} + +// httpCallerProviderProvider provides an HTTPCallerProvider for CHASM callbacks. 
// httpCallerProviderProvider provides an HTTPCallerProvider for CHASM callbacks.
// It builds a once-per-key map over (namespace ID, destination) pairs; each
// caller routes its request through routeRequest using the shared clients and
// callback token generator captured here.
func httpCallerProviderProvider(
	clusterMetadata cluster.Metadata,
	namespaceRegistry namespace.Registry,
	rpcFactory common.RPCFactory,
	httpClientCache *cluster.FrontendHTTPClientCache,
	logger log.Logger,
) (HTTPCallerProvider, error) {
	localClient, err := rpcFactory.CreateLocalFrontendHTTPClient()
	if err != nil {
		return nil, fmt.Errorf("cannot create local frontend HTTP client: %w", err)
	}
	// NOTE(review): defaultClient has no Timeout set — presumably per-request
	// deadlines come from the request context / callback.request.timeout
	// setting; confirm before relying on it.
	defaultClient := &http.Client{}
	callbackTokenGenerator := commonnexus.NewCallbackTokenGenerator()

	m := collection.NewOnceMap(func(queuescommon.NamespaceIDAndDestination) HTTPCaller {
		return func(r *http.Request) (*http.Response, error) {
			return routeRequest(r,
				clusterMetadata,
				namespaceRegistry,
				httpClientCache,
				callbackTokenGenerator,
				defaultClient,
				localClient,
				logger,
			)
		}
	})
	return m.Get, nil
}

// Module wires the callback library into the server's fx dependency graph.
var Module = fx.Module(
	"chasm.lib.callback",
	fx.Provide(configProvider),
	fx.Provide(httpCallerProviderProvider),
	fx.Provide(newInvocationTaskHandler),
	fx.Provide(newBackoffTaskHandler),
	fx.Provide(newLibrary),
	fx.Invoke(register),
)
// Marshal an object of type CallbackState to the protobuf v3 wire format
func (val *CallbackState) Marshal() ([]byte, error) {
	return proto.Marshal(val)
}

// Unmarshal an object of type CallbackState from the protobuf v3 wire format
func (val *CallbackState) Unmarshal(buf []byte) error {
	return proto.Unmarshal(buf, val)
}

// Size returns the size of the object, in bytes, once serialized
func (val *CallbackState) Size() int {
	return proto.Size(val)
}

// Equal returns whether two CallbackState values are equivalent by recursively
// comparing the message's fields.
// For more information see the documentation for
// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal
func (this *CallbackState) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	// Accept either a pointer or a value of the same message type.
	var that1 *CallbackState
	switch t := that.(type) {
	case *CallbackState:
		that1 = t
	case CallbackState:
		that1 = &t
	default:
		return false
	}

	return proto.Equal(this, that1)
}

// Marshal an object of type Callback to the protobuf v3 wire format
func (val *Callback) Marshal() ([]byte, error) {
	return proto.Marshal(val)
}

// Unmarshal an object of type Callback from the protobuf v3 wire format
func (val *Callback) Unmarshal(buf []byte) error {
	return proto.Unmarshal(buf, val)
}

// Size returns the size of the object, in bytes, once serialized
func (val *Callback) Size() int {
	return proto.Size(val)
}

// Equal returns whether two Callback values are equivalent by recursively
// comparing the message's fields.
// For more information see the documentation for
// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal
func (this *Callback) Equal(that interface{}) bool {
	if that == nil {
		return this == nil
	}

	// Accept either a pointer or a value of the same message type.
	var that1 *Callback
	switch t := that.(type) {
	case *Callback:
		that1 = t
	case Callback:
		that1 = &t
	default:
		return false
	}

	return proto.Equal(this, that1)
}

// CallbackStatus_shorthandValue maps the traditional temporal PascalCase
// shorthand names to their enum values.
var (
	CallbackStatus_shorthandValue = map[string]int32{
		"Unspecified": 0,
		"Standby":     1,
		"Scheduled":   2,
		"BackingOff":  3,
		"Failed":      4,
		"Succeeded":   5,
	}
)

// CallbackStatusFromString parses a CallbackStatus value from either the protojson
// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to CallbackStatus
func CallbackStatusFromString(s string) (CallbackStatus, error) {
	if v, ok := CallbackStatus_value[s]; ok {
		return CallbackStatus(v), nil
	} else if v, ok := CallbackStatus_shorthandValue[s]; ok {
		return CallbackStatus(v), nil
	}
	return CallbackStatus(0), fmt.Errorf("%s is not a valid CallbackStatus", s)
}
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Status of a callback. +type CallbackStatus int32 + +const ( + // Default value, unspecified state. + CALLBACK_STATUS_UNSPECIFIED CallbackStatus = 0 + // Callback is standing by, waiting to be triggered. + CALLBACK_STATUS_STANDBY CallbackStatus = 1 + // Callback is in the queue waiting to be executed or is currently executing. + CALLBACK_STATUS_SCHEDULED CallbackStatus = 2 + // Callback has failed with a retryable error and is backing off before the next attempt. + CALLBACK_STATUS_BACKING_OFF CallbackStatus = 3 + // Callback has failed. + CALLBACK_STATUS_FAILED CallbackStatus = 4 + // Callback has succeeded. + CALLBACK_STATUS_SUCCEEDED CallbackStatus = 5 +) + +// Enum value maps for CallbackStatus. +var ( + CallbackStatus_name = map[int32]string{ + 0: "CALLBACK_STATUS_UNSPECIFIED", + 1: "CALLBACK_STATUS_STANDBY", + 2: "CALLBACK_STATUS_SCHEDULED", + 3: "CALLBACK_STATUS_BACKING_OFF", + 4: "CALLBACK_STATUS_FAILED", + 5: "CALLBACK_STATUS_SUCCEEDED", + } + CallbackStatus_value = map[string]int32{ + "CALLBACK_STATUS_UNSPECIFIED": 0, + "CALLBACK_STATUS_STANDBY": 1, + "CALLBACK_STATUS_SCHEDULED": 2, + "CALLBACK_STATUS_BACKING_OFF": 3, + "CALLBACK_STATUS_FAILED": 4, + "CALLBACK_STATUS_SUCCEEDED": 5, + } +) + +func (x CallbackStatus) Enum() *CallbackStatus { + p := new(CallbackStatus) + *p = x + return p +} + +func (x CallbackStatus) String() string { + switch x { + case CALLBACK_STATUS_UNSPECIFIED: + return "Unspecified" + case CALLBACK_STATUS_STANDBY: + return "Standby" + case CALLBACK_STATUS_SCHEDULED: + return "Scheduled" + case CALLBACK_STATUS_BACKING_OFF: + return "BackingOff" + case CALLBACK_STATUS_FAILED: + return "Failed" + case CALLBACK_STATUS_SUCCEEDED: + return "Succeeded" + default: + return strconv.Itoa(int(x)) + } + +} + +func (CallbackStatus) Descriptor() 
protoreflect.EnumDescriptor { + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_enumTypes[0].Descriptor() +} + +func (CallbackStatus) Type() protoreflect.EnumType { + return &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_enumTypes[0] +} + +func (x CallbackStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CallbackStatus.Descriptor instead. +func (CallbackStatus) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescGZIP(), []int{0} +} + +type CallbackState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Information on how this callback should be invoked (e.g. its URL and type). + Callback *Callback `protobuf:"bytes,1,opt,name=callback,proto3" json:"callback,omitempty"` + // The time when the callback was registered. + RegistrationTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=registration_time,json=registrationTime,proto3" json:"registration_time,omitempty"` + Status CallbackStatus `protobuf:"varint,4,opt,name=status,proto3,enum=temporal.server.chasm.lib.callbacks.proto.v1.CallbackStatus" json:"status,omitempty"` + // The number of attempts made to deliver the callback. + // This number represents a minimum bound since the attempt is incremented after the callback request completes. + Attempt int32 `protobuf:"varint,5,opt,name=attempt,proto3" json:"attempt,omitempty"` + // The time when the last attempt completed. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // The last attempt's failure, if any. + LastAttemptFailure *v1.Failure `protobuf:"bytes,7,opt,name=last_attempt_failure,json=lastAttemptFailure,proto3" json:"last_attempt_failure,omitempty"` + // The time when the next attempt is scheduled. 
+ // NOTE (seankane): this field might go away in the future, discussion: + // https://github.com/temporalio/temporal/pull/8473#discussion_r2427348436 + NextAttemptScheduleTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=next_attempt_schedule_time,json=nextAttemptScheduleTime,proto3" json:"next_attempt_schedule_time,omitempty"` + // Request ID that added the callback. + RequestId string `protobuf:"bytes,9,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CallbackState) Reset() { + *x = CallbackState{} + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CallbackState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallbackState) ProtoMessage() {} + +func (x *CallbackState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallbackState.ProtoReflect.Descriptor instead. 
+func (*CallbackState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescGZIP(), []int{0} +} + +func (x *CallbackState) GetCallback() *Callback { + if x != nil { + return x.Callback + } + return nil +} + +func (x *CallbackState) GetRegistrationTime() *timestamppb.Timestamp { + if x != nil { + return x.RegistrationTime + } + return nil +} + +func (x *CallbackState) GetStatus() CallbackStatus { + if x != nil { + return x.Status + } + return CALLBACK_STATUS_UNSPECIFIED +} + +func (x *CallbackState) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *CallbackState) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.LastAttemptCompleteTime + } + return nil +} + +func (x *CallbackState) GetLastAttemptFailure() *v1.Failure { + if x != nil { + return x.LastAttemptFailure + } + return nil +} + +func (x *CallbackState) GetNextAttemptScheduleTime() *timestamppb.Timestamp { + if x != nil { + return x.NextAttemptScheduleTime + } + return nil +} + +func (x *CallbackState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type Callback struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *Callback_Nexus_ + Variant isCallback_Variant `protobuf_oneof:"variant"` + Links []*v11.Link `protobuf:"bytes,100,rep,name=links,proto3" json:"links,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Callback) Reset() { + *x = Callback{} + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Callback) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Callback) ProtoMessage() {} + +func (x *Callback) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Callback.ProtoReflect.Descriptor instead. +func (*Callback) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *Callback) GetVariant() isCallback_Variant { + if x != nil { + return x.Variant + } + return nil +} + +func (x *Callback) GetNexus() *Callback_Nexus { + if x != nil { + if x, ok := x.Variant.(*Callback_Nexus_); ok { + return x.Nexus + } + } + return nil +} + +func (x *Callback) GetLinks() []*v11.Link { + if x != nil { + return x.Links + } + return nil +} + +type isCallback_Variant interface { + isCallback_Variant() +} + +type Callback_Nexus_ struct { + Nexus *Callback_Nexus `protobuf:"bytes,2,opt,name=nexus,proto3,oneof"` +} + +func (*Callback_Nexus_) isCallback_Variant() {} + +// Trigger for when the workflow is closed. 
+type CallbackState_WorkflowClosed struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CallbackState_WorkflowClosed) Reset() { + *x = CallbackState_WorkflowClosed{} + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CallbackState_WorkflowClosed) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallbackState_WorkflowClosed) ProtoMessage() {} + +func (x *CallbackState_WorkflowClosed) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallbackState_WorkflowClosed.ProtoReflect.Descriptor instead. +func (*CallbackState_WorkflowClosed) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescGZIP(), []int{0, 0} +} + +type Callback_Nexus struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Callback URL. + // (-- api-linter: core::0140::uri=disabled + // + // aip.dev/not-precedent: Not respecting aip here. --) + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + // Header to attach to callback request. 
+ Header map[string]string `protobuf:"bytes,2,rep,name=header,proto3" json:"header,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *Callback_Nexus) Reset() { + *x = Callback_Nexus{} + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Callback_Nexus) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Callback_Nexus) ProtoMessage() {} + +func (x *Callback_Nexus) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Callback_Nexus.ProtoReflect.Descriptor instead. 
+func (*Callback_Nexus) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescGZIP(), []int{1, 0} +} + +func (x *Callback_Nexus) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Callback_Nexus) GetHeader() map[string]string { + if x != nil { + return x.Header + } + return nil +} + +var File_temporal_server_chasm_lib_callback_proto_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDesc = "" + + "\n" + + "9temporal/server/chasm/lib/callback/proto/v1/message.proto\x12,temporal.server.chasm.lib.callbacks.proto.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\"\xd3\x04\n" + + "\rCallbackState\x12R\n" + + "\bcallback\x18\x01 \x01(\v26.temporal.server.chasm.lib.callbacks.proto.v1.CallbackR\bcallback\x12G\n" + + "\x11registration_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x10registrationTime\x12T\n" + + "\x06status\x18\x04 \x01(\x0e2<.temporal.server.chasm.lib.callbacks.proto.v1.CallbackStatusR\x06status\x12\x18\n" + + "\aattempt\x18\x05 \x01(\x05R\aattempt\x12W\n" + + "\x1alast_attempt_complete_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12R\n" + + "\x14last_attempt_failure\x18\a \x01(\v2 .temporal.api.failure.v1.FailureR\x12lastAttemptFailure\x12W\n" + + "\x1anext_attempt_schedule_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\x17nextAttemptScheduleTime\x12\x1d\n" + + "\n" + + "request_id\x18\t \x01(\tR\trequestId\x1a\x10\n" + + "\x0eWorkflowClosed\"\xde\x02\n" + + "\bCallback\x12T\n" + + "\x05nexus\x18\x02 \x01(\v2<.temporal.server.chasm.lib.callbacks.proto.v1.Callback.NexusH\x00R\x05nexus\x122\n" + + "\x05links\x18d \x03(\v2\x1c.temporal.api.common.v1.LinkR\x05links\x1a\xb6\x01\n" + + "\x05Nexus\x12\x10\n" + + "\x03url\x18\x01 \x01(\tR\x03url\x12`\n" + + "\x06header\x18\x02 
\x03(\v2H.temporal.server.chasm.lib.callbacks.proto.v1.Callback.Nexus.HeaderEntryR\x06header\x1a9\n" + + "\vHeaderEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\t\n" + + "\avariantJ\x04\b\x01\x10\x02*\xc9\x01\n" + + "\x0eCallbackStatus\x12\x1f\n" + + "\x1bCALLBACK_STATUS_UNSPECIFIED\x10\x00\x12\x1b\n" + + "\x17CALLBACK_STATUS_STANDBY\x10\x01\x12\x1d\n" + + "\x19CALLBACK_STATUS_SCHEDULED\x10\x02\x12\x1f\n" + + "\x1bCALLBACK_STATUS_BACKING_OFF\x10\x03\x12\x1a\n" + + "\x16CALLBACK_STATUS_FAILED\x10\x04\x12\x1d\n" + + "\x19CALLBACK_STATUS_SUCCEEDED\x10\x05BGZEgo.temporal.io/server/chasm/lib/callbacks/gen/callbackspb;callbackspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDesc), len(file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDescData +} + +var file_temporal_server_chasm_lib_callback_proto_v1_message_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_temporal_server_chasm_lib_callback_proto_v1_message_proto_goTypes = []any{ + (CallbackStatus)(0), // 0: temporal.server.chasm.lib.callbacks.proto.v1.CallbackStatus + (*CallbackState)(nil), // 1: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState + (*Callback)(nil), // 2: 
temporal.server.chasm.lib.callbacks.proto.v1.Callback + (*CallbackState_WorkflowClosed)(nil), // 3: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.WorkflowClosed + (*Callback_Nexus)(nil), // 4: temporal.server.chasm.lib.callbacks.proto.v1.Callback.Nexus + nil, // 5: temporal.server.chasm.lib.callbacks.proto.v1.Callback.Nexus.HeaderEntry + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp + (*v1.Failure)(nil), // 7: temporal.api.failure.v1.Failure + (*v11.Link)(nil), // 8: temporal.api.common.v1.Link +} +var file_temporal_server_chasm_lib_callback_proto_v1_message_proto_depIdxs = []int32{ + 2, // 0: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.callback:type_name -> temporal.server.chasm.lib.callbacks.proto.v1.Callback + 6, // 1: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.registration_time:type_name -> google.protobuf.Timestamp + 0, // 2: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.status:type_name -> temporal.server.chasm.lib.callbacks.proto.v1.CallbackStatus + 6, // 3: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 7, // 4: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.last_attempt_failure:type_name -> temporal.api.failure.v1.Failure + 6, // 5: temporal.server.chasm.lib.callbacks.proto.v1.CallbackState.next_attempt_schedule_time:type_name -> google.protobuf.Timestamp + 4, // 6: temporal.server.chasm.lib.callbacks.proto.v1.Callback.nexus:type_name -> temporal.server.chasm.lib.callbacks.proto.v1.Callback.Nexus + 8, // 7: temporal.server.chasm.lib.callbacks.proto.v1.Callback.links:type_name -> temporal.api.common.v1.Link + 5, // 8: temporal.server.chasm.lib.callbacks.proto.v1.Callback.Nexus.header:type_name -> temporal.server.chasm.lib.callbacks.proto.v1.Callback.Nexus.HeaderEntry + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the 
sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_callback_proto_v1_message_proto_init() } +func file_temporal_server_chasm_lib_callback_proto_v1_message_proto_init() { + if File_temporal_server_chasm_lib_callback_proto_v1_message_proto != nil { + return + } + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes[1].OneofWrappers = []any{ + (*Callback_Nexus_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDesc), len(file_temporal_server_chasm_lib_callback_proto_v1_message_proto_rawDesc)), + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_callback_proto_v1_message_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_callback_proto_v1_message_proto_depIdxs, + EnumInfos: file_temporal_server_chasm_lib_callback_proto_v1_message_proto_enumTypes, + MessageInfos: file_temporal_server_chasm_lib_callback_proto_v1_message_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_callback_proto_v1_message_proto = out.File + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_goTypes = nil + file_temporal_server_chasm_lib_callback_proto_v1_message_proto_depIdxs = nil +} diff --git a/chasm/lib/callback/gen/callbackpb/v1/tasks.go-helpers.pb.go b/chasm/lib/callback/gen/callbackpb/v1/tasks.go-helpers.pb.go new file mode 100644 index 00000000000..a0181447c66 --- /dev/null +++ b/chasm/lib/callback/gen/callbackpb/v1/tasks.go-helpers.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package callbackspb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type InvocationTask to the protobuf v3 wire format +func (val *InvocationTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvocationTask from the protobuf v3 wire format +func (val *InvocationTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvocationTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvocationTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvocationTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvocationTask + switch t := that.(type) { + case *InvocationTask: + that1 = t + case InvocationTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type BackoffTask to the protobuf v3 wire format +func (val *BackoffTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BackoffTask from the protobuf v3 wire format +func (val *BackoffTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BackoffTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BackoffTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BackoffTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BackoffTask + switch t := that.(type) { + case *BackoffTask: + that1 = t + case BackoffTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/callback/gen/callbackpb/v1/tasks.pb.go b/chasm/lib/callback/gen/callbackpb/v1/tasks.pb.go new file mode 100644 index 00000000000..7354a359c8d --- /dev/null +++ b/chasm/lib/callback/gen/callbackpb/v1/tasks.pb.go @@ -0,0 +1,172 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/callback/proto/v1/tasks.proto + +package callbackspb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type InvocationTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The attempt number for this invocation. 
+ Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvocationTask) Reset() { + *x = InvocationTask{} + mi := &file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvocationTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvocationTask) ProtoMessage() {} + +func (x *InvocationTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationTask.ProtoReflect.Descriptor instead. +func (*InvocationTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *InvocationTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type BackoffTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The attempt number for this invocation. 
+ Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackoffTask) Reset() { + *x = BackoffTask{} + mi := &file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackoffTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackoffTask) ProtoMessage() {} + +func (x *BackoffTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackoffTask.ProtoReflect.Descriptor instead. +func (*BackoffTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +func (x *BackoffTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +var File_temporal_server_chasm_lib_callback_proto_v1_tasks_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDesc = "" + + "\n" + + "7temporal/server/chasm/lib/callback/proto/v1/tasks.proto\x12,temporal.server.chasm.lib.callbacks.proto.v1\"*\n" + + "\x0eInvocationTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"'\n" + + "\vBackoffTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattemptBGZEgo.temporal.io/server/chasm/lib/callbacks/gen/callbackspb;callbackspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescGZIP() []byte { + 
file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDescData +} + +var file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_goTypes = []any{ + (*InvocationTask)(nil), // 0: temporal.server.chasm.lib.callbacks.proto.v1.InvocationTask + (*BackoffTask)(nil), // 1: temporal.server.chasm.lib.callbacks.proto.v1.BackoffTask +} +var file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_init() } +func file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_init() { + if File_temporal_server_chasm_lib_callback_proto_v1_tasks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_goTypes, + DependencyIndexes: 
file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_callback_proto_v1_tasks_proto = out.File + file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_goTypes = nil + file_temporal_server_chasm_lib_callback_proto_v1_tasks_proto_depIdxs = nil +} diff --git a/chasm/lib/callback/invocable_internal.go b/chasm/lib/callback/invocable_internal.go new file mode 100644 index 00000000000..273c0a38634 --- /dev/null +++ b/chasm/lib/callback/invocable_internal.go @@ -0,0 +1,180 @@ +package callback + +import ( + "context" + "encoding/base64" + "fmt" + + "github.com/google/uuid" + "github.com/nexus-rpc/sdk-go/nexus" + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/chasm" + callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// logInternalError emits a log statement for internalMsg, tagged with both +// internalErr and a reference-id. An opaque error containing the reference-id is +// returned. Intended to be used to hide internal errors from end users. 
+func logInternalError(logger log.Logger, internalMsg string, internalErr error) error { + referenceID := uuid.NewString() + logger.Error(internalMsg, tag.Error(internalErr), tag.String("reference-id", referenceID)) + return fmt.Errorf("internal error, reference-id: %v", referenceID) +} + +// invocableInternal is an invocable that delivers the Nexus operation completion data to History for cross-shard +// callbacks. +type invocableInternal struct { + callback *callbackspb.Callback_Nexus + attempt int32 + completion nexusrpc.CompleteOperationOptions + requestID string +} + +func (c invocableInternal) WrapError(result invocationResult, err error) error { + // Return the invocation result error if present + if resultErr := result.error(); resultErr != nil { + return resultErr + } + + return err +} + +func (c invocableInternal) Invoke( + ctx context.Context, + ns *namespace.Namespace, + h *invocationTaskHandler, + task *callbackspb.InvocationTask, + taskAttr chasm.TaskAttributes, +) invocationResult { + header := nexus.Header(c.callback.GetHeader()) + if header == nil { + header = nexus.Header{} + } + + // Get back the base64-encoded ComponentRef from the header. 
+ encodedRef := header.Get(commonnexus.CallbackTokenHeader) + if encodedRef == "" { + return invocationResultFail{logInternalError(h.logger, "callback missing token", nil)} + } + + decodedRef, err := base64.RawURLEncoding.DecodeString(encodedRef) + if err != nil { + return invocationResultFail{logInternalError(h.logger, "failed to decode CHASM ComponentRef", err)} + } + + // Validate that the bytes are a valid ChasmComponentRef + ref := &persistencespb.ChasmComponentRef{} + err = proto.Unmarshal(decodedRef, ref) + if err != nil { + return invocationResultFail{logInternalError(h.logger, "failed to unmarshal CHASM ComponentRef", err)} + } + + request, err := c.getHistoryRequest(decodedRef) + if err != nil { + return invocationResultFail{logInternalError(h.logger, "failed to build history request", err)} + } + + // RPC to History for cross-shard completion delivery. + _, err = h.historyClient.CompleteNexusOperationChasm(ctx, request) + if err != nil { + msg := logInternalError(h.logger, "failed to complete Nexus operation", err) + if isRetryableRPCResponse(err) { + return invocationResultRetry{err: msg} + } + return invocationResultFail{msg} + } + + return invocationResultOK{} +} + +func isRetryableRPCResponse(err error) bool { + var st *status.Status + stGetter, ok := err.(interface{ Status() *status.Status }) + if ok { + st = stGetter.Status() + } else { + st, ok = status.FromError(err) + if !ok { + // Not a gRPC induced error + return false + } + } + // nolint:exhaustive + switch st.Code() { + case codes.Canceled, + codes.Unknown, + codes.Unavailable, + codes.DeadlineExceeded, + codes.ResourceExhausted, + codes.Aborted, + codes.Internal: + return true + default: + return false + } +} + +func (c invocableInternal) getHistoryRequest( + refBytes []byte, +) (*historyservice.CompleteNexusOperationChasmRequest, error) { + var req *historyservice.CompleteNexusOperationChasmRequest + + completion := &tokenspb.NexusOperationCompletion{ + ComponentRef: refBytes, + RequestId: 
c.requestID, + } + + if c.completion.Error == nil { + var payload *commonpb.Payload + if c.completion.Result != nil { + var ok bool + payload, ok = c.completion.Result.(*commonpb.Payload) + if !ok { + return nil, fmt.Errorf("invalid result, expected a payload, got: %T", c.completion.Result) + } + } + + req = &historyservice.CompleteNexusOperationChasmRequest{ + Outcome: &historyservice.CompleteNexusOperationChasmRequest_Success{ + Success: payload, + }, + CloseTime: timestamppb.New(c.completion.CloseTime), + Completion: completion, + } + } else { + failure, err := nexusrpc.DefaultFailureConverter().ErrorToFailure(c.completion.Error) + if err != nil { + return nil, fmt.Errorf("failed to convert error to failure: %w", err) + } + // Unwrap the operation error, the handler on the other side is expecting to receive the underlying cause. + if failure.Cause != nil { + failure = *failure.Cause + } + apiFailure, err := commonnexus.NexusFailureToTemporalFailure(failure) + if err != nil { + return nil, fmt.Errorf("failed to convert failure type: %w", err) + } + + req = &historyservice.CompleteNexusOperationChasmRequest{ + Completion: completion, + Outcome: &historyservice.CompleteNexusOperationChasmRequest_Failure{ + Failure: apiFailure, + }, + CloseTime: timestamppb.New(c.completion.CloseTime), + } + } + + return req, nil +} diff --git a/chasm/lib/callback/invocable_outbound.go b/chasm/lib/callback/invocable_outbound.go new file mode 100644 index 00000000000..6ed0a65d6dc --- /dev/null +++ b/chasm/lib/callback/invocable_outbound.go @@ -0,0 +1,110 @@ +package callback + +import ( + "context" + "errors" + "net/http/httptrace" + "time" + + "github.com/nexus-rpc/sdk-go/nexus" + "go.temporal.io/server/chasm" + callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + commonnexus 
"go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + queuescommon "go.temporal.io/server/service/history/queues/common" + queueserrors "go.temporal.io/server/service/history/queues/errors" +) + +// invocableOutbound is an invocable that delivers the Nexus operation completion data to an external destination for +// cross-namespace or cross-cell callbacks. +type invocableOutbound struct { + callback *callbackspb.Callback_Nexus + completion nexusrpc.CompleteOperationOptions + workflowID, runID string + attempt int32 +} + +func (n invocableOutbound) WrapError(result invocationResult, err error) error { + if retry, ok := result.(invocationResultRetry); ok { + return queueserrors.NewDestinationDownError(retry.err.Error(), err) + } + return err +} + +func (n invocableOutbound) Invoke( + ctx context.Context, + ns *namespace.Namespace, + h *invocationTaskHandler, + task *callbackspb.InvocationTask, + taskAttr chasm.TaskAttributes, +) invocationResult { + if h.httpTraceProvider != nil { + traceLogger := log.With(h.logger, + tag.WorkflowNamespace(ns.Name().String()), + tag.Operation("CompleteNexusOperation"), + tag.String("destination", taskAttr.Destination), + tag.WorkflowID(n.workflowID), + tag.WorkflowRunID(n.runID), + tag.AttemptStart(time.Now().UTC()), + tag.Attempt(n.attempt), + ) + if trace := h.httpTraceProvider.NewTrace(n.attempt, traceLogger); trace != nil { + ctx = httptrace.WithClientTrace(ctx, trace) + } + } + + client := nexusrpc.NewCompletionHTTPClient(nexusrpc.CompletionHTTPClientOptions{ + HTTPCaller: h.httpCallerProvider(queuescommon.NamespaceIDAndDestination{ + NamespaceID: ns.ID().String(), + Destination: taskAttr.Destination, + }), + Serializer: commonnexus.PayloadSerializer, + }) + // Make the call and record metrics. 
+ startTime := time.Now() + + n.completion.Header = n.callback.Header + err := client.CompleteOperation(ctx, n.callback.Url, n.completion) + + namespaceTag := metrics.NamespaceTag(ns.Name().String()) + destTag := metrics.DestinationTag(taskAttr.Destination) + outcomeTag := metrics.OutcomeTag(outcomeTag(ctx, err)) + h.metricsHandler.Counter(RequestCounter.Name()).Record(1, namespaceTag, destTag, outcomeTag) + h.metricsHandler.Timer(RequestLatencyHistogram.Name()).Record(time.Since(startTime), namespaceTag, destTag, outcomeTag) + + if err != nil { + retryable := isRetryableCallError(err) + h.logger.Error("Callback request failed", tag.Error(err), tag.Bool("retryable", retryable)) + if retryable { + return invocationResultRetry{err} + } + return invocationResultFail{err} + } + return invocationResultOK{} +} + +func isRetryableCallError(err error) bool { + var handlerError *nexus.HandlerError + if errors.As(err, &handlerError) { + return handlerError.Retryable() + } + return true +} + +func outcomeTag(callCtx context.Context, callErr error) string { + if callErr != nil { + if callCtx.Err() != nil { + return "request-timeout" + } + var handlerErr *nexus.HandlerError + if errors.As(callErr, &handlerErr) { + return "handler-error:" + string(handlerErr.Type) + } + return "unknown-error" + } + return "success" +} diff --git a/chasm/lib/callback/library.go b/chasm/lib/callback/library.go new file mode 100644 index 00000000000..838a88e1608 --- /dev/null +++ b/chasm/lib/callback/library.go @@ -0,0 +1,54 @@ +package callback + +import ( + "go.temporal.io/server/chasm" + "google.golang.org/grpc" +) + +type ( + Library struct { + chasm.UnimplementedLibrary + + InvocationTaskHandler *invocationTaskHandler + BackoffTaskHandler *backoffTaskHandler + } +) + +func newLibrary( + InvocationTaskHandler *invocationTaskHandler, + BackoffTaskHandler *backoffTaskHandler, +) *Library { + return &Library{ + InvocationTaskHandler: InvocationTaskHandler, + BackoffTaskHandler: BackoffTaskHandler, 
+ } +} + +func (l *Library) Name() string { + return chasm.CallbackLibraryName +} + +func (l *Library) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*Callback]( + chasm.CallbackComponentName, + chasm.WithDetached(), + ), + } +} + +func (l *Library) Tasks() []*chasm.RegistrableTask { + return []*chasm.RegistrableTask{ + chasm.NewRegistrableSideEffectTask( + "invoke", + l.InvocationTaskHandler, + ), + chasm.NewRegistrablePureTask( + "backoff", + l.BackoffTaskHandler, + ), + } +} + +func (l *Library) RegisterServices(server *grpc.Server) { +} diff --git a/chasm/lib/callback/metrics.go b/chasm/lib/callback/metrics.go new file mode 100644 index 00000000000..2642df50ec7 --- /dev/null +++ b/chasm/lib/callback/metrics.go @@ -0,0 +1,16 @@ +package callback + +import "go.temporal.io/server/common/metrics" + +// CHASM callback metrics. +// These are defined independently from HSM callbacks to avoid coupling between the two implementations. 
+var ( + RequestCounter = metrics.NewCounterDef( + "callback_outbound_requests", + metrics.WithDescription("The number of callback outbound requests made by the history service."), + ) + RequestLatencyHistogram = metrics.NewTimerDef( + "callback_outbound_latency", + metrics.WithDescription("Latency histogram of outbound callback requests made by the history service."), + ) +) diff --git a/chasm/lib/callback/proto/v1/message.proto b/chasm/lib/callback/proto/v1/message.proto new file mode 100644 index 00000000000..057e5c470e0 --- /dev/null +++ b/chasm/lib/callback/proto/v1/message.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.callbacks.proto.v1; + +import "google/protobuf/timestamp.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/callbacks/gen/callbackspb;callbackspb"; + +message CallbackState { + // Trigger for when the workflow is closed. + message WorkflowClosed {} + + // Information on how this callback should be invoked (e.g. its URL and type). + Callback callback = 1; + // The time when the callback was registered. + google.protobuf.Timestamp registration_time = 3; + + CallbackStatus status = 4; + // The number of attempts made to deliver the callback. + // This number represents a minimum bound since the attempt is incremented after the callback request completes. + int32 attempt = 5; + + // The time when the last attempt completed. + google.protobuf.Timestamp last_attempt_complete_time = 6; + // The last attempt's failure, if any. + temporal.api.failure.v1.Failure last_attempt_failure = 7; + // The time when the next attempt is scheduled. + // NOTE (seankane): this field might go away in the future, discussion: + // https://github.com/temporalio/temporal/pull/8473#discussion_r2427348436 + google.protobuf.Timestamp next_attempt_schedule_time = 8; + + // Request ID that added the callback. 
+ string request_id = 9; +} + +// Status of a callback. +enum CallbackStatus { + // Default value, unspecified state. + CALLBACK_STATUS_UNSPECIFIED = 0; + // Callback is standing by, waiting to be triggered. + CALLBACK_STATUS_STANDBY = 1; + // Callback is in the queue waiting to be executed or is currently executing. + CALLBACK_STATUS_SCHEDULED = 2; + // Callback has failed with a retryable error and is backing off before the next attempt. + CALLBACK_STATUS_BACKING_OFF = 3; + // Callback has failed. + CALLBACK_STATUS_FAILED = 4; + // Callback has succeeded. + CALLBACK_STATUS_SUCCEEDED = 5; +} + +message Callback { + message Nexus { + // Callback URL. + // (-- api-linter: core::0140::uri=disabled + // aip.dev/not-precedent: Not respecting aip here. --) + string url = 1; + // Header to attach to callback request. + map<string, string> header = 2; + } + + reserved 1; // For a generic callback mechanism to be added later. + oneof variant { + Nexus nexus = 2; + } + + repeated temporal.api.common.v1.Link links = 100; +} diff --git a/chasm/lib/callback/proto/v1/tasks.proto b/chasm/lib/callback/proto/v1/tasks.proto new file mode 100644 index 00000000000..f4cb65faa6a --- /dev/null +++ b/chasm/lib/callback/proto/v1/tasks.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.callbacks.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/callbacks/gen/callbackspb;callbackspb"; + +message InvocationTask { + // The attempt number for this invocation. + int32 attempt = 1; +} + +message BackoffTask { + // The attempt number of the invocation being backed off.
+ int32 attempt = 1; +} diff --git a/chasm/lib/callback/request.go b/chasm/lib/callback/request.go new file mode 100644 index 00000000000..43522c04a66 --- /dev/null +++ b/chasm/lib/callback/request.go @@ -0,0 +1,162 @@ +package callback + +import ( + "errors" + "net/http" + + "github.com/nexus-rpc/sdk-go/nexus" + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" +) + +// Header key used to identify callbacks that originate from and target the same cluster. +// Note: this is the nexusoperations.NexusCallbackSourceHeader stripped of Nexus-Callback- +const callbackSourceHeader = "source" + +// routeSystemCallbackRequest routes a system callback request to the appropriate frontend client +// based on the callback token's namespace and active cluster. 
+func routeSystemCallbackRequest( + r *http.Request, + clusterMetadata cluster.Metadata, + namespaceRegistry namespace.Registry, + httpClientCache *cluster.FrontendHTTPClientCache, + callbackTokenGenerator *commonnexus.CallbackTokenGenerator, + localClient *common.FrontendHTTPClient, + logger log.Logger, +) (*http.Response, error) { + var frontendClient *common.FrontendHTTPClient + if r.Header != nil { + token, err := commonnexus.DecodeCallbackToken(r.Header.Get(commonnexus.CallbackTokenHeader)) + if err != nil { + logger.Error("failed to decode callback token", tag.Error(err)) + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid callback token") + } + + completion, err := callbackTokenGenerator.DecodeCompletion(token) + if err != nil { + logger.Error("failed to decode completion from token", tag.Error(err)) + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid callback token") + } + + // Normalize to support two possible token shapes: + // - legacy HSM tokens carry namespace/workflow IDs directly + // - CHASM tokens carry an encoded component ref instead + namespaceID := completion.GetNamespaceId() + businessID := completion.GetWorkflowId() + if namespaceID == "" && len(completion.GetComponentRef()) > 0 { + ref := &persistencespb.ChasmComponentRef{} + if err := ref.Unmarshal(completion.GetComponentRef()); err != nil { + logger.Error("failed to decode CHASM component ref from callback token", tag.Error(err)) + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid callback token") + } + if ref.GetNamespaceId() == "" { + logger.Error("decoded CHASM component ref is missing namespace ID") + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid callback token") + } + if ref.GetBusinessId() == "" { + logger.Error("decoded CHASM component ref is missing business ID") + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid callback token") + } + 
namespaceID = ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } + + ns, err := namespaceRegistry.GetNamespaceByID(namespace.ID(namespaceID)) + if err != nil { + logger.Error("failed to get namespace for nexus completion request", tag.WorkflowNamespaceID(namespaceID), tag.Error(err)) + var nfe *serviceerror.NamespaceNotFound + if errors.As(err, &nfe) { + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeNotFound, "namespace %q not found", namespaceID) + } + return nil, commonnexus.ConvertGRPCError(err, false) + } + clusterName := ns.ActiveClusterName(namespace.RoutingKey{ID: businessID}) + if clusterMetadata.GetCurrentClusterName() == clusterName { + frontendClient = localClient + } else { + fec, err := httpClientCache.Get(clusterName) + if err != nil { + logger.Warn( + "HTTPCallerProvider unable to get FrontendHTTPClient for callback target cluster. Using local HTTP Client.", + tag.SourceCluster(clusterMetadata.GetCurrentClusterName()), + tag.TargetCluster(clusterName), + tag.Error(err), + ) + frontendClient = localClient + } else { + frontendClient = fec + } + } + } else { + frontendClient = localClient + } + r.URL.Path = commonnexus.PathCompletionCallbackNoIdentifier + r.URL.Scheme = frontendClient.Scheme + r.URL.Host = frontendClient.Address + r.Host = frontendClient.Address + return frontendClient.Do(r) +} + +func routeRequest( + r *http.Request, + clusterMetadata cluster.Metadata, + namespaceRegistry namespace.Registry, + httpClientCache *cluster.FrontendHTTPClientCache, + callbackTokenGenerator *commonnexus.CallbackTokenGenerator, + defaultClient *http.Client, + localClient *common.FrontendHTTPClient, + logger log.Logger, +) (*http.Response, error) { + if r.URL.String() == commonnexus.SystemCallbackURL { + return routeSystemCallbackRequest(r, clusterMetadata, namespaceRegistry, httpClientCache, callbackTokenGenerator, localClient, logger) + } + // This source header is populated in nexusoperations/tasks (via the ClientProvider) for worker 
targets + // if this header is not populated then we assume it's an external target. + if r.Header == nil || r.Header.Get(callbackSourceHeader) == "" { + return defaultClient.Do(r) + } + // If we got here, we assume that the endpoint in the original call was a worker target, and we should route + // internally, either to a local frontend, or one of the other connected clusters' frontends. + var frontendClient *common.FrontendHTTPClient + callbackSource := r.Header.Get(callbackSourceHeader) + for clusterName, clusterInfo := range clusterMetadata.GetAllClusterInfo() { + if callbackSource == clusterInfo.ClusterID { + if clusterMetadata.GetCurrentClusterName() == clusterName { + frontendClient = localClient + } else { + fec, err := httpClientCache.Get(clusterName) + if err != nil { + logger.Warn( + "HTTPCallerProvider unable to get FrontendHTTPClient for callback target cluster. Using local HTTP Client.", + tag.SourceCluster(clusterMetadata.GetCurrentClusterName()), + tag.TargetCluster(clusterName), + tag.Error(err), + ) + frontendClient = localClient + } else { + frontendClient = fec + } + } + break + } + } + if frontendClient == nil { + // This can happen when a cluster is disconnected. + logger.Warn( + "HTTPCallerProvider unable to find the target cluster. 
Using local HTTP Client.", + tag.SourceCluster(clusterMetadata.GetCurrentClusterName()), + ) + frontendClient = localClient + } + + r.URL.Scheme = frontendClient.Scheme + r.URL.Host = frontendClient.Address + r.Host = frontendClient.Address + return frontendClient.Do(r) +} diff --git a/chasm/lib/callback/request_test.go b/chasm/lib/callback/request_test.go new file mode 100644 index 00000000000..6bd4bb13df3 --- /dev/null +++ b/chasm/lib/callback/request_test.go @@ -0,0 +1,415 @@ +package callback + +import ( + "net/http" + "net/http/httptest" + "net/url" + "testing" + + "github.com/nexus-rpc/sdk-go/nexus" + "github.com/stretchr/testify/require" + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.uber.org/mock/gomock" +) + +func newTestFrontendHTTPClient(ts *httptest.Server) *common.FrontendHTTPClient { + u, _ := url.Parse(ts.URL) + return &common.FrontendHTTPClient{ + Client: *ts.Client(), + Address: u.Host, + Scheme: u.Scheme, + } +} + +func TestRouteRequest_ExternalTarget(t *testing.T) { + // When no source header is set, the request should be sent via the default client. 
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + ctrl := gomock.NewController(t) + clusterMeta := cluster.NewMockMetadata(ctrl) + + r, err := http.NewRequest(http.MethodPost, ts.URL+"/some/path", nil) + require.NoError(t, err) + + resp, err := routeRequest( + r, + clusterMeta, + nil, // namespaceRegistry not needed for external targets + nil, // httpClientCache not needed for external targets + nil, // callbackTokenGenerator not needed for external targets + ts.Client(), + nil, // localClient not needed for external targets + log.NewNoopLogger(), + ) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + require.Equal(t, http.StatusOK, resp.StatusCode) +} + +func TestRouteRequest_SourceHeaderLocal(t *testing.T) { + // When the source header matches the local cluster, the request should be routed to the local client. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + })) + defer ts.Close() + + ctrl := gomock.NewController(t) + clusterMeta := cluster.NewMockMetadata(ctrl) + clusterMeta.EXPECT().GetAllClusterInfo().Return(map[string]cluster.ClusterInformation{ + "cluster-A": {ClusterID: "cluster-id-A"}, + }) + clusterMeta.EXPECT().GetCurrentClusterName().Return("cluster-A") + + localClient := newTestFrontendHTTPClient(ts) + + r, err := http.NewRequest(http.MethodPost, "http://original-host/some/path", nil) + require.NoError(t, err) + r.Header.Set(callbackSourceHeader, "cluster-id-A") + + resp, err := routeRequest( + r, + clusterMeta, + nil, // namespaceRegistry + nil, // httpClientCache - not used since it's the local cluster + nil, // callbackTokenGenerator + &http.Client{}, + localClient, + log.NewNoopLogger(), + ) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + require.Equal(t, http.StatusAccepted, resp.StatusCode) +} + +func 
TestRouteRequest_SourceHeaderUnknownCluster(t *testing.T) { + // When the source header doesn't match any known cluster, falls back to local client. + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusAccepted) + })) + defer ts.Close() + + ctrl := gomock.NewController(t) + clusterMeta := cluster.NewMockMetadata(ctrl) + clusterMeta.EXPECT().GetAllClusterInfo().Return(map[string]cluster.ClusterInformation{ + "cluster-A": {ClusterID: "cluster-id-A"}, + }) + clusterMeta.EXPECT().GetCurrentClusterName().Return("cluster-A") + + localClient := newTestFrontendHTTPClient(ts) + + r, err := http.NewRequest(http.MethodPost, "http://original-host/some/path", nil) + require.NoError(t, err) + r.Header.Set(callbackSourceHeader, "unknown-cluster-id") + + resp, err := routeRequest( + r, + clusterMeta, + nil, + nil, + nil, + &http.Client{}, + localClient, + log.NewNoopLogger(), + ) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + require.Equal(t, http.StatusAccepted, resp.StatusCode) +} + +func TestRouteSystemCallbackRequest_NilHeaders(t *testing.T) { + // When the request has nil headers, it should fall back to the local client. 
+ var gotPath string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotPath = r.URL.Path + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + localClient := newTestFrontendHTTPClient(ts) + + r := &http.Request{ + Method: http.MethodPost, + URL: &url.URL{Path: "/"}, + Header: nil, + } + + resp, err := routeSystemCallbackRequest( + r, + nil, // clusterMetadata - not needed for nil headers path + nil, // namespaceRegistry + nil, // httpClientCache + nil, // callbackTokenGenerator + localClient, + log.NewNoopLogger(), + ) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, commonnexus.PathCompletionCallbackNoIdentifier, gotPath) +} + +func TestRouteSystemCallbackRequest_InvalidToken(t *testing.T) { + r, err := http.NewRequest(http.MethodPost, commonnexus.SystemCallbackURL, nil) + require.NoError(t, err) + r.Header.Set(commonnexus.CallbackTokenHeader, "not-valid-json") + + _, err = routeSystemCallbackRequest( + r, + nil, + nil, + nil, + nil, + nil, + log.NewNoopLogger(), + ) + require.Error(t, err) + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeBadRequest, handlerErr.Type) + require.Contains(t, handlerErr.Error(), "invalid callback token") +} + +func TestRouteSystemCallbackRequest_InvalidTokenData(t *testing.T) { + // Valid token structure but invalid data field. 
+ r, err := http.NewRequest(http.MethodPost, commonnexus.SystemCallbackURL, nil) + require.NoError(t, err) + r.Header.Set(commonnexus.CallbackTokenHeader, `{"v":1,"d":"!!!invalid-base64"}`) + + tokenGen := commonnexus.NewCallbackTokenGenerator() + + _, err = routeSystemCallbackRequest( + r, + nil, + nil, + nil, + tokenGen, + nil, + log.NewNoopLogger(), + ) + require.Error(t, err) + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeBadRequest, handlerErr.Type) +} + +func TestRouteSystemCallbackRequest_NamespaceNotFound(t *testing.T) { + ctrl := gomock.NewController(t) + nsRegistry := namespace.NewMockRegistry(ctrl) + + tokenGen := commonnexus.NewCallbackTokenGenerator() + tokenStr, err := tokenGen.Tokenize(&tokenspb.NexusOperationCompletion{ + NamespaceId: "ns-id-1", + WorkflowId: "wf-1", + RunId: "run-1", + Ref: &persistencespb.StateMachineRef{}, + }) + require.NoError(t, err) + + nsRegistry.EXPECT().GetNamespaceByID(namespace.ID("ns-id-1")).Return( + nil, serviceerror.NewNamespaceNotFound("ns-id-1"), + ) + + r, err := http.NewRequest(http.MethodPost, commonnexus.SystemCallbackURL, nil) + require.NoError(t, err) + r.Header.Set(commonnexus.CallbackTokenHeader, tokenStr) + + _, err = routeSystemCallbackRequest( + r, + nil, + nsRegistry, + nil, + tokenGen, + nil, + log.NewNoopLogger(), + ) + require.Error(t, err) + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeNotFound, handlerErr.Type) +} + +func TestRouteSystemCallbackRequest_InvalidChasmComponentRef(t *testing.T) { + for _, tc := range []struct { + name string + ref *persistencespb.ChasmComponentRef + }{ + { + name: "missing namespace id", + ref: &persistencespb.ChasmComponentRef{ + BusinessId: "wf-1", + RunId: "run-1", + }, + }, + { + name: "missing business id", + ref: &persistencespb.ChasmComponentRef{ + NamespaceId: "ns-id-1", + RunId: "run-1", + }, + }, + } { + t.Run(tc.name, 
func(t *testing.T) { + tokenGen := commonnexus.NewCallbackTokenGenerator() + + ref, err := tc.ref.Marshal() + require.NoError(t, err) + + tokenStr, err := tokenGen.Tokenize(&tokenspb.NexusOperationCompletion{ComponentRef: ref}) + require.NoError(t, err) + + r, err := http.NewRequest(http.MethodPost, commonnexus.SystemCallbackURL, nil) + require.NoError(t, err) + r.Header.Set(commonnexus.CallbackTokenHeader, tokenStr) + + _, err = routeSystemCallbackRequest( + r, + nil, + nil, + nil, + tokenGen, + nil, + log.NewNoopLogger(), + ) + require.Error(t, err) + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeBadRequest, handlerErr.Type) + require.Contains(t, handlerErr.Error(), "invalid callback token") + }) + } +} + +func TestRouteSystemCallbackRequest_Success(t *testing.T) { + for _, tc := range []struct { + name string + completionToken func(*commonnexus.CallbackTokenGenerator) (string, error) + }{ + { + name: "HSM", + completionToken: func(tokenGen *commonnexus.CallbackTokenGenerator) (string, error) { + return tokenGen.Tokenize(&tokenspb.NexusOperationCompletion{ + // HSM sets the deprecated execution fields and ref. + NamespaceId: "ns-id-1", + WorkflowId: "wf-1", + RunId: "run-1", + Ref: &persistencespb.StateMachineRef{}, + }) + }, + }, + { + name: "CHASM", + completionToken: func(tokenGen *commonnexus.CallbackTokenGenerator) (string, error) { + ref, err := (&persistencespb.ChasmComponentRef{ + NamespaceId: "ns-id-1", + BusinessId: "wf-1", + RunId: "run-1", + }).Marshal() + if err != nil { + return "", err + } + return tokenGen.Tokenize(&tokenspb.NexusOperationCompletion{ + // CHASM sets ComponentRef. 
+ ComponentRef: ref, + }) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + var gotPath string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotPath = r.URL.Path + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + ctrl := gomock.NewController(t) + clusterMeta := cluster.NewMockMetadata(ctrl) + nsRegistry := namespace.NewMockRegistry(ctrl) + + tokenGen := commonnexus.NewCallbackTokenGenerator() + tokenStr, err := tc.completionToken(tokenGen) + require.NoError(t, err) + + testNS := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{Id: "ns-id-1", Name: "test-ns"}, + nil, + "cluster-A", + ) + nsRegistry.EXPECT().GetNamespaceByID(namespace.ID("ns-id-1")).Return(testNS, nil) + + // httpClientCache.Get will fail for "cluster-A", so it falls back to localClient. + clusterMeta.EXPECT().GetCurrentClusterName().Return("cluster-A").AnyTimes() + clusterMeta.EXPECT().GetAllClusterInfo().Return(map[string]cluster.ClusterInformation{}).AnyTimes() + clusterMeta.EXPECT().RegisterMetadataChangeCallback(gomock.Any(), gomock.Any()) + + localClient := newTestFrontendHTTPClient(ts) + // Create a cache that will fail for the requested cluster since we don't set up metadata fully. 
+ httpClientCache := cluster.NewFrontendHTTPClientCache(clusterMeta, nil) + + r, err := http.NewRequest(http.MethodPost, commonnexus.SystemCallbackURL, nil) + require.NoError(t, err) + r.Header.Set(commonnexus.CallbackTokenHeader, tokenStr) + + resp, err := routeSystemCallbackRequest( + r, + clusterMeta, + nsRegistry, + httpClientCache, + tokenGen, + localClient, + log.NewNoopLogger(), + ) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, commonnexus.PathCompletionCallbackNoIdentifier, gotPath) + }) + } +} + +func TestRouteRequest_SystemCallback(t *testing.T) { + // Verify that routeRequest delegates to routeSystemCallbackRequest for system callback URLs. + var gotPath string + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + gotPath = r.URL.Path + w.WriteHeader(http.StatusOK) + })) + defer ts.Close() + + localClient := newTestFrontendHTTPClient(ts) + + // Use nil headers to take the simplest path through routeSystemCallbackRequest. 
+ r := &http.Request{ + Method: http.MethodPost, + URL: &url.URL{ + Scheme: "temporal", + Host: "system", + }, + Header: nil, + } + + resp, err := routeRequest( + r, + nil, + nil, + nil, + nil, + &http.Client{}, + localClient, + log.NewNoopLogger(), + ) + require.NoError(t, err) + defer func() { _ = resp.Body.Close() }() + require.Equal(t, http.StatusOK, resp.StatusCode) + require.Equal(t, commonnexus.PathCompletionCallbackNoIdentifier, gotPath) +} diff --git a/chasm/lib/callback/statemachine.go b/chasm/lib/callback/statemachine.go new file mode 100644 index 00000000000..779b9773989 --- /dev/null +++ b/chasm/lib/callback/statemachine.go @@ -0,0 +1,122 @@ +package callback + +import ( + "fmt" + "net/url" + "time" + + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/server/chasm" + callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1" + "go.temporal.io/server/common/backoff" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// EventScheduled is triggered when the callback is meant to be scheduled for the first time - when its Trigger +// condition is met. +type EventScheduled struct{} + +var TransitionScheduled = chasm.NewTransition( + []callbackspb.CallbackStatus{callbackspb.CALLBACK_STATUS_STANDBY}, + callbackspb.CALLBACK_STATUS_SCHEDULED, + func(cb *Callback, ctx chasm.MutableContext, event EventScheduled) error { + u, err := url.Parse(cb.Callback.GetNexus().GetUrl()) + if err != nil { + return fmt.Errorf("failed to parse URL: %v: %w", cb.Callback, err) + } + ctx.AddTask(cb, chasm.TaskAttributes{Destination: u.Scheme + "://" + u.Host}, &callbackspb.InvocationTask{}) + return nil + }, +) + +// EventRescheduled is triggered when the callback is meant to be rescheduled after backing off from a previous attempt. 
+type EventRescheduled struct{} + +var TransitionRescheduled = chasm.NewTransition( + []callbackspb.CallbackStatus{callbackspb.CALLBACK_STATUS_BACKING_OFF}, + callbackspb.CALLBACK_STATUS_SCHEDULED, + func(cb *Callback, ctx chasm.MutableContext, event EventRescheduled) error { + cb.NextAttemptScheduleTime = nil + u, err := url.Parse(cb.Callback.GetNexus().Url) + if err != nil { + return fmt.Errorf("failed to parse URL: %v: %w", cb.Callback, err) + } + ctx.AddTask( + cb, + chasm.TaskAttributes{Destination: u.Scheme + "://" + u.Host}, + &callbackspb.InvocationTask{Attempt: cb.Attempt}, + ) + return nil + }, +) + +// EventAttemptFailed is triggered when an attempt is failed with a retryable error. +type EventAttemptFailed struct { + Time time.Time + Err error + RetryPolicy backoff.RetryPolicy +} + +var TransitionAttemptFailed = chasm.NewTransition( + []callbackspb.CallbackStatus{callbackspb.CALLBACK_STATUS_SCHEDULED}, + callbackspb.CALLBACK_STATUS_BACKING_OFF, + func(cb *Callback, ctx chasm.MutableContext, event EventAttemptFailed) error { + cb.recordAttempt(event.Time) + // Use 0 for elapsed time as we don't limit the retry by time (for now). + nextDelay := event.RetryPolicy.ComputeNextDelay(0, int(cb.Attempt), event.Err) + nextAttemptScheduleTime := event.Time.Add(nextDelay) + cb.NextAttemptScheduleTime = timestamppb.New(nextAttemptScheduleTime) + cb.LastAttemptFailure = &failurepb.Failure{ + Message: event.Err.Error(), + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ + ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{ + NonRetryable: false, + }, + }, + } + ctx.AddTask( + cb, + chasm.TaskAttributes{ScheduledTime: nextAttemptScheduleTime}, + &callbackspb.BackoffTask{Attempt: cb.Attempt}, + ) + return nil + }, +) + +// EventFailed is triggered when an attempt is failed with a non retryable error. 
type EventFailed struct {
	// Time is when the attempt completed (failed).
	Time time.Time
	// Err is the non-retryable error recorded on the callback.
	Err error
}

// TransitionFailed moves a SCHEDULED callback to the terminal FAILED status,
// recording the attempt and a non-retryable application failure. No further
// tasks are generated.
var TransitionFailed = chasm.NewTransition(
	[]callbackspb.CallbackStatus{callbackspb.CALLBACK_STATUS_SCHEDULED},
	callbackspb.CALLBACK_STATUS_FAILED,
	func(cb *Callback, ctx chasm.MutableContext, event EventFailed) error {
		cb.recordAttempt(event.Time)
		cb.LastAttemptFailure = &failurepb.Failure{
			Message: event.Err.Error(),
			FailureInfo: &failurepb.Failure_ApplicationFailureInfo{
				ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{
					NonRetryable: true,
				},
			},
		}
		return nil
	},
)

// EventSucceeded is triggered when an attempt succeeds.
type EventSucceeded struct {
	// Time is when the successful attempt completed.
	Time time.Time
}

// TransitionSucceeded moves a SCHEDULED callback to the terminal SUCCEEDED
// status, recording the attempt and clearing any previous failure.
var TransitionSucceeded = chasm.NewTransition(
	[]callbackspb.CallbackStatus{callbackspb.CALLBACK_STATUS_SCHEDULED},
	callbackspb.CALLBACK_STATUS_SUCCEEDED,
	func(cb *Callback, ctx chasm.MutableContext, event EventSucceeded) error {
		cb.recordAttempt(event.Time)
		cb.LastAttemptFailure = nil
		return nil
	},
)

// ---- new file: chasm/lib/callback/statemachine_test.go (mode 100644) ----

package callback

import (
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.temporal.io/server/chasm"
	callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1"
	"go.temporal.io/server/common/backoff"
	"google.golang.org/protobuf/proto"
)

// TestValidTransitions drives a callback through the full state machine:
// SCHEDULED -> BACKING_OFF (AttemptFailed) -> SCHEDULED (Rescheduled) ->
// SUCCEEDED, and then — from a cloned pre-success snapshot — SCHEDULED ->
// FAILED, asserting the recorded state and generated tasks at each step.
func TestValidTransitions(t *testing.T) {
	// Setup
	currentTime := time.Now().UTC()
	callback := &Callback{
		CallbackState: &callbackspb.CallbackState{
			Callback: &callbackspb.Callback{
				Variant: &callbackspb.Callback_Nexus_{
					Nexus: &callbackspb.Callback_Nexus{
						Url: "http://address:666/path/to/callback?query=string",
					},
				},
			},
		},
	}
	callback.SetStateMachineState(callbackspb.CALLBACK_STATUS_SCHEDULED)

	// AttemptFailed
	mctx := &chasm.MockMutableContext{}
	err := TransitionAttemptFailed.Apply(callback, mctx, EventAttemptFailed{
		Time:        currentTime,
		Err:         errors.New("test"),
		RetryPolicy: backoff.NewExponentialRetryPolicy(time.Second),
	})
	require.NoError(t, err)

	// Assert info object is updated
	require.Equal(t, callbackspb.CALLBACK_STATUS_BACKING_OFF, callback.StateMachineState())
	require.Equal(t, int32(1), callback.Attempt)
	require.Equal(t, "test", callback.LastAttemptFailure.Message)
	require.False(t, callback.LastAttemptFailure.GetApplicationFailureInfo().NonRetryable)
	require.Equal(t, currentTime, callback.LastAttemptCompleteTime.AsTime())
	// The next attempt should be roughly one second out (initial retry interval);
	// allow jitter slack rather than asserting an exact timestamp.
	dt := currentTime.Add(time.Second).Sub(callback.NextAttemptScheduleTime.AsTime())
	require.Less(t, dt, time.Millisecond*200)

	// Assert backoff task is generated
	require.Len(t, mctx.Tasks, 1)
	require.IsType(t, &callbackspb.BackoffTask{}, mctx.Tasks[0].Payload)

	// Rescheduled
	mctx = &chasm.MockMutableContext{}
	err = TransitionRescheduled.Apply(callback, mctx, EventRescheduled{})
	require.NoError(t, err)

	// Assert info object is updated only where needed
	require.Equal(t, callbackspb.CALLBACK_STATUS_SCHEDULED, callback.StateMachineState())
	require.Equal(t, int32(1), callback.Attempt)
	require.Equal(t, "test", callback.LastAttemptFailure.Message)
	// Remains unmodified
	require.Equal(t, currentTime, callback.LastAttemptCompleteTime.AsTime())
	require.Nil(t, callback.NextAttemptScheduleTime)

	// Assert callback task is generated
	require.Len(t, mctx.Tasks, 1)
	require.IsType(t, &callbackspb.InvocationTask{}, mctx.Tasks[0].Payload)

	// Store the pre-succeeded state to test Failed later
	dup := &Callback{
		CallbackState: proto.Clone(callback.CallbackState).(*callbackspb.CallbackState),
	}
	dup.Status = callback.StateMachineState()

	// Succeeded
	currentTime = currentTime.Add(time.Second)
	mctx = &chasm.MockMutableContext{}
	err = TransitionSucceeded.Apply(callback, mctx, EventSucceeded{Time: currentTime})
	require.NoError(t, err)

	// Assert info object is updated only where needed
	require.Equal(t, callbackspb.CALLBACK_STATUS_SUCCEEDED, callback.StateMachineState())
	require.Equal(t, int32(2), callback.Attempt)
	require.Nil(t, callback.LastAttemptFailure)
	require.Equal(t, currentTime, callback.LastAttemptCompleteTime.AsTime())
	require.Nil(t, callback.NextAttemptScheduleTime)

	// Assert no task is generated on success transition
	require.Empty(t, mctx.Tasks)

	// Reset back to scheduled
	callback = dup
	// Increment the time to ensure it's updated in the transition
	currentTime = currentTime.Add(time.Second)

	// failed
	mctx = &chasm.MockMutableContext{}
	err = TransitionFailed.Apply(callback, mctx, EventFailed{Time: currentTime, Err: errors.New("failed")})
	require.NoError(t, err)

	// Assert info object is updated only where needed
	require.Equal(t, callbackspb.CALLBACK_STATUS_FAILED, callback.StateMachineState())
	require.Equal(t, int32(2), callback.Attempt)
	require.Equal(t, "failed", callback.LastAttemptFailure.Message)
	require.True(t, callback.LastAttemptFailure.GetApplicationFailureInfo().NonRetryable)
	require.Equal(t, currentTime, callback.LastAttemptCompleteTime.AsTime())
	require.Nil(t, callback.NextAttemptScheduleTime)

	// Assert task is not generated, failed is terminal
	require.Empty(t, mctx.Tasks)
}

// ---- new file: chasm/lib/callback/tasks.go (mode 100644) ----

package callback

import (
	"context"
	"fmt"
	"net/http"

	"go.temporal.io/server/chasm"
	callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1"
	"go.temporal.io/server/common/log"
	"go.temporal.io/server/common/metrics"
	"go.temporal.io/server/common/namespace"
	commonnexus "go.temporal.io/server/common/nexus"
	"go.temporal.io/server/common/resource"
	"go.temporal.io/server/service/history/queues/common"
	"go.uber.org/fx"
)

// HTTPCaller is a method that can be used to invoke HTTP requests.
type HTTPCaller func(*http.Request) (*http.Response, error)

// HTTPCallerProvider is a method that can be used to retrieve an HTTPCaller for a given namespace and destination.
type HTTPCallerProvider func(common.NamespaceIDAndDestination) HTTPCaller

// invocationResult is a marker for the callbackInvokable.Invoke result to indicate to the handler how to handle the
// invocation outcome.
type invocationResult interface {
	// A marker for all possible implementations.
	mustImplementInvocationResult()
	error() error
}

// invocationResultOK marks an invocation as successful.
type invocationResultOK struct{}

func (invocationResultOK) mustImplementInvocationResult() {}

func (invocationResultOK) error() error {
	return nil
}

// invocationResultFail marks an invocation as permanently failed.
type invocationResultFail struct {
	err error
}

func (invocationResultFail) mustImplementInvocationResult() {}

func (r invocationResultFail) error() error {
	return r.err
}

// invocationResultRetry marks an invocation as failed with the intent to retry.
type invocationResultRetry struct {
	err error
}

func (invocationResultRetry) mustImplementInvocationResult() {}

func (r invocationResultRetry) error() error {
	return r.err
}

type invocable interface {
	// Invoke executes the callback logic and returns the invocation result.
	Invoke(ctx context.Context, ns *namespace.Namespace, h *invocationTaskHandler, task *callbackspb.InvocationTask, taskAttr chasm.TaskAttributes) invocationResult
	// WrapError provides each variant the opportunity to wrap the error returned by the task handler, e.g., to
	// trigger the circuit breaker.
	WrapError(result invocationResult, err error) error
}

// invocationTaskHandlerOptions bundles the fx-injected dependencies for
// newInvocationTaskHandler.
type invocationTaskHandlerOptions struct {
	fx.In

	Config             *Config
	NamespaceRegistry  namespace.Registry
	MetricsHandler     metrics.Handler
	Logger             log.Logger
	HTTPCallerProvider HTTPCallerProvider
	HTTPTraceProvider  commonnexus.HTTPClientTraceProvider
	HistoryClient      resource.HistoryClient
}

// invocationTaskHandler executes InvocationTask side-effect tasks: it loads the
// callback's invocation arguments, invokes the callback, and persists the result.
type invocationTaskHandler struct {
	chasm.SideEffectTaskHandlerBase[*callbackspb.InvocationTask]
	config             *Config
	namespaceRegistry  namespace.Registry
	metricsHandler     metrics.Handler
	logger             log.Logger
	httpCallerProvider HTTPCallerProvider
	httpTraceProvider  commonnexus.HTTPClientTraceProvider
	historyClient      resource.HistoryClient
}

// newInvocationTaskHandler constructs an invocationTaskHandler from injected options.
func newInvocationTaskHandler(opts invocationTaskHandlerOptions) *invocationTaskHandler {
	return &invocationTaskHandler{
		config:             opts.Config,
		namespaceRegistry:  opts.NamespaceRegistry,
		metricsHandler:     opts.MetricsHandler,
		logger:             opts.Logger,
		httpCallerProvider: opts.HTTPCallerProvider,
		httpTraceProvider:  opts.HTTPTraceProvider,
		historyClient:      opts.HistoryClient,
	}
}

// Validate reports whether the task is still current: the callback must still
// be SCHEDULED and the task's attempt must match the callback's attempt
// (a stale task from an earlier attempt is dropped).
func (h *invocationTaskHandler) Validate(ctx chasm.Context, cb *Callback, attrs chasm.TaskAttributes, task *callbackspb.InvocationTask) (bool, error) {
	return cb.Attempt == task.Attempt && cb.Status == callbackspb.CALLBACK_STATUS_SCHEDULED, nil
}

// Execute runs one invocation attempt: resolve the namespace, read the
// callback's invocation arguments, invoke with a destination-scoped timeout,
// then persist the outcome back onto the component. Any save error is passed
// through the invokable's WrapError so each variant can shape the final error.
func (h *invocationTaskHandler) Execute(
	ctx context.Context,
	ref chasm.ComponentRef,
	taskAttr chasm.TaskAttributes,
	task *callbackspb.InvocationTask,
) error {
	ns, err := h.namespaceRegistry.GetNamespaceByID(namespace.ID(ref.NamespaceID))
	if err != nil {
		return fmt.Errorf("failed to get namespace by ID: %w", err)
	}

	invokable, err := chasm.ReadComponent(
		ctx,
		ref,
		(*Callback).loadInvocationArgs,
		nil,
	)
	if err != nil {
		return err
	}

	// Bound the call by the per-namespace/per-destination request timeout.
	callCtx, cancel := context.WithTimeout(
		ctx,
		h.config.RequestTimeout(ns.Name().String(), taskAttr.Destination),
	)
	defer cancel()

	result := invokable.Invoke(callCtx, ns, h, task, taskAttr)
	_, _, saveErr := chasm.UpdateComponent(
		ctx,
		ref,
		(*Callback).saveResult,
		saveResultInput{
			result:      result,
			retryPolicy: h.config.RetryPolicy(),
		},
	)
	return invokable.WrapError(result, saveErr)
}

// backoffTaskHandler executes BackoffTask pure tasks, which fire when a
// callback's backoff period elapses.
type backoffTaskHandler struct {
	chasm.PureTaskHandlerBase
}

// backoffTaskHandlerOptions exists for fx wiring; the handler currently has no
// dependencies.
type backoffTaskHandlerOptions struct {
	fx.In
}

func newBackoffTaskHandler(opts backoffTaskHandlerOptions) *backoffTaskHandler {
	return &backoffTaskHandler{}
}

// Execute toggles the callback status from BACKING_OFF to SCHEDULED to trigger a new invocation attempt.
func (h *backoffTaskHandler) Execute(
	ctx chasm.MutableContext,
	callback *Callback,
	taskAttrs chasm.TaskAttributes,
	task *callbackspb.BackoffTask,
) error {
	return TransitionRescheduled.Apply(callback, ctx, EventRescheduled{})
}

// Validate validates that the callback is in BACKING_OFF state and that the attempt number matches before allowing the
// backoff task to execute.
func (h *backoffTaskHandler) Validate(
	ctx chasm.Context,
	callback *Callback,
	taskAttr chasm.TaskAttributes,
	task *callbackspb.BackoffTask,
) (bool, error) {
	return callback.Status == callbackspb.CALLBACK_STATUS_BACKING_OFF && callback.Attempt == task.Attempt, nil
}

// ---- new file: chasm/lib/callback/tasks_test.go (mode 100644) ----

package callback

import (
	"context"
	"encoding/base64"
	"errors"
	"net/http"
	"testing"
	"time"

	"github.com/nexus-rpc/sdk-go/nexus"
	"github.com/stretchr/testify/require"
	commonpb "go.temporal.io/api/common/v1"
	"go.temporal.io/server/api/historyservice/v1"
	"go.temporal.io/server/api/historyservicemock/v1"
	persistencespb "go.temporal.io/server/api/persistence/v1"
	"go.temporal.io/server/chasm"
	"go.temporal.io/server/chasm/chasmtest"
	callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1"
	"go.temporal.io/server/common/backoff"
	"go.temporal.io/server/common/clock"
	"go.temporal.io/server/common/dynamicconfig"
	"go.temporal.io/server/common/log"
	"go.temporal.io/server/common/metrics"
	"go.temporal.io/server/common/namespace"
	commonnexus "go.temporal.io/server/common/nexus"
	"go.temporal.io/server/common/nexus/nexusrpc"
	"go.temporal.io/server/common/resource"
	"go.temporal.io/server/service/history/queues/common"
	queueserrors "go.temporal.io/server/service/history/queues/errors"
	"go.uber.org/mock/gomock"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/types/known/emptypb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// mockNexusCompletionGetterComponent is a test root component that hands out a
// canned Nexus completion and owns the Callback under test as a child field.
type mockNexusCompletionGetterComponent struct {
	chasm.UnimplementedComponent

	Empty *emptypb.Empty

	// completion/err are returned verbatim from GetNexusCompletion.
	completion nexusrpc.CompleteOperationOptions
	err        error

	Callback chasm.Field[*Callback]
}

func (m *mockNexusCompletionGetterComponent) GetNexusCompletion(_ chasm.Context, requestID string) (nexusrpc.CompleteOperationOptions, error) {
	return m.completion, m.err
}

func (m *mockNexusCompletionGetterComponent) LifecycleState(_ chasm.Context) chasm.LifecycleState {
	return chasm.LifecycleStateRunning
}

func (m *mockNexusCompletionGetterComponent) ContextMetadata(_ chasm.Context) map[string]string {
	return nil
}

func (m *mockNexusCompletionGetterComponent) Terminate(
	_ chasm.MutableContext,
	_ chasm.TerminateComponentRequest,
) (chasm.TerminateComponentResponse, error) {
	return chasm.TerminateComponentResponse{}, nil
}

// mockNexusCompletionGetterLibrary registers the mock component under the
// library name "mock" so the test CHASM registry can resolve it.
type mockNexusCompletionGetterLibrary struct {
	chasm.UnimplementedLibrary
}

func (l *mockNexusCompletionGetterLibrary) Name() string {
	return "mock"
}

func (l *mockNexusCompletionGetterLibrary) Components() []*chasm.RegistrableComponent {
	return []*chasm.RegistrableComponent{
		chasm.NewRegistrableComponent[*mockNexusCompletionGetterComponent]("nexusCompletionGetter"),
	}
}

// Test the full executeInvocationTask flow with direct handler calls.
// Each case injects an HTTPCaller with a fixed response and asserts the
// resulting callback status, returned error shape, and emitted metrics.
func TestExecuteInvocationTaskNexus_Outcomes(t *testing.T) {
	cases := []struct {
		name                  string
		caller                HTTPCaller
		expectedMetricOutcome string
		assertOutcome         func(*testing.T, *Callback, error)
	}{
		{
			// 200 -> terminal SUCCEEDED, no error surfaced.
			name: "success",
			caller: func(r *http.Request) (*http.Response, error) {
				return &http.Response{StatusCode: 200, Body: http.NoBody}, nil
			},
			expectedMetricOutcome: "success",
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.NoError(t, err)
				require.Equal(t, callbackspb.CALLBACK_STATUS_SUCCEEDED, cb.Status)
			},
		},
		{
			// Transport failure -> BACKING_OFF and a DestinationDownError.
			name: "network-error-retry",
			caller: func(r *http.Request) (*http.Response, error) {
				return nil, errors.New("fake failure")
			},
			expectedMetricOutcome: "unknown-error",
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				var destDownErr *queueserrors.DestinationDownError
				require.ErrorAs(t, err, &destDownErr)
				require.Equal(t, callbackspb.CALLBACK_STATUS_BACKING_OFF, cb.Status)
			},
		},
		{
			// 500 -> retryable handler error -> BACKING_OFF.
			name: "retryable-http-error",
			caller: func(r *http.Request) (*http.Response, error) {
				return &http.Response{StatusCode: 500, Body: http.NoBody}, nil
			},
			expectedMetricOutcome: "handler-error:INTERNAL",
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				var destDownErr *queueserrors.DestinationDownError
				require.ErrorAs(t, err, &destDownErr)
				require.Equal(t, callbackspb.CALLBACK_STATUS_BACKING_OFF, cb.Status)
			},
		},
		{
			// 400 -> non-retryable -> terminal FAILED, no error surfaced.
			name: "non-retryable-http-error",
			caller: func(r *http.Request) (*http.Response, error) {
				return &http.Response{StatusCode: 400, Body: http.NoBody}, nil
			},
			expectedMetricOutcome: "handler-error:BAD_REQUEST",
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.NoError(t, err)
				require.Equal(t, callbackspb.CALLBACK_STATUS_FAILED, cb.Status)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()

			// Setup namespace
			factory := namespace.NewDefaultReplicationResolverFactory()
			detail := &persistencespb.NamespaceDetail{
				Info: &persistencespb.NamespaceInfo{
					Id:   "namespace-id",
					Name: "namespace-name",
				},
				Config: &persistencespb.NamespaceConfig{},
			}
			ns, err := namespace.FromPersistentState(detail, factory(detail))
			require.NoError(t, err)

			// Setup metrics expectations
			metricsHandler := metrics.NewMockHandler(ctrl)
			metricsHandler.EXPECT().WithTags(gomock.Any()).Return(metricsHandler).AnyTimes()
			counter := metrics.NewMockCounterIface(ctrl)
			timer := metrics.NewMockTimerIface(ctrl)
			metricsHandler.EXPECT().Counter(RequestCounter.Name()).Return(counter)
			counter.EXPECT().Record(int64(1),
				metrics.NamespaceTag("namespace-name"),
				metrics.DestinationTag("http://localhost"),
				metrics.OutcomeTag(tc.expectedMetricOutcome))
			metricsHandler.EXPECT().Timer(RequestLatencyHistogram.Name()).Return(timer)
			timer.EXPECT().Record(gomock.Any(),
				metrics.NamespaceTag("namespace-name"),
				metrics.DestinationTag("http://localhost"),
				metrics.OutcomeTag(tc.expectedMetricOutcome))

			// Setup logger
			logger := log.NewTestLogger()

			// Create task handler with mock namespace registry
			nsRegistry := namespace.NewMockRegistry(ctrl)
			nsRegistry.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil)

			handler := &invocationTaskHandler{
				config: &Config{
					RequestTimeout: dynamicconfig.GetDurationPropertyFnFilteredByDestination(time.Second),
					RetryPolicy: func() backoff.RetryPolicy {
						return backoff.NewExponentialRetryPolicy(time.Second)
					},
				},
				namespaceRegistry: nsRegistry,
				metricsHandler:    metricsHandler,
				logger:            logger,
				httpCallerProvider: func(nid common.NamespaceIDAndDestination) HTTPCaller {
					return tc.caller
				},
			}

			chasmRegistry := chasm.NewRegistry(logger)
			err = chasmRegistry.Register(&Library{
				InvocationTaskHandler: handler,
			})
			require.NoError(t, err)
			err = chasmRegistry.Register(&mockNexusCompletionGetterLibrary{})
			require.NoError(t, err)

			callback := &Callback{
				CallbackState: &callbackspb.CallbackState{
					RequestId:        "request-id",
					RegistrationTime: timestamppb.New(time.Now()),
					Callback: &callbackspb.Callback{
						Variant: &callbackspb.Callback_Nexus_{
							Nexus: &callbackspb.Callback_Nexus{
								Url: "http://localhost",
							},
						},
					},
					Status:  callbackspb.CALLBACK_STATUS_SCHEDULED,
					Attempt: 0,
				},
			}

			// Create completion
			completion := nexusrpc.CompleteOperationOptions{}

			executionKey := chasm.ExecutionKey{
				NamespaceID: "namespace-id",
				BusinessID:  "workflow-id",
				RunID:       "run-id",
			}
			testEngine := chasmtest.NewEngine(t, chasmRegistry)
			engineCtx := chasm.NewEngineContext(context.Background(), testEngine)
			_, err = chasm.StartExecution(
				engineCtx,
				executionKey,
				func(ctx chasm.MutableContext, _ struct{}) (*mockNexusCompletionGetterComponent, error) {
					return &mockNexusCompletionGetterComponent{
						completion: completion,
						Callback:   chasm.NewComponentField(ctx, callback),
					}, nil
				},
				struct{}{},
			)
			require.NoError(t, err)

			// Resolve a serialized ref to the child Callback via the root component.
			rootRef := chasm.NewComponentRef[*mockNexusCompletionGetterComponent](executionKey)
			callbackRef, err := chasm.ReadComponent(
				engineCtx,
				rootRef,
				func(_ *mockNexusCompletionGetterComponent, chasmCtx chasm.Context, _ struct{}) (chasm.ComponentRef, error) {
					serialized, err := chasmCtx.Ref(callback)
					if err != nil {
						return chasm.ComponentRef{}, err
					}
					return chasm.DeserializeComponentRef(serialized)
				},
				struct{}{},
			)
			require.NoError(t, err)

			executeErr := handler.Execute(
				engineCtx,
				callbackRef,
				chasm.TaskAttributes{Destination: "http://localhost"},
				&callbackspb.InvocationTask{Attempt: 0},
			)

			// Verify outcome by reading component state directly.
			resultCallback, err := chasm.ReadComponent(
				engineCtx,
				callbackRef,
				func(c *Callback, _ chasm.Context, _ struct{}) (*Callback, error) {
					return c, nil
				},
				struct{}{},
			)
			require.NoError(t, err)
			tc.assertOutcome(t, resultCallback, executeErr)
		})
	}
}

// TestProcessBackoffTask tests the backoff task execution that transitions
// a callback from BACKING_OFF to SCHEDULED state and adds an invocation task.
func TestProcessBackoffTask(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	timeSource := clock.NewEventTimeSource()
	timeSource.Update(time.Now())

	// Create callback in BACKING_OFF state
	callback := &Callback{
		CallbackState: &callbackspb.CallbackState{
			RequestId: "request-id",
			Callback: &callbackspb.Callback{
				Variant: &callbackspb.Callback_Nexus_{
					Nexus: &callbackspb.Callback_Nexus{
						Url: "http://localhost",
					},
				},
			},
			Status:                  callbackspb.CALLBACK_STATUS_BACKING_OFF,
			Attempt:                 1,
			NextAttemptScheduleTime: timestamppb.New(timeSource.Now().Add(time.Minute)),
		},
	}

	// Create mock mutable context
	mockCtx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(component chasm.Component) time.Time {
				return timeSource.Now()
			},
			HandleRef: func(component chasm.Component) ([]byte, error) {
				return []byte{}, nil
			},
		},
	}

	handler := backoffTaskHandler{}

	// Execute the backoff task
	task := &callbackspb.BackoffTask{Attempt: 1}
	attrs := chasm.TaskAttributes{Destination: "http://localhost"}
	err := handler.Execute(mockCtx, callback, attrs, task)

	// Verify no error
	require.NoError(t, err)

	// Verify callback transitioned to SCHEDULED state
	require.Equal(t, callbackspb.CALLBACK_STATUS_SCHEDULED, callback.Status)
	require.Nil(t, callback.NextAttemptScheduleTime)

	// Verify an invocation task was added, carrying the current attempt number
	require.Len(t, mockCtx.Tasks, 1)
	require.IsType(t, &callbackspb.InvocationTask{}, mockCtx.Tasks[0].Payload)
	invTask := mockCtx.Tasks[0].Payload.(*callbackspb.InvocationTask)
	require.Equal(t, int32(1), invTask.Attempt)
}

// TestExecuteInvocationTaskChasm_Outcomes exercises the CHASM-internal
// completion path (callback URL = chasm.NexusCompletionHandlerURL): the handler
// decodes the callback-token header into a component ref and calls
// CompleteNexusOperationChasm on the history service. Cases cover success,
// operation failure, retryable/non-retryable RPC errors, and malformed tokens.
func TestExecuteInvocationTaskChasm_Outcomes(t *testing.T) {
	dummyRef := persistencespb.ChasmComponentRef{
		NamespaceId: "namespace-id",
		BusinessId:  "business-id",
		RunId:       "run-id",
		ArchetypeId: 1234,
	}

	serializedRef, err := dummyRef.Marshal()
	require.NoError(t, err)
	encodedRef := base64.RawURLEncoding.EncodeToString(serializedRef)
	dummyTime := time.Now().UTC()

	createPayloadBytes := func(data []byte) *commonpb.Payload {
		return &commonpb.Payload{Data: data}
	}

	cases := []struct {
		name               string
		setupHistoryClient func(*testing.T, *gomock.Controller) resource.HistoryClient
		completion         nexusrpc.CompleteOperationOptions
		headerValue        string
		assertOutcome      func(*testing.T, *Callback, error)
	}{
		{
			name: "success-with-successful-operation",
			setupHistoryClient: func(t *testing.T, ctrl *gomock.Controller) resource.HistoryClient {
				client := historyservicemock.NewMockHistoryServiceClient(ctrl)
				client.EXPECT().CompleteNexusOperationChasm(
					gomock.Any(),
					gomock.Any(),
				).DoAndReturn(func(ctx context.Context, req *historyservice.CompleteNexusOperationChasmRequest, opts ...grpc.CallOption) (*historyservice.CompleteNexusOperationChasmResponse, error) {
					// Verify completion token
					require.NotNil(t, req.Completion)
					require.NotNil(t, req.Completion.ComponentRef)
					require.Equal(t, "request-id", req.Completion.RequestId)

					// Verify successful operation data
					require.NotNil(t, req.GetSuccess())
					require.Equal(t, []byte("result-data"), req.GetSuccess().Data)
					require.Equal(t, req.CloseTime.AsTime(), dummyTime)

					return &historyservice.CompleteNexusOperationChasmResponse{}, nil
				})
				return client
			},
			completion: func() nexusrpc.CompleteOperationOptions {
				return nexusrpc.CompleteOperationOptions{
					Result:    createPayloadBytes([]byte("result-data")),
					CloseTime: dummyTime,
				}
			}(),
			headerValue: encodedRef,
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.NoError(t, err)
				require.Equal(t, callbackspb.CALLBACK_STATUS_SUCCEEDED, cb.Status)
			},
		},
		{
			// Delivering a failed operation result is still a successful callback delivery.
			name: "success-with-failed-operation",
			setupHistoryClient: func(t *testing.T, ctrl *gomock.Controller) resource.HistoryClient {
				client := historyservicemock.NewMockHistoryServiceClient(ctrl)
				client.EXPECT().CompleteNexusOperationChasm(
					gomock.Any(),
					gomock.Any(),
				).DoAndReturn(func(ctx context.Context, req *historyservice.CompleteNexusOperationChasmRequest, opts ...grpc.CallOption) (*historyservice.CompleteNexusOperationChasmResponse, error) {
					require.NotNil(t, req.Completion)
					require.NotNil(t, req.GetFailure())
					require.Equal(t, req.CloseTime.AsTime(), dummyTime)

					return &historyservice.CompleteNexusOperationChasmResponse{}, nil
				})
				return client
			},
			completion: func() nexusrpc.CompleteOperationOptions {
				return nexusrpc.CompleteOperationOptions{
					Error: &nexus.OperationError{
						State: nexus.OperationStateFailed,
						Cause: &nexus.FailureError{Failure: nexus.Failure{Message: "operation failed"}},
					},
					CloseTime: dummyTime,
				}
			}(),
			headerValue: encodedRef,
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.NoError(t, err)
				require.Equal(t, callbackspb.CALLBACK_STATUS_SUCCEEDED, cb.Status)
			},
		},
		{
			name: "retryable-rpc-error",
			setupHistoryClient: func(t *testing.T, ctrl *gomock.Controller) resource.HistoryClient {
				client := historyservicemock.NewMockHistoryServiceClient(ctrl)
				client.EXPECT().CompleteNexusOperationChasm(
					gomock.Any(),
					gomock.Any(),
				).Return(nil, status.Error(codes.Unavailable, "service unavailable"))
				return client
			},
			completion: func() nexusrpc.CompleteOperationOptions {
				return nexusrpc.CompleteOperationOptions{
					Result: createPayloadBytes([]byte("result-data")),
				}
			}(),
			headerValue: encodedRef,
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.ErrorContains(t, err, "internal error, reference-id:")
				require.Equal(t, callbackspb.CALLBACK_STATUS_BACKING_OFF, cb.Status)
			},
		},
		{
			name: "non-retryable-rpc-error",
			setupHistoryClient: func(t *testing.T, ctrl *gomock.Controller) resource.HistoryClient {
				client := historyservicemock.NewMockHistoryServiceClient(ctrl)
				client.EXPECT().CompleteNexusOperationChasm(
					gomock.Any(),
					gomock.Any(),
				).Return(nil, status.Error(codes.InvalidArgument, "invalid request"))
				return client
			},
			completion: func() nexusrpc.CompleteOperationOptions {
				return nexusrpc.CompleteOperationOptions{
					Result: createPayloadBytes([]byte("result-data")),
				}
			}(),
			headerValue: encodedRef,
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.ErrorContains(t, err, "internal error, reference-id:")
				require.Equal(t, callbackspb.CALLBACK_STATUS_FAILED, cb.Status)
			},
		},
		{
			name: "invalid-base64-header",
			setupHistoryClient: func(t *testing.T, ctrl *gomock.Controller) resource.HistoryClient {
				// No RPC call expected
				return historyservicemock.NewMockHistoryServiceClient(ctrl)
			},
			completion: func() nexusrpc.CompleteOperationOptions {
				return nexusrpc.CompleteOperationOptions{
					Result: createPayloadBytes([]byte("result-data")),
				}
			}(),
			headerValue: "invalid-base64!!!",
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.ErrorContains(t, err, "internal error, reference-id:")
				require.Equal(t, callbackspb.CALLBACK_STATUS_FAILED, cb.Status)
			},
		},
		{
			name: "invalid-protobuf-in-ref",
			setupHistoryClient: func(t *testing.T, ctrl *gomock.Controller) resource.HistoryClient {
				// No RPC call expected
				return historyservicemock.NewMockHistoryServiceClient(ctrl)
			},
			completion: func() nexusrpc.CompleteOperationOptions {
				return nexusrpc.CompleteOperationOptions{
					Result: createPayloadBytes([]byte("result-data")),
				}
			}(),
			headerValue: base64.RawURLEncoding.EncodeToString([]byte("not-valid-protobuf")),
			assertOutcome: func(t *testing.T, cb *Callback, err error) {
				require.ErrorContains(t, err, "internal error, reference-id:")
				require.Equal(t, callbackspb.CALLBACK_STATUS_FAILED, cb.Status)
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			ctrl := gomock.NewController(t)
			defer ctrl.Finish()

			// Setup namespace
			factory := namespace.NewDefaultReplicationResolverFactory()
			detail := &persistencespb.NamespaceDetail{
				Info: &persistencespb.NamespaceInfo{
					Id:   "namespace-id",
					Name: "namespace-name",
				},
				Config: &persistencespb.NamespaceConfig{},
			}
			ns, err := namespace.FromPersistentState(detail, factory(detail))
			require.NoError(t, err)

			// Setup history client
			historyClient := tc.setupHistoryClient(t, ctrl)

			// Setup logger, metricsHandler, and time source
			logger := log.NewTestLogger()
			metricsHandler := metrics.NoopMetricsHandler
			timeSource := clock.NewEventTimeSource()
			timeSource.Update(time.Now())

			// Create mock namespace registry
			nsRegistry := namespace.NewMockRegistry(ctrl)
			nsRegistry.EXPECT().GetNamespaceByID(gomock.Any()).Return(ns, nil)

			// Create mock engine and setup expectations
			mockEngine := chasm.NewMockEngine(ctrl)
			handler := &invocationTaskHandler{
				config: &Config{
					RequestTimeout: dynamicconfig.GetDurationPropertyFnFilteredByDestination(time.Second),
					RetryPolicy: func() backoff.RetryPolicy {
						return backoff.NewExponentialRetryPolicy(time.Second)
					},
				},
				namespaceRegistry: nsRegistry,
				metricsHandler:    metricsHandler,
				logger:            logger,
				historyClient:     historyClient,
			}

			chasmRegistry := chasm.NewRegistry(logger)
			err = chasmRegistry.Register(&Library{
				InvocationTaskHandler: handler,
			})
			require.NoError(t, err)
			err = chasmRegistry.Register(&mockNexusCompletionGetterLibrary{})
			require.NoError(t, err)

			nodeBackend := &chasm.MockNodeBackend{}
			root := chasm.NewEmptyTree(chasmRegistry, timeSource, nodeBackend, chasm.DefaultPathEncoder, logger, metricsHandler)

			// Create headers
			headers := nexus.Header{}
			if tc.headerValue != "" {
				headers.Set(commonnexus.CallbackTokenHeader, tc.headerValue)
			}

			// Create callback with chasm internal URL
			callback := &Callback{
				CallbackState: &callbackspb.CallbackState{
					RequestId:        "request-id",
					RegistrationTime: timestamppb.New(timeSource.Now()),
					Callback: &callbackspb.Callback{
						Variant: &callbackspb.Callback_Nexus_{
							Nexus: &callbackspb.Callback_Nexus{
								Url:    chasm.NexusCompletionHandlerURL,
								Header: headers,
							},
						},
					},
					Status:  callbackspb.CALLBACK_STATUS_SCHEDULED,
					Attempt: 1,
				},
			}

			// Set up the CompletionSource field to return our mock completion
			require.NoError(t, root.SetRootComponent(&mockNexusCompletionGetterComponent{
				completion: tc.completion,
				// Create callback in SCHEDULED state
				Callback: chasm.NewComponentField(
					chasm.NewMutableContext(context.Background(), root),
					callback,
				),
			}))
			_, err = root.CloseTransaction()
			require.NoError(t, err)

			mockEngine.EXPECT().ReadComponent(
				gomock.Any(),
				gomock.Any(),
				gomock.Any(),
			).DoAndReturn(func(ctx context.Context, ref chasm.ComponentRef, readFn func(chasm.Context, chasm.Component) error, opts ...chasm.TransitionOption) error {
				// Create a mock context
				mockCtx := &chasm.MockContext{
					HandleNow: func(component chasm.Component) time.Time {
						return timeSource.Now()
					},
					HandleRef: func(component chasm.Component) ([]byte, error) {
						return []byte{}, nil
					},
					HandleExecutionKey: func() chasm.ExecutionKey {
						return chasm.ExecutionKey{
							NamespaceID: "namespace-id",
							BusinessID:  "workflow-id",
							RunID:       "run-id",
						}
					},
				}

				// Call the readFn with our callback
				return readFn(mockCtx, callback)
			})

			mockEngine.EXPECT().UpdateComponent(
				gomock.Any(),
				gomock.Any(),
				gomock.Any(),
			).DoAndReturn(func(ctx context.Context, ref chasm.ComponentRef, updateFn func(chasm.MutableContext, chasm.Component) error, opts ...chasm.TransitionOption) ([]any, error) {
				// Create a mock mutable context
				mockCtx := &chasm.MockMutableContext{
					MockContext: chasm.MockContext{
						HandleNow: func(component chasm.Component) time.Time {
							return timeSource.Now()
						},
						HandleRef: func(component chasm.Component) ([]byte, error) {
							return []byte{}, nil
						},
					},
				}

				// Call the updateFn with our callback
				err := updateFn(mockCtx, callback)
				return nil, err
			})

			// Create ComponentRef
			ref := chasm.NewComponentRef[*Callback](chasm.ExecutionKey{
				NamespaceID: "namespace-id",
				BusinessID:  "workflow-id",
				RunID:       "run-id",
			})

			// Create context with engine
			ctx := chasm.NewEngineContext(context.Background(), mockEngine)

			// Execute the invocation task
			task := &callbackspb.InvocationTask{Attempt: 1}
			err = handler.Execute(
				ctx,
				ref,
				chasm.TaskAttributes{},
				task,
			)

			tc.assertOutcome(t, callback, err)
		})
	}
}

// ---- new file: chasm/lib/callback/validator.go (mode 100644) ----

package callback

import (
	"context"
	"fmt"
	"strings"

	commonpb "go.temporal.io/api/common/v1"
	"go.temporal.io/api/serviceerror"
	"go.temporal.io/server/common/dynamicconfig"
	"google.golang.org/grpc/status"
)

// Validator validates completion callbacks attached to executions (workflows and standalone activities).
+type Validator interface { + Validate(ctx context.Context, namespaceName string, cbs []*commonpb.Callback) error +} + +type validator struct { + maxCallbacksPerExecution dynamicconfig.IntPropertyFnWithNamespaceFilter + urlMaxLength dynamicconfig.IntPropertyFnWithNamespaceFilter + headerMaxSize dynamicconfig.IntPropertyFnWithNamespaceFilter + endpointRules dynamicconfig.TypedPropertyFnWithNamespaceFilter[AddressMatchRules] +} + +func NewValidator( + maxCallbacksPerExecution dynamicconfig.IntPropertyFnWithNamespaceFilter, + urlMaxLength dynamicconfig.IntPropertyFnWithNamespaceFilter, + headerMaxSize dynamicconfig.IntPropertyFnWithNamespaceFilter, + endpointRules dynamicconfig.TypedPropertyFnWithNamespaceFilter[AddressMatchRules], +) Validator { + return &validator{ + maxCallbacksPerExecution: maxCallbacksPerExecution, + urlMaxLength: urlMaxLength, + headerMaxSize: headerMaxSize, + endpointRules: endpointRules, + } +} + +// Validate validates completion callbacks: count, URL length, endpoint allowlist, header size, and normalizes header +// keys to lowercase. 
+func (v *validator) Validate(_ context.Context, namespaceName string, cbs []*commonpb.Callback) error { + if len(cbs) > v.maxCallbacksPerExecution(namespaceName) { + return serviceerror.NewInvalidArgumentf( + "cannot attach more than %d callbacks to an execution", v.maxCallbacksPerExecution(namespaceName), + ) + } + + for _, cb := range cbs { + switch variant := cb.GetVariant().(type) { + case *commonpb.Callback_Nexus_: + rawURL := variant.Nexus.GetUrl() + if len(rawURL) > v.urlMaxLength(namespaceName) { + return serviceerror.NewInvalidArgumentf( + "invalid url: url length longer than max length allowed of %d", v.urlMaxLength(namespaceName), + ) + } + if err := v.endpointRules(namespaceName).Validate(rawURL); err != nil { + if s, ok := status.FromError(err); ok { + return serviceerror.NewInvalidArgument(s.Message()) + } + return serviceerror.NewInvalidArgument(err.Error()) + } + + headerSize := 0 + lowerCaseHeaders := make(map[string]string, len(variant.Nexus.GetHeader())) + for k, val := range variant.Nexus.GetHeader() { + headerSize += len(k) + len(val) + lowerCaseHeaders[strings.ToLower(k)] = val + } + if headerSize > v.headerMaxSize(namespaceName) { + return serviceerror.NewInvalidArgumentf( + "invalid header: header size longer than max allowed size of %d", v.headerMaxSize(namespaceName), + ) + } + variant.Nexus.Header = lowerCaseHeaders + case *commonpb.Callback_Internal_: + continue + default: + return serviceerror.NewUnimplemented(fmt.Sprintf("unknown callback variant: %T", variant)) + } + } + return nil +} diff --git a/chasm/lib/callback/validator_test.go b/chasm/lib/callback/validator_test.go new file mode 100644 index 00000000000..63d95d87d96 --- /dev/null +++ b/chasm/lib/callback/validator_test.go @@ -0,0 +1,153 @@ +package callback + +import ( + "context" + "regexp" + "testing" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" +) + +func TestValidateCallbacks(t *testing.T) { + 
allowAll := AddressMatchRules{ + Rules: []AddressMatchRule{ + {Regexp: regexp.MustCompile(`.*`), AllowInsecure: true}, + }, + } + v := NewValidator( + func(string) int { return 10 }, + func(string) int { return 1000 }, + func(string) int { return 4096 }, + func(string) AddressMatchRules { return allowAll }, + ) + + t.Run("ValidNexusCallback", func(t *testing.T) { + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: "http://localhost:8080/callback", + Header: map[string]string{"Content-Type": "application/json"}, + }, + }}, + } + err := v.Validate(context.Background(), "ns", cbs) + require.NoError(t, err) + }) + + t.Run("TooManyCallbacks", func(t *testing.T) { + v := NewValidator( + func(string) int { return 1 }, + func(string) int { return 1000 }, + func(string) int { return 4096 }, + func(string) AddressMatchRules { return allowAll }, + ) + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Nexus_{Nexus: &commonpb.Callback_Nexus{Url: "http://localhost/cb1"}}}, + {Variant: &commonpb.Callback_Nexus_{Nexus: &commonpb.Callback_Nexus{Url: "http://localhost/cb2"}}}, + } + err := v.Validate(context.Background(), "ns", cbs) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), "cannot attach more than 1 callbacks") + }) + + t.Run("URLTooLong", func(t *testing.T) { + v := NewValidator( + func(string) int { return 10 }, + func(string) int { return 50 }, + func(string) int { return 4096 }, + func(string) AddressMatchRules { return allowAll }, + ) + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: "http://localhost/" + string(make([]byte, 51)), + }, + }}, + } + err := v.Validate(context.Background(), "ns", cbs) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), "url length longer than max length 
allowed") + }) + + t.Run("HeaderTooLarge", func(t *testing.T) { + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: "http://localhost:8080/callback", + Header: map[string]string{"X-Large": string(make([]byte, 5000))}, + }, + }}, + } + err := v.Validate(context.Background(), "ns", cbs) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), "header size longer than max allowed size") + }) + + t.Run("HeaderKeysNormalizedToLowercase", func(t *testing.T) { + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: "http://localhost:8080/callback", + Header: map[string]string{"Content-Type": "application/json", "X-Custom": "value"}, + }, + }}, + } + err := v.Validate(context.Background(), "ns", cbs) + require.NoError(t, err) + nexus := cbs[0].GetNexus() + require.Equal(t, "application/json", nexus.Header["content-type"]) + require.Equal(t, "value", nexus.Header["x-custom"]) + _, hasMixed := nexus.Header["Content-Type"] + require.False(t, hasMixed) + }) + + t.Run("URLNotInAllowlist", func(t *testing.T) { + v := NewValidator( + func(string) int { return 10 }, + func(string) int { return 1000 }, + func(string) int { return 4096 }, + func(string) AddressMatchRules { return AddressMatchRules{} }, + ) + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: "http://localhost:8080/callback", + }, + }}, + } + err := v.Validate(context.Background(), "ns", cbs) + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), "does not match any configured callback address") + }) + + t.Run("UnsupportedVariant", func(t *testing.T) { + cbs := []*commonpb.Callback{ + {Variant: nil}, + } + err := v.Validate(context.Background(), "ns", cbs) + var unimplementedErr *serviceerror.Unimplemented 
+ require.ErrorAs(t, err, &unimplementedErr) + require.Contains(t, err.Error(), "unknown callback variant") + }) + + t.Run("EmptyCallbacksNoError", func(t *testing.T) { + err := v.Validate(context.Background(), "ns", nil) + require.NoError(t, err) + }) + + t.Run("InternalCallbackSkipped", func(t *testing.T) { + cbs := []*commonpb.Callback{ + {Variant: &commonpb.Callback_Internal_{ + Internal: &commonpb.Callback_Internal{}, + }}, + } + err := v.Validate(context.Background(), "ns", cbs) + require.NoError(t, err) + }) +} diff --git a/chasm/lib/nexusoperation/cancellation.go b/chasm/lib/nexusoperation/cancellation.go new file mode 100644 index 00000000000..955ee40a716 --- /dev/null +++ b/chasm/lib/nexusoperation/cancellation.go @@ -0,0 +1,173 @@ +package nexusoperation + +import ( + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/chasm" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + "go.temporal.io/server/common/backoff" +) + +var _ chasm.Component = (*Cancellation)(nil) +var _ chasm.StateMachine[nexusoperationpb.CancellationStatus] = (*Cancellation)(nil) + +// Cancellation is a CHASM component that represents a pending cancellation of a Nexus operation. +type Cancellation struct { + chasm.UnimplementedComponent + + // Persisted internal state + *nexusoperationpb.CancellationState + + // Operation is a pointer to the parent Operation component. + Operation chasm.ParentPtr[*Operation] +} + +func newCancellation(state *nexusoperationpb.CancellationState) *Cancellation { + return &Cancellation{CancellationState: state} +} + +// LifecycleState maps the cancellation's status to a CHASM lifecycle state. 
// LifecycleState maps the cancellation's status to a CHASM lifecycle state.
func (c *Cancellation) LifecycleState(_ chasm.Context) chasm.LifecycleState {
	switch c.Status {
	case nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED:
		return chasm.LifecycleStateCompleted
	case nexusoperationpb.CANCELLATION_STATUS_FAILED,
		nexusoperationpb.CANCELLATION_STATUS_TIMED_OUT:
		return chasm.LifecycleStateFailed
	default:
		// UNSPECIFIED, SCHEDULED, BACKING_OFF, and BLOCKED all count as running.
		return chasm.LifecycleStateRunning
	}
}

// StateMachineState returns the current cancellation status.
func (c *Cancellation) StateMachineState() nexusoperationpb.CancellationStatus {
	return c.Status
}

// SetStateMachineState sets the cancellation status.
func (c *Cancellation) SetStateMachineState(status nexusoperationpb.CancellationStatus) {
	c.Status = status
}

// cancelArgs holds the arguments needed to cancel a Nexus operation.
// It is a plain snapshot assembled under a read transaction (see loadArgs) so the
// actual network call can happen outside any component lock.
type cancelArgs struct {
	service                string            // Nexus service name
	operation              string            // Nexus operation name
	token                  string            // operation token identifying the remote operation
	requestID              string            // request ID of the originating schedule request
	endpointName           string            // endpoint name (also used as the task destination)
	endpointID             string            // endpoint registry ID
	currentTime            time.Time         // component "now" at load time
	scheduledTime          time.Time         // when the operation was scheduled
	startedTime            time.Time         // when the operation was started
	scheduleToCloseTimeout time.Duration     // zero means unset
	startToCloseTimeout    time.Duration     // zero means unset
	headers                map[string]string // Nexus request headers
	payload                *commonpb.Payload // operation input
}

// onCompleted records a successful cancellation. If the parent operation has a Store
// component attached, the terminal outcome is delegated to it; otherwise the succeeded
// transition is applied directly to this state machine.
func (c *Cancellation) onCompleted(ctx chasm.MutableContext) error {
	op := c.Operation.Get(ctx)
	if store, ok := op.Store.TryGet(ctx); ok {
		return store.OnNexusOperationCancellationCompleted(ctx, op)
	}
	return TransitionCancellationSucceeded.Apply(c, ctx, EventCancellationSucceeded{})
}

// onFailed records a non-retryable cancellation failure. Like onCompleted, it delegates
// to the operation's Store component when one is attached.
func (c *Cancellation) onFailed(ctx chasm.MutableContext, failure *failurepb.Failure) error {
	op := c.Operation.Get(ctx)
	if store, ok := op.Store.TryGet(ctx); ok {
		return store.OnNexusOperationCancellationFailed(ctx, op, failure)
	}
	return TransitionCancellationFailed.Apply(c, ctx, EventCancellationFailed{
		Failure: failure,
	})
}
// loadArgs loads the cancel arguments from the cancellation and its parent operation.
// Invocation data (input payload and headers) comes from the operation's Store
// component when present; otherwise it falls back to the operation's RequestData child.
func (c *Cancellation) loadArgs(
	ctx chasm.Context,
	_ chasm.NoValue,
) (cancelArgs, error) {
	op := c.Operation.Get(ctx)

	var invocationData InvocationData
	if store, ok := op.Store.TryGet(ctx); ok {
		var err error
		invocationData, err = store.NexusOperationInvocationData(ctx, op)
		if err != nil {
			return cancelArgs{}, err
		}
	} else {
		requestData := op.RequestData.Get(ctx)
		invocationData = InvocationData{
			Input:  requestData.GetInput(),
			Header: requestData.GetNexusHeader(),
		}
	}

	return cancelArgs{
		service:                op.GetService(),
		operation:              op.GetOperation(),
		token:                  op.GetOperationToken(),
		requestID:              op.GetRequestId(),
		endpointName:           op.GetEndpoint(),
		endpointID:             op.GetEndpointId(),
		currentTime:            ctx.Now(c),
		scheduledTime:          op.GetScheduledTime().AsTime(),
		startedTime:            op.GetStartedTime().AsTime(),
		scheduleToCloseTimeout: op.GetScheduleToCloseTimeout().AsDuration(),
		startToCloseTimeout:    op.GetStartToCloseTimeout().AsDuration(),
		headers:                invocationData.Header,
		payload:                invocationData.Input,
	}, nil
}

// saveCancellationResultInput is the input to the Cancellation.saveResult method.
type saveCancellationResultInput struct {
	// result is the classified outcome of the cancel call (OK / fail / retry).
	result cancellationResult
	// retryPolicy is resolved lazily; it is only consulted for retryable failures.
	retryPolicy func() backoff.RetryPolicy
}
// saveResult applies the outcome of a cancel operation call to the cancellation state machine.
func (c *Cancellation) saveResult(
	ctx chasm.MutableContext,
	input saveCancellationResultInput,
) (chasm.NoValue, error) {
	switch r := input.result.(type) {
	case cancellationResultOK:
		return nil, c.onCompleted(ctx)
	case cancellationResultFail:
		// Non-retryable failure: terminal outcome.
		return nil, c.onFailed(ctx, r.failure)
	case cancellationResultRetry:
		// Retryable failure: back off and schedule the next attempt.
		return nil, transitionCancellationAttemptFailed.Apply(c, ctx, EventCancellationAttemptFailed{
			Failure:     r.failure,
			RetryPolicy: input.retryPolicy(),
		})
	default:
		// Should be unreachable; cancellationResult is a closed set of types.
		return nil, serviceerror.NewInternalf("cannot save cancellation result of type %T", r)
	}
}

// CancellationAPIState maps the internal cancellation status to the public API enum.
func CancellationAPIState(status nexusoperationpb.CancellationStatus) enumspb.NexusOperationCancellationState {
	switch status {
	case nexusoperationpb.CANCELLATION_STATUS_SCHEDULED:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_SCHEDULED
	case nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_BACKING_OFF
	case nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_SUCCEEDED
	case nexusoperationpb.CANCELLATION_STATUS_FAILED:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_FAILED
	case nexusoperationpb.CANCELLATION_STATUS_TIMED_OUT:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_TIMED_OUT
	case nexusoperationpb.CANCELLATION_STATUS_BLOCKED:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_BLOCKED
	default:
		return enumspb.NEXUS_OPERATION_CANCELLATION_STATE_UNSPECIFIED
	}
}
// EventCancellationScheduled is triggered when cancellation is meant to be scheduled for the first time - immediately
// after it has been requested.
type EventCancellationScheduled struct {
	// Destination is the endpoint name for the cancellation task.
	// Must be provided by the caller because ParentPtr is not available during inline creation.
	Destination string
}

// TransitionCancellationScheduled moves UNSPECIFIED -> SCHEDULED, bumps the attempt
// counter, and emits an immediate CancellationTask targeting the given destination.
var TransitionCancellationScheduled = chasm.NewTransition(
	[]nexusoperationpb.CancellationStatus{nexusoperationpb.CANCELLATION_STATUS_UNSPECIFIED},
	nexusoperationpb.CANCELLATION_STATUS_SCHEDULED,
	func(c *Cancellation, ctx chasm.MutableContext, event EventCancellationScheduled) error {
		c.Attempt++

		// Attempt is stamped on the task so stale tasks can be invalidated later.
		ctx.AddTask(c, chasm.TaskAttributes{
			Destination: event.Destination,
		}, &nexusoperationpb.CancellationTask{
			Attempt: c.Attempt,
		})

		return nil
	},
)

// EventCancellationRescheduled is triggered when cancellation is meant to be rescheduled after backing off from a
// previous attempt.
type EventCancellationRescheduled struct {
}

// transitionCancellationRescheduled moves BACKING_OFF -> SCHEDULED for the next attempt.
// Unlike the initial schedule, the destination is read from the parent operation here,
// since the ParentPtr is available after creation.
var transitionCancellationRescheduled = chasm.NewTransition(
	[]nexusoperationpb.CancellationStatus{nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF},
	nexusoperationpb.CANCELLATION_STATUS_SCHEDULED,
	func(c *Cancellation, ctx chasm.MutableContext, event EventCancellationRescheduled) error {
		c.Attempt++
		// Clear the backoff deadline; it only applies while BACKING_OFF.
		c.NextAttemptScheduleTime = nil

		ctx.AddTask(c, chasm.TaskAttributes{
			Destination: c.Operation.Get(ctx).GetEndpoint(),
		}, &nexusoperationpb.CancellationTask{
			Attempt: c.Attempt,
		})

		return nil
	},
)
// EventCancellationAttemptFailed is triggered when a cancellation attempt is failed with a retryable error.
type EventCancellationAttemptFailed struct {
	Failure *failurepb.Failure
	// RetryPolicy determines the backoff delay before the next attempt.
	RetryPolicy backoff.RetryPolicy
}

// transitionCancellationAttemptFailed moves SCHEDULED -> BACKING_OFF, records the
// failure, computes the next attempt time from the retry policy, and emits a backoff
// timer task for it.
var transitionCancellationAttemptFailed = chasm.NewTransition(
	[]nexusoperationpb.CancellationStatus{nexusoperationpb.CANCELLATION_STATUS_SCHEDULED},
	nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF,
	func(c *Cancellation, ctx chasm.MutableContext, event EventCancellationAttemptFailed) error {
		currentTime := ctx.Now(c)

		c.LastAttemptCompleteTime = timestamppb.New(currentTime)
		c.LastAttemptFailure = event.Failure

		// Elapsed time of 0 and nil error: only the attempt count drives the delay here.
		nextDelay := event.RetryPolicy.ComputeNextDelay(0, int(c.Attempt), nil)
		nextAttemptScheduleTime := currentTime.Add(nextDelay)
		c.NextAttemptScheduleTime = timestamppb.New(nextAttemptScheduleTime)

		ctx.AddTask(c, chasm.TaskAttributes{
			ScheduledTime: nextAttemptScheduleTime,
		}, &nexusoperationpb.CancellationBackoffTask{
			Attempt: c.Attempt,
		})

		return nil
	},
)

// EventCancellationFailed is triggered when a cancellation attempt is failed with a non retryable error.
type EventCancellationFailed struct {
	Failure *failurepb.Failure
}

// TransitionCancellationFailed moves to the terminal FAILED state, recording the
// failure and completion time.
var TransitionCancellationFailed = chasm.NewTransition(
	[]nexusoperationpb.CancellationStatus{
		// We can immediately transition to failed since we don't know how to send a cancellation request for an
		// unstarted operation.
		// TODO: This doesn't seem to happen in either the HSM or CHASM implementations.
		nexusoperationpb.CANCELLATION_STATUS_UNSPECIFIED,
		nexusoperationpb.CANCELLATION_STATUS_SCHEDULED,
	},
	nexusoperationpb.CANCELLATION_STATUS_FAILED,
	func(c *Cancellation, ctx chasm.MutableContext, event EventCancellationFailed) error {
		currentTime := ctx.Now(c)
		c.LastAttemptCompleteTime = timestamppb.New(currentTime)
		c.LastAttemptFailure = event.Failure
		// Terminal state - no tasks to emit.
		return nil
	},
)
// EventCancellationSucceeded is triggered when a cancellation attempt succeeds.
type EventCancellationSucceeded struct {
}

// TransitionCancellationSucceeded moves SCHEDULED to the terminal SUCCEEDED state,
// recording the completion time and clearing any failure from earlier attempts.
var TransitionCancellationSucceeded = chasm.NewTransition(
	[]nexusoperationpb.CancellationStatus{nexusoperationpb.CANCELLATION_STATUS_SCHEDULED},
	nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED,
	func(c *Cancellation, ctx chasm.MutableContext, event EventCancellationSucceeded) error {
		currentTime := ctx.Now(c)
		c.LastAttemptCompleteTime = timestamppb.New(currentTime)
		c.LastAttemptFailure = nil

		// Terminal state - no tasks to emit.
		return nil
	},
)

// TestTransitionCancellationScheduled verifies the initial schedule transition: state,
// attempt counter, and the emitted CancellationTask.
func TestTransitionCancellationScheduled(t *testing.T) {
	ctx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(chasm.Component) time.Time { return defaultTime },
		},
	}

	cancellation := newCancellation(&nexusoperationpb.CancellationState{Status: nexusoperationpb.CANCELLATION_STATUS_UNSPECIFIED})

	err := TransitionCancellationScheduled.Apply(cancellation, ctx, EventCancellationScheduled{
		Destination: "test-endpoint",
	})
	require.NoError(t, err)

	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, cancellation.Status)
	require.Equal(t, int32(1), cancellation.Attempt)

	// Verify cancellation task with correct destination.
	require.Len(t, ctx.Tasks, 1)
	task := ctx.Tasks[0]
	require.Equal(t, "test-endpoint", task.Attributes.Destination)
	require.Empty(t, task.Attributes.ScheduledTime)
	cancellationTask, ok := task.Payload.(*nexusoperationpb.CancellationTask)
	require.True(t, ok, "expected CancellationTask")
	require.Equal(t, int32(1), cancellationTask.Attempt)
}

// TestTransitionCancellationAttemptFailed verifies the retryable-failure transition:
// recorded failure, next-attempt schedule time, and the emitted backoff task.
func TestTransitionCancellationAttemptFailed(t *testing.T) {
	ctx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(chasm.Component) time.Time { return defaultTime },
		},
	}

	retryDelay := 5 * time.Second
	cancellation := newCancellation(&nexusoperationpb.CancellationState{Status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, Attempt: 2})
	failure := &failurepb.Failure{Message: "transient error"}

	err := transitionCancellationAttemptFailed.Apply(cancellation, ctx, EventCancellationAttemptFailed{
		Failure:     failure,
		RetryPolicy: backoff.NewConstantDelayRetryPolicy(retryDelay),
	})
	require.NoError(t, err)

	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, cancellation.Status)
	require.Equal(t, defaultTime, cancellation.LastAttemptCompleteTime.AsTime())
	protorequire.ProtoEqual(t, failure, cancellation.LastAttemptFailure)
	require.Equal(t, defaultTime.Add(retryDelay), cancellation.NextAttemptScheduleTime.AsTime())

	// Verify backoff task.
	require.Len(t, ctx.Tasks, 1)
	backoffTask, ok := ctx.Tasks[0].Payload.(*nexusoperationpb.CancellationBackoffTask)
	require.True(t, ok, "expected CancellationBackoffTask")
	require.Equal(t, int32(2), backoffTask.Attempt)
	require.Equal(t, defaultTime.Add(retryDelay), ctx.Tasks[0].Attributes.ScheduledTime)
	require.Empty(t, ctx.Tasks[0].Attributes.Destination)
}

// TestTransitionCancellationRescheduled verifies rescheduling after backoff: attempt
// bump, cleared NextAttemptScheduleTime, and a new task targeting the parent
// operation's endpoint.
func TestTransitionCancellationRescheduled(t *testing.T) {
	ctx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(chasm.Component) time.Time { return defaultTime },
		},
	}

	op := newTestOperation()
	cancellation := newCancellation(&nexusoperationpb.CancellationState{
		Status:                  nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF,
		Attempt:                 2,
		NextAttemptScheduleTime: timestamppb.New(defaultTime.Add(time.Minute)),
	})
	cancellation.Operation = chasm.NewMockParentPtr[*Operation](op)

	err := transitionCancellationRescheduled.Apply(cancellation, ctx, EventCancellationRescheduled{})
	require.NoError(t, err)

	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, cancellation.Status)
	require.Equal(t, int32(3), cancellation.Attempt)
	require.Nil(t, cancellation.NextAttemptScheduleTime)

	// Verify cancellation task with correct destination.
	require.Len(t, ctx.Tasks, 1)
	task := ctx.Tasks[0]
	require.Equal(t, "test-endpoint", task.Attributes.Destination)
	require.Empty(t, task.Attributes.ScheduledTime)
	cancellationTask, ok := task.Payload.(*nexusoperationpb.CancellationTask)
	require.True(t, ok, "expected CancellationTask")
	require.Equal(t, int32(3), cancellationTask.Attempt)
}

// TestTransitionCancellationFailed verifies the terminal failure transition records the
// failure and emits no tasks.
func TestTransitionCancellationFailed(t *testing.T) {
	ctx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(chasm.Component) time.Time { return defaultTime },
		},
	}

	cancellation := newCancellation(&nexusoperationpb.CancellationState{Status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED})
	failure := &failurepb.Failure{Message: "permanent failure"}

	err := TransitionCancellationFailed.Apply(cancellation, ctx, EventCancellationFailed{
		Failure: failure,
	})
	require.NoError(t, err)

	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, cancellation.Status)
	require.Equal(t, defaultTime, cancellation.LastAttemptCompleteTime.AsTime())
	protorequire.ProtoEqual(t, failure, cancellation.LastAttemptFailure)
	// Terminal state - no tasks.
	require.Empty(t, ctx.Tasks)
}

// TestTransitionCancellationSucceeded verifies the terminal success transition clears
// any prior failure and emits no tasks.
func TestTransitionCancellationSucceeded(t *testing.T) {
	ctx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(chasm.Component) time.Time { return defaultTime },
		},
	}

	cancellation := newCancellation(&nexusoperationpb.CancellationState{
		Status:             nexusoperationpb.CANCELLATION_STATUS_SCHEDULED,
		Attempt:            1,
		LastAttemptFailure: &failurepb.Failure{Message: "previous attempt failed"},
	})

	err := TransitionCancellationSucceeded.Apply(cancellation, ctx, EventCancellationSucceeded{})
	require.NoError(t, err)

	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, cancellation.Status)
	require.Equal(t, defaultTime, cancellation.LastAttemptCompleteTime.AsTime())
	// LastAttemptFailure should be cleared on success.
	require.Nil(t, cancellation.LastAttemptFailure)
	// Terminal state - no tasks.
	require.Empty(t, ctx.Tasks)
}

// TestCancellationLifecycleState table-tests the status -> lifecycle-state mapping.
// NOTE(review): CANCELLATION_STATUS_BLOCKED is not covered here; per LifecycleState's
// default branch it would map to Running — consider adding a case to pin that.
func TestCancellationLifecycleState(t *testing.T) {
	testCases := []struct {
		status   nexusoperationpb.CancellationStatus
		expected chasm.LifecycleState
	}{
		{nexusoperationpb.CANCELLATION_STATUS_UNSPECIFIED, chasm.LifecycleStateRunning},
		{nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, chasm.LifecycleStateRunning},
		{nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, chasm.LifecycleStateRunning},
		{nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, chasm.LifecycleStateCompleted},
		{nexusoperationpb.CANCELLATION_STATUS_FAILED, chasm.LifecycleStateFailed},
		{nexusoperationpb.CANCELLATION_STATUS_TIMED_OUT, chasm.LifecycleStateFailed},
	}

	for _, tc := range testCases {
		t.Run(tc.status.String(), func(t *testing.T) {
			cancellation := newCancellation(&nexusoperationpb.CancellationState{Status: tc.status})
			require.Equal(t, tc.expected, cancellation.LifecycleState(nil))
		})
	}
}

// TestCancellationStateMachineState verifies the StateMachineState getter/setter pair.
func TestCancellationStateMachineState(t *testing.T) {
	cancellation := newCancellation(&nexusoperationpb.CancellationState{Status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED})
	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, cancellation.StateMachineState())

	cancellation.SetStateMachineState(nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED)
	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, cancellation.StateMachineState())
}

func TestCancellationFullLifecycle(t *testing.T) {
	// Test a full lifecycle: scheduled → attempt failed → backing off → (rescheduled →) attempt failed → succeeded.
	// We can't test Scheduled/Rescheduled transitions directly due to ParentPtr requirements,
	// but we can test the retry and terminal transitions in sequence.

	ctx := &chasm.MockMutableContext{
		MockContext: chasm.MockContext{
			HandleNow: func(chasm.Component) time.Time { return defaultTime },
		},
	}

	cancellation := newCancellation(&nexusoperationpb.CancellationState{Status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, Attempt: 1})

	// First attempt fails with retryable error.
	retryPolicy := backoff.NewExponentialRetryPolicy(time.Second)
	err := transitionCancellationAttemptFailed.Apply(cancellation, ctx, EventCancellationAttemptFailed{
		Failure:     &failurepb.Failure{Message: "transient"},
		RetryPolicy: retryPolicy,
	})
	require.NoError(t, err)
	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, cancellation.Status)
	require.Len(t, ctx.Tasks, 1)
	backoffTask, ok := ctx.Tasks[0].Payload.(*nexusoperationpb.CancellationBackoffTask)
	require.True(t, ok)
	require.Equal(t, int32(1), backoffTask.Attempt)

	// Simulate rescheduling by manually transitioning to SCHEDULED (since we can't call
	// transitionCancellationRescheduled without ParentPtr).
	cancellation.Status = nexusoperationpb.CANCELLATION_STATUS_SCHEDULED
	cancellation.Attempt = 2
	cancellation.NextAttemptScheduleTime = nil

	// Second attempt succeeds.
	ctx.Tasks = nil // Reset tasks.
	err = TransitionCancellationSucceeded.Apply(cancellation, ctx, EventCancellationSucceeded{})
	require.NoError(t, err)
	require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, cancellation.Status)
	require.Nil(t, cancellation.LastAttemptFailure)
	require.Empty(t, ctx.Tasks)
}
// cancellationResult is a closed set of outcomes for a cancel invocation attempt:
// success, non-retryable failure, or retryable failure.
type cancellationResult interface {
	mustImplementCancellationResult()
}

// cancellationResultOK indicates the cancel call succeeded.
type cancellationResultOK struct{}

func (cancellationResultOK) mustImplementCancellationResult() {}

// cancellationResultFail indicates a non-retryable (terminal) failure.
type cancellationResultFail struct {
	failure *failurepb.Failure
}

func (cancellationResultFail) mustImplementCancellationResult() {}

// cancellationResultRetry indicates a retryable failure; the attempt will be retried
// after backoff.
type cancellationResultRetry struct {
	failure *failurepb.Failure
}

func (cancellationResultRetry) mustImplementCancellationResult() {}

// newCancellationResult classifies the error returned by a cancel call into a
// cancellationResult. nil maps to OK; an operationTimeoutBelowMinError maps to a
// terminal timeout failure; everything else is split retryable/terminal by
// callErrorToFailure. The second return value is a classification error, not the
// call error itself.
func newCancellationResult(callErr error) (cancellationResult, error) {
	if callErr == nil {
		return cancellationResultOK{}, nil
	}

	if opTimeoutBelowMinErr, ok := errors.AsType[*operationTimeoutBelowMinError](callErr); ok {
		failure := &failurepb.Failure{
			Message: "operation timed out before cancellation could be delivered",
			FailureInfo: &failurepb.Failure_TimeoutFailureInfo{
				TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{
					TimeoutType: opTimeoutBelowMinErr.timeoutType,
				},
			},
		}
		return cancellationResultFail{failure: failure}, nil
	}

	failure, retryable, err := callErrorToFailure(callErr)
	if err != nil {
		return nil, err
	}
	if retryable {
		return cancellationResultRetry{failure: failure}, nil
	}
	return cancellationResultFail{failure: failure}, nil
}

// cancellationInvocationTaskHandlerOptions are the fx-injected dependencies for the
// cancellation invocation task handler.
type cancellationInvocationTaskHandlerOptions struct {
	fx.In

	InvocationTaskHandlerOptions
}

// cancellationInvocationTaskHandler executes CancellationTask side-effect tasks by
// issuing a Nexus CancelOperation call and persisting the outcome.
type cancellationInvocationTaskHandler struct {
	chasm.SideEffectTaskHandlerBase[*nexusoperationpb.CancellationTask]

	nexusTaskHandlerBase
}

// newCancellationInvocationTaskHandler constructs the handler from fx options.
func newCancellationInvocationTaskHandler(opts cancellationInvocationTaskHandlerOptions) *cancellationInvocationTaskHandler {
	return &cancellationInvocationTaskHandler{
		nexusTaskHandlerBase: opts.toBase(),
	}
}

// Validate reports whether the task is still current: the cancellation must be in
// SCHEDULED state and the task's attempt must match the component's attempt (stale
// tasks from earlier attempts are dropped).
func (h *cancellationInvocationTaskHandler) Validate(
	_ chasm.Context,
	cancellation *Cancellation,
	_ chasm.TaskAttributes,
	task *nexusoperationpb.CancellationTask,
) (bool, error) {
	return cancellation.Status == nexusoperationpb.CANCELLATION_STATUS_SCHEDULED &&
		cancellation.GetAttempt() == task.GetAttempt(), nil
}

// Execute performs the CancelOperation Nexus call for the referenced cancellation:
// it loads the call arguments under a read transaction, resolves the endpoint, caps
// the call timeout by the operation's remaining start-to-close / schedule-to-close
// budget, issues the call, records metrics, and persists the classified result.
func (h *cancellationInvocationTaskHandler) Execute(
	ctx context.Context,
	cancelRef chasm.ComponentRef,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.CancellationTask,
) error {
	ns, err := h.namespaceRegistry.GetNamespaceByID(namespace.ID(cancelRef.NamespaceID))
	if err != nil {
		return serviceerror.NewNotFoundf("failed to get namespace by ID: %v", err)
	}

	args, err := chasm.ReadComponent(ctx, cancelRef, (*Cancellation).loadArgs, nil)
	if err != nil {
		return err
	}

	endpoint, err := h.lookupEndpoint(ctx, ns.ID(), args.endpointID, args.endpointName)
	if err != nil {
		// A missing endpoint is a terminal outcome for the cancellation, not a
		// transient task error: record it as a not-found handler error.
		if _, ok := errors.AsType[*serviceerror.NotFound](err); ok {
			h.logger.Error("endpoint not found while processing cancellation invocation", tag.Error(err))
			handlerErr := nexus.NewHandlerErrorf(nexus.HandlerErrorTypeNotFound, "endpoint not registered")
			return h.saveCancellationResult(ctx, cancelRef, handlerErr)
		}
		return err
	}

	// Cap the call timeout by whichever of the operation's remaining timeout budgets
	// is smallest, remembering which one applied for error classification.
	callTimeout := h.config.RequestTimeout(ns.Name().String(), attrs.Destination)
	var timeoutType enumspb.TimeoutType
	if args.startToCloseTimeout > 0 {
		if t := args.startToCloseTimeout - args.currentTime.Sub(args.startedTime); t < callTimeout {
			callTimeout = t
			timeoutType = enumspb.TIMEOUT_TYPE_START_TO_CLOSE
		}
	}
	if args.scheduleToCloseTimeout > 0 {
		if t := args.scheduleToCloseTimeout - args.currentTime.Sub(args.scheduledTime); t < callTimeout {
			callTimeout = t
			timeoutType = enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE
		}
	}

	callCtx, cancel := h.setupCallContext(ctx, callTimeout)
	defer cancel()

	inv, err := h.newInvocation(
		callCtx, ns, endpoint, args.endpointName, args.service,
		callTimeout, timeoutType,
		invocationTraceContext{
			operationTag:  "CancelOperation",
			namespaceName: ns.Name().String(),
			requestID:     args.requestID,
			operation:     args.operation,
			endpointName:  args.endpointName,
			workflowID:    cancelRef.BusinessID,
			runID:         cancelRef.RunID,
			attemptStart:  args.currentTime.UTC(),
			attempt:       task.GetAttempt(),
		},
	)
	if err != nil {
		return fmt.Errorf("failed to construct invocation: %w", err)
	}
	startTime := time.Now() // nolint:forbidigo // Time can be used for timing metrics.
	callErr := inv.Cancel(callCtx, args, nexus.CancelOperationOptions{Header: nexus.Header(args.headers)})
	failureSource := failureSourceFromContext(callCtx)

	h.recordCallOutcome(ns, endpoint, args.endpointName, "CancelOperation", cancelCallOutcomeTag(callCtx, callErr), callErr, time.Since(startTime), failureSource)

	saveErr := h.saveCancellationResult(ctx, cancelRef, callErr)

	// Wrap the save error so queue processing can apply destination-down handling
	// (e.g. circuit breaking) while still surfacing any persistence failure.
	if callErr != nil && isDestinationDown(callErr) {
		saveErr = queueserrors.NewDestinationDownError(callErr.Error(), saveErr)
	}

	return saveErr
}

// saveCancellationResult saves the cancellation result by updating the cancellation component.
func (h *cancellationInvocationTaskHandler) saveCancellationResult(
	ctx context.Context,
	cancelRef chasm.ComponentRef,
	callErr error,
) error {
	result, err := newCancellationResult(callErr)
	if err != nil {
		return fmt.Errorf("failed to construct cancellation result: %w", err)
	}
	_, _, err = chasm.UpdateComponent(
		ctx,
		cancelRef,
		(*Cancellation).saveResult,
		saveCancellationResultInput{
			result:      result,
			retryPolicy: h.config.RetryPolicy,
		},
	)
	return err
}

// cancellationBackoffTaskHandler fires when a backoff timer elapses and reschedules
// the cancellation for its next attempt.
type cancellationBackoffTaskHandler struct {
	chasm.PureTaskHandlerBase
	config *Config

	metricsHandler metrics.Handler
	logger         log.Logger
}

// newCancellationBackoffTaskHandler constructs the backoff handler from common options.
func newCancellationBackoffTaskHandler(opts commonTaskHandlerOptions) *cancellationBackoffTaskHandler {
	return &cancellationBackoffTaskHandler{
		config:         opts.Config,
		metricsHandler: opts.MetricsHandler,
		logger:         opts.Logger,
	}
}

// Validate reports whether the backoff task is still current: the cancellation must be
// BACKING_OFF with a matching attempt count.
func (h *cancellationBackoffTaskHandler) Validate(
	_ chasm.Context,
	cancellation *Cancellation,
	_ chasm.TaskAttributes,
	task *nexusoperationpb.CancellationBackoffTask,
) (bool, error) {
	isValid := cancellation.Status == nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF && cancellation.GetAttempt() == task.GetAttempt()
	return isValid, nil
}

// Execute reschedules the cancellation once the backoff period has elapsed.
func (h *cancellationBackoffTaskHandler) Execute(
	ctx chasm.MutableContext,
	cancellation *Cancellation,
	_ chasm.TaskAttributes,
	_ *nexusoperationpb.CancellationBackoffTask,
) error {
	return transitionCancellationRescheduled.Apply(cancellation, ctx, EventCancellationRescheduled{})
}
+type cancellationTaskTestEnv struct { + t *testing.T + ctrl *gomock.Controller + handler *cancellationInvocationTaskHandler + op *Operation + cancellation *Cancellation + mockEngine *chasm.MockEngine + timeSource *clock.EventTimeSource +} + +func newCancellationTaskTestEnv( + t *testing.T, + op *Operation, + cancellation *Cancellation, + invocationData InvocationData, + endpointReg nexustest.FakeEndpointRegistry, + clientProvider ClientProvider, + metricsHandler metrics.Handler, + requestTimeout time.Duration, +) *cancellationTaskTestEnv { + t.Helper() + + ctrl := gomock.NewController(t) + timeSource := clock.NewEventTimeSource() + timeSource.Update(time.Now()) + + nsRegistry := namespace.NewMockRegistry(ctrl) + nsRegistry.EXPECT().GetNamespaceByID(namespace.ID("ns-id")).Return( + namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0), nil) + + handler := &cancellationInvocationTaskHandler{ + nexusTaskHandlerBase: nexusTaskHandlerBase{ + config: &Config{ + RequestTimeout: dynamicconfig.GetDurationPropertyFnFilteredByDestination(requestTimeout), + MinRequestTimeout: dynamicconfig.GetDurationPropertyFnFilteredByNamespace(time.Millisecond), + UseNewFailureWireFormat: dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true), + RetryPolicy: dynamicconfig.GetTypedPropertyFn[backoff.RetryPolicy]( + backoff.NewExponentialRetryPolicy(time.Second), + ), + }, + namespaceRegistry: nsRegistry, + metricsHandler: metricsHandler, + logger: log.NewNoopLogger(), + clientProvider: clientProvider, + endpointRegistry: endpointReg, + }, + } + + // Wire mock parent pointers so Cancellation.loadArgs can traverse the tree. 
+ mockStore := &mockStoreComponent{invocationData: invocationData} + op.Store = chasm.NewMockParentPtr[OperationStore](mockStore) + cancellation.Operation = chasm.NewMockParentPtr(op) + op.Cancellation = chasm.NewComponentField(nil, cancellation) + + mockEngine := chasm.NewMockEngine(ctrl) + + return &cancellationTaskTestEnv{ + t: t, + ctrl: ctrl, + handler: handler, + op: op, + cancellation: cancellation, + mockEngine: mockEngine, + timeSource: timeSource, + } +} + +func (e *cancellationTaskTestEnv) setupReadComponent() { + e.mockEngine.EXPECT().ReadComponent( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).DoAndReturn(func(_ context.Context, _ chasm.ComponentRef, readFn func(chasm.Context, chasm.Component) error, _ ...chasm.TransitionOption) error { + mockCtx := &chasm.MockContext{ + HandleNow: func(_ chasm.Component) time.Time { + return e.timeSource.Now() + }, + } + return readFn(mockCtx, e.cancellation) + }) +} + +func (e *cancellationTaskTestEnv) setupUpdateComponent() { + e.mockEngine.EXPECT().UpdateComponent( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).DoAndReturn(func(_ context.Context, _ chasm.ComponentRef, updateFn func(chasm.MutableContext, chasm.Component) error, _ ...chasm.TransitionOption) ([]byte, error) { + mockCtx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(_ chasm.Component) time.Time { + return e.timeSource.Now() + }, + }, + } + err := updateFn(mockCtx, e.cancellation) + return nil, err + }) +} + +func (e *cancellationTaskTestEnv) execute(task *nexusoperationpb.CancellationTask) error { + ref := chasm.NewComponentRef[*Cancellation](chasm.ExecutionKey{ + NamespaceID: "ns-id", + BusinessID: "wf-id", + RunID: "run-id", + }) + engineCtx := chasm.NewEngineContext(context.Background(), e.mockEngine) + return e.handler.Execute(engineCtx, ref, chasm.TaskAttributes{Destination: "endpoint"}, task) +} + +func TestCancellationInvocationTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name 
string + status nexusoperationpb.CancellationStatus + cancelAttempt int32 + taskAttempt int32 + valid bool + }{ + { + name: "valid when scheduled and attempt matches", + status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, + cancelAttempt: 1, + taskAttempt: 1, + valid: true, + }, + { + name: "invalid when scheduled but attempt mismatches", + status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, + cancelAttempt: 2, + taskAttempt: 1, + valid: false, + }, + { + name: "invalid when backing off", + status: nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, + cancelAttempt: 1, + taskAttempt: 1, + valid: false, + }, + } + + handler := &cancellationInvocationTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := newCancellation(&nexusoperationpb.CancellationState{ + Status: tc.status, + Attempt: tc.cancelAttempt, + }) + + valid, err := handler.Validate(ctx, c, chasm.TaskAttributes{}, &nexusoperationpb.CancellationTask{Attempt: tc.taskAttempt}) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestCancellationBackoffTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name string + status nexusoperationpb.CancellationStatus + attempt int32 + task *nexusoperationpb.CancellationBackoffTask + valid bool + }{ + { + name: "valid when backing off and attempt matches", + status: nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, + attempt: 2, + task: &nexusoperationpb.CancellationBackoffTask{Attempt: 2}, + valid: true, + }, + { + name: "invalid when backing off but attempt mismatches", + status: nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, + attempt: 2, + task: &nexusoperationpb.CancellationBackoffTask{Attempt: 1}, + valid: false, + }, + { + name: "invalid when scheduled", + status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, + attempt: 1, + task: &nexusoperationpb.CancellationBackoffTask{Attempt: 1}, + valid: false, + }, + } + + handler := 
&cancellationBackoffTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + c := newCancellation(&nexusoperationpb.CancellationState{ + Status: tc.status, + Attempt: tc.attempt, + }) + + valid, err := handler.Validate(ctx, c, chasm.TaskAttributes{}, tc.task) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestCancellationBackoffTaskHandler_Execute(t *testing.T) { + op := newTestOperation() + op.Status = nexusoperationpb.OPERATION_STATUS_STARTED + op.OperationToken = "op-token" + + cancellation := newCancellation(&nexusoperationpb.CancellationState{ + Status: nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, + Attempt: 2, + }) + cancellation.Operation = chasm.NewMockParentPtr(op) + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + + handler := &cancellationBackoffTaskHandler{} + err := handler.Execute(ctx, cancellation, chasm.TaskAttributes{}, &nexusoperationpb.CancellationBackoffTask{Attempt: 2}) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, cancellation.Status) + require.Equal(t, int32(3), cancellation.Attempt) + require.Len(t, ctx.Tasks, 1) + _, ok := ctx.Tasks[0].Payload.(*nexusoperationpb.CancellationTask) + require.True(t, ok, "expected CancellationTask") +} + +func TestCancellationLoadArgs_StandaloneFallsBackToRequestData(t *testing.T) { + now := time.Now().UTC() + input := &commonpb.Payload{Data: []byte("test-input")} + headers := map[string]string{"test-header": "test-value"} + + op := NewOperation(&nexusoperationpb.OperationState{ + Service: "test-service", + Operation: "test-operation", + OperationToken: "test-operation-token", + RequestId: "test-request-id", + Endpoint: "test-endpoint", + EndpointId: "test-endpoint-id", + ScheduledTime: timestamppb.New(now.Add(-2 * time.Minute)), + StartedTime: 
timestamppb.New(now.Add(-1 * time.Minute)), + ScheduleToCloseTimeout: durationpb.New(10 * time.Minute), + StartToCloseTimeout: durationpb.New(5 * time.Minute), + }) + op.RequestData = chasm.NewDataField(nil, &nexusoperationpb.OperationRequestData{ + Input: input, + NexusHeader: headers, + }) + + cancellation := newCancellation(&nexusoperationpb.CancellationState{}) + cancellation.Operation = chasm.NewMockParentPtr(op) + + args, err := cancellation.loadArgs(&chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return now }, + }, nil) + require.NoError(t, err) + protorequire.ProtoEqual(t, input, args.payload) + require.Equal(t, headers, args.headers) +} + +func TestCancellationInvocationTaskHandler_HTTP(t *testing.T) { + cases := []struct { + name string + header map[string]string + onCancelOperation func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error + expectedMetricOutcome string + checkOutcome func(t *testing.T, c *Cancellation) + requestTimeout time.Duration + schedToCloseTimeout time.Duration + startToCloseTimeout time.Duration + destinationDown bool + endpointNotFound bool + }{ + { + name: "failure", + onCancelOperation: func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error { + return &nexus.HandlerError{ + Type: nexus.HandlerErrorTypeInternal, + Message: "operation not found", + RetryBehavior: nexus.HandlerErrorRetryBehaviorNonRetryable, + } + }, + expectedMetricOutcome: "handler-error:INTERNAL", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "operation not found", + FailureInfo: &failurepb.Failure_NexusHandlerFailureInfo{ + NexusHandlerFailureInfo: &failurepb.NexusHandlerFailureInfo{ + Type: string(nexus.HandlerErrorTypeInternal), + RetryBehavior: enumspb.NEXUS_HANDLER_ERROR_RETRY_BEHAVIOR_NON_RETRYABLE, + 
}, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "success", + onCancelOperation: func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error { + return nil + }, + expectedMetricOutcome: "successful", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, c.Status) + require.Nil(t, c.LastAttemptFailure) + }, + }, + { + name: "success with headers", + header: map[string]string{"key": "value"}, + onCancelOperation: func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error { + if options.Header["key"] != "value" { + return nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, `"key" header is not equal to "value"`) + } + return nil + }, + expectedMetricOutcome: "successful", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, c.Status) + require.Nil(t, c.LastAttemptFailure) + }, + }, + { + name: "transient error", + destinationDown: true, + onCancelOperation: func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error { + return nexus.NewHandlerErrorf(nexus.HandlerErrorTypeInternal, "internal server error") + }, + expectedMetricOutcome: "handler-error:INTERNAL", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "internal server error", + FailureInfo: &failurepb.Failure_NexusHandlerFailureInfo{ + NexusHandlerFailureInfo: &failurepb.NexusHandlerFailureInfo{ + Type: string(nexus.HandlerErrorTypeInternal), + }, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "invocation timeout by request timeout", + requestTimeout: 2 * time.Millisecond, + destinationDown: true, + onCancelOperation: func(ctx context.Context, service, operation, token string, options 
nexus.CancelOperationOptions) error { + time.Sleep(time.Millisecond * 100) //nolint:forbidigo + return nil + }, + expectedMetricOutcome: "request-timeout", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "request timed out", + FailureInfo: &failurepb.Failure_ServerFailureInfo{ + ServerFailureInfo: &failurepb.ServerFailureInfo{}, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "invocation timeout by ScheduleToCloseTimeout", + schedToCloseTimeout: 10 * time.Millisecond, + destinationDown: true, + onCancelOperation: func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error { + time.Sleep(time.Millisecond * 100) //nolint:forbidigo + return nil + }, + expectedMetricOutcome: "request-timeout", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "request timed out", + FailureInfo: &failurepb.Failure_ServerFailureInfo{ + ServerFailureInfo: &failurepb.ServerFailureInfo{}, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "invocation timeout by StartToCloseTimeout", + startToCloseTimeout: 10 * time.Millisecond, + destinationDown: true, + onCancelOperation: func(ctx context.Context, service, operation, token string, options nexus.CancelOperationOptions) error { + time.Sleep(time.Millisecond * 100) //nolint:forbidigo + return nil + }, + expectedMetricOutcome: "request-timeout", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "request timed out", + FailureInfo: &failurepb.Failure_ServerFailureInfo{ + ServerFailureInfo: &failurepb.ServerFailureInfo{}, + }, + }, c.LastAttemptFailure) + }, + }, + { + 
name: "operation timeout by ScheduleToCloseTimeout", + schedToCloseTimeout: time.Microsecond, + onCancelOperation: nil, // Should not be called if the operation has timed out. + expectedMetricOutcome: "operation-timeout", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "operation timed out before cancellation could be delivered", + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE, + }, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "operation timeout by StartToCloseTimeout", + startToCloseTimeout: time.Microsecond, + onCancelOperation: nil, // Should not be called if the operation has timed out. + expectedMetricOutcome: "operation-timeout", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "operation timed out before cancellation could be delivered", + FailureInfo: &failurepb.Failure_TimeoutFailureInfo{ + TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{ + TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE, + }, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "endpoint not found", + endpointNotFound: true, + requestTimeout: time.Hour, + onCancelOperation: nil, // Should not be called if the endpoint is not found. 
+ checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "endpoint not registered", + FailureInfo: &failurepb.Failure_NexusHandlerFailureInfo{ + NexusHandlerFailureInfo: &failurepb.NexusHandlerFailureInfo{ + Type: string(nexus.HandlerErrorTypeNotFound), + }, + }, + }, c.LastAttemptFailure) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + listenAddr := nexustest.AllocListenAddress() + h := nexustest.Handler{} + h.OnCancelOperation = tc.onCancelOperation + nexustest.NewNexusServer(t, listenAddr, h) + + op := &Operation{ + OperationState: &nexusoperationpb.OperationState{ + Status: nexusoperationpb.OPERATION_STATUS_STARTED, + EndpointId: "endpoint-id", + Endpoint: "endpoint", + Service: "service", + Operation: "operation", + ScheduledTime: timestamppb.Now(), + StartedTime: timestamppb.Now(), + ScheduleToCloseTimeout: durationpb.New(tc.schedToCloseTimeout), + StartToCloseTimeout: durationpb.New(tc.startToCloseTimeout), + RequestId: "request-id", + OperationToken: "op-token", + Attempt: 1, + }, + } + cancellation := newCancellation(&nexusoperationpb.CancellationState{ + Status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, + Attempt: 1, + }) + + endpointReg := nexustest.FakeEndpointRegistry{ + OnGetByID: func(ctx context.Context, endpointID string) (*persistencespb.NexusEndpointEntry, error) { + if tc.endpointNotFound { + return nil, serviceerror.NewNotFound("endpoint not found") + } + return endpointEntry, nil + }, + OnGetByName: func(ctx context.Context, namespaceID namespace.ID, endpointName string) (*persistencespb.NexusEndpointEntry, error) { + if tc.endpointNotFound { + return nil, serviceerror.NewNotFound("endpoint not found") + } + return endpointEntry, nil + }, + } + + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer 
metricsHandler.StopCapture(capture) + + clientProvider := func(ctx context.Context, namespaceID string, entry *persistencespb.NexusEndpointEntry, service string) (*nexusrpc.HTTPClient, error) { + return nexusrpc.NewHTTPClient(nexusrpc.HTTPClientOptions{ + BaseURL: "http://" + listenAddr, + Service: service, + Serializer: commonnexus.PayloadSerializer, + }) + } + + env := newCancellationTaskTestEnv(t, op, cancellation, + InvocationData{ + Header: tc.header, + }, + endpointReg, clientProvider, metricsHandler, cmp.Or(tc.requestTimeout, time.Hour)) + + env.setupReadComponent() + env.setupUpdateComponent() + + err := env.execute(&nexusoperationpb.CancellationTask{Attempt: 1}) + if tc.destinationDown { + var destinationDownErr *queueserrors.DestinationDownError + require.ErrorAs(t, err, &destinationDownErr) + } else { + require.NoError(t, err) + } + tc.checkOutcome(t, cancellation) + + if tc.expectedMetricOutcome != "" { + snap := capture.Snapshot() + counterRecordings := snap[OutboundRequestCounter.Name()] + require.Len(t, counterRecordings, 1) + require.Equal(t, int64(1), counterRecordings[0].Value) + require.Equal(t, "ns-name", counterRecordings[0].Tags["namespace"]) + require.Equal(t, "endpoint", counterRecordings[0].Tags["destination"]) + require.Equal(t, "CancelOperation", counterRecordings[0].Tags["method"]) + require.Equal(t, tc.expectedMetricOutcome, counterRecordings[0].Tags["outcome"]) + require.Equal(t, "_unknown_", counterRecordings[0].Tags["failure_source"]) + + timerRecordings := snap[OutboundRequestLatency.Name()] + require.Len(t, timerRecordings, 1) + require.Equal(t, tc.expectedMetricOutcome, timerRecordings[0].Tags["outcome"]) + } + }) + } +} + +// testCancelProcessor implements chasm.NexusOperationProcessor[string] for system endpoint tests. 
+type testCancelProcessor struct{} + +func (p *testCancelProcessor) ProcessInput( + _ chasm.NexusOperationProcessorContext, + _ string, +) (*chasm.NexusOperationProcessorResult, error) { + return &chasm.NexusOperationProcessorResult{ + RoutingKey: chasm.NexusOperationRoutingKeyRandom{}, + }, nil +} + +func TestCancellationInvocationTaskHandler_SystemEndpoint(t *testing.T) { + cases := []struct { + name string + setupHistoryClient func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient + registerProcessor bool + expectedMetricOutcome string + checkOutcome func(t *testing.T, c *Cancellation) + }{ + { + name: "success", + registerProcessor: true, + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().CancelNexusOperation(gomock.Any(), gomock.Any()). + Return(&historyservice.CancelNexusOperationResponse{}, nil) + return client + }, + expectedMetricOutcome: "successful", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, c.Status) + require.Nil(t, c.LastAttemptFailure) + }, + }, + { + name: "history service error - retryable", + registerProcessor: true, + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().CancelNexusOperation(gomock.Any(), gomock.Any()). 
+ Return(nil, serviceerror.NewUnavailable("unavailable")) + return client + }, + expectedMetricOutcome: "service-error:Unavailable", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_BACKING_OFF, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "Unavailable: unavailable", + FailureInfo: &failurepb.Failure_ServerFailureInfo{ + ServerFailureInfo: &failurepb.ServerFailureInfo{}, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "history service error - InvalidArgument", + registerProcessor: true, + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().CancelNexusOperation(gomock.Any(), gomock.Any()). + Return(nil, serviceerror.NewInvalidArgument("invalid")) + return client + }, + expectedMetricOutcome: "service-error:InvalidArgument", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "InvalidArgument: invalid", + FailureInfo: &failurepb.Failure_ServerFailureInfo{ + ServerFailureInfo: &failurepb.ServerFailureInfo{ + NonRetryable: true, + }, + }, + }, c.LastAttemptFailure) + }, + }, + { + name: "chasm processor error", + registerProcessor: false, + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + return historyservicemock.NewMockHistoryServiceClient(ctrl) + }, + expectedMetricOutcome: "operation-processor-failed", + checkOutcome: func(t *testing.T, c *Cancellation) { + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_FAILED, c.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: `service "service" not found`, + FailureInfo: &failurepb.Failure_NexusHandlerFailureInfo{ + NexusHandlerFailureInfo: &failurepb.NexusHandlerFailureInfo{ + Type: 
string(nexus.HandlerErrorTypeNotFound), + }, + }, + }, c.LastAttemptFailure) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + op := &Operation{ + OperationState: &nexusoperationpb.OperationState{ + Status: nexusoperationpb.OPERATION_STATUS_STARTED, + Endpoint: commonnexus.SystemEndpoint, + Service: "service", + Operation: "operation", + ScheduledTime: timestamppb.Now(), + StartedTime: timestamppb.Now(), + RequestId: "request-id", + OperationToken: "op-token", + Attempt: 1, + }, + } + cancellation := newCancellation(&nexusoperationpb.CancellationState{ + Status: nexusoperationpb.CANCELLATION_STATUS_SCHEDULED, + Attempt: 1, + }) + + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + env := newCancellationTaskTestEnv(t, op, cancellation, + InvocationData{Input: mustToPayload(t, "test")}, + nexustest.FakeEndpointRegistry{}, nil, metricsHandler, time.Hour) + + // Set up system endpoint dependencies. 
+ historyClient := tc.setupHistoryClient(env.ctrl) + env.handler.historyClient = historyClient + env.handler.config.NumHistoryShards = 4 + + reg := chasm.NewRegistry(log.NewNoopLogger()) + if tc.registerProcessor { + serviceProc := chasm.NewNexusServiceProcessor("service") + serviceProc.MustRegisterOperation("operation", + chasm.NewRegisterableNexusOperationProcessor(&testCancelProcessor{})) + reg.NexusEndpointProcessor.MustRegisterServiceProcessor(serviceProc) + } + env.handler.chasmRegistry = reg + + env.setupReadComponent() + env.setupUpdateComponent() + + err := env.execute(&nexusoperationpb.CancellationTask{Attempt: 1}) + require.NoError(t, err) + + tc.checkOutcome(t, cancellation) + + snap := capture.Snapshot() + counterRecordings := snap[OutboundRequestCounter.Name()] + require.Len(t, counterRecordings, 1) + require.Equal(t, int64(1), counterRecordings[0].Value) + require.Equal(t, "ns-name", counterRecordings[0].Tags["namespace"]) + require.Equal(t, commonnexus.SystemEndpoint, counterRecordings[0].Tags["destination"]) + require.Equal(t, "CancelOperation", counterRecordings[0].Tags["method"]) + require.Equal(t, tc.expectedMetricOutcome, counterRecordings[0].Tags["outcome"]) + + timerRecordings := snap[OutboundRequestLatency.Name()] + require.Len(t, timerRecordings, 1) + require.Equal(t, tc.expectedMetricOutcome, timerRecordings[0].Tags["outcome"]) + }) + } +} diff --git a/chasm/lib/nexusoperation/config.go b/chasm/lib/nexusoperation/config.go new file mode 100644 index 00000000000..990d8dbef03 --- /dev/null +++ b/chasm/lib/nexusoperation/config.go @@ -0,0 +1,278 @@ +package nexusoperation + +import ( + "fmt" + "strings" + "text/template" + "time" + + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/rpc/interceptor" +) + +var LongPollTimeout = 
dynamicconfig.NewNamespaceDurationSetting( + "nexusoperation.longPollTimeout", + common.DefaultLongPollTimeout, + `Maximum timeout for nexus operation long-poll requests. Actual wait may be shorter to leave +longPollBuffer before the caller deadline.`, +) + +var LongPollBuffer = dynamicconfig.NewNamespaceDurationSetting( + "nexusoperation.longPollBuffer", + common.DefaultLongPollBuffer, + `A buffer used to adjust the nexus operation long-poll timeouts. + Specifically, nexus operation long-poll requests are timed out at a time which leaves at least the buffer's duration + remaining before the caller's deadline, if permitted by the caller's deadline.`, +) + +var Enabled = dynamicconfig.NewNamespaceBoolSetting( + "nexusoperation.enableStandalone", + false, + `Toggles standalone Nexus operation functionality on the server.`, +) + +var EnableChasmWorkflowOperations = dynamicconfig.NewNamespaceBoolSetting( + "nexusoperation.enableChasmWorkflowOperations", + false, + `Feature flag that controls whether the legacy HSM-based implementation (when flag is false; default) or the newer +CHASM-based implementation of Nexus will be used when scheduling new Nexus Operations.`, +) + +var RequestTimeout = dynamicconfig.NewDestinationDurationSetting( + "nexusoperation.request.timeout", + time.Second*10, + `Timeout for making a single nexus start or cancel request.`, +) + +var MinRequestTimeout = dynamicconfig.NewNamespaceDurationSetting( + "nexusoperation.limit.request.timeout.min", + time.Millisecond*1500, + `Minimum time remaining for a request to complete for the server to make RPCs. If the remaining request timeout is +less than this value, a non-retryable timeout error will be returned.`, +) + +var MinDispatchTaskTimeout = dynamicconfig.NewNamespaceDurationSetting( + "nexusoperation.limit.dispatch.task.timeout.min", + time.Second, + `Minimum time remaining for a request to be dispatched to the handler worker. 
If the remaining request timeout is less +than this value, a timeout error will be returned. Working in conjunction with MinRequestTimeout, both configs help +ensure that the server has enough time to complete a Nexus request.`, +) + +var MaxConcurrentOperationsPerWorkflow = dynamicconfig.NewNamespaceIntSetting( + "nexusoperation.limit.operation.concurrencyPerWorkflow.max", + 2000, + `Limits the maximum allowed concurrent Nexus Operations for a given workflow execution. Once the limit is reached, +ScheduleNexusOperation commands will be rejected.`, +) + +var MaxServiceNameLength = dynamicconfig.NewNamespaceIntSetting( + "nexusoperation.limit.service.name.length", + 1000, + `Limits the maximum allowed length for a Nexus Service name. ScheduleNexusOperation commands with a service name that +exceeds this limit will be rejected. Uses Go's len() function to determine the length.`, +) + +var MaxOperationNameLength = dynamicconfig.NewNamespaceIntSetting( + "nexusoperation.limit.operation.name.length", + 1000, + `Limits the maximum allowed length for a Nexus Operation name. ScheduleNexusOperation commands with an operation name +that exceeds this limit will be rejected. Uses Go's len() function to determine the length.`, +) + +var MaxOperationTokenLength = dynamicconfig.NewNamespaceIntSetting( + "nexusoperation.limit.operation.token.length", + 4096, + `Limits the maximum allowed length for a Nexus Operation token. Tokens returned via start responses or via async +completions that exceed this limit will be rejected. Uses Go's len() function to determine the length. +Leave this limit long enough to fit a workflow ID and namespace name plus padding at minimum since that's what the SDKs +use as the token.`, +) + +var MaxOperationHeaderSize = dynamicconfig.NewNamespaceIntSetting( + "nexusoperation.limit.header.size", + 8192, + `The maximum allowed header size for a Nexus Operation. 
+ScheduleNexusOperation commands with a "nexus_header" field that exceeds this limit will be rejected. +Uses Go's len() function on header keys and values to determine the total size.`, +) + +var DisallowedOperationHeaders = dynamicconfig.NewGlobalTypedSettingWithConverter( + "nexusoperation.disallowedHeaders", + func(in any) ([]string, error) { + keys, err := dynamicconfig.ConvertStructure[[]string](nil)(in) + if err != nil { + return nil, err + } + for i, k := range keys { + keys[i] = strings.ToLower(k) + } + return keys, nil + }, + []string{ + "request-timeout", + interceptor.DCRedirectionApiHeaderName, + interceptor.DCRedirectionContextHeaderName, + headers.CallerNameHeaderName, + headers.CallerTypeHeaderName, + headers.CallOriginHeaderName, + headers.PrincipalTypeHeaderName, + headers.PrincipalNameHeaderName, + }, + `Case insensitive list of disallowed header keys for Nexus Operations. ScheduleNexusOperation commands with a +"nexus_header" field that contains any of these disallowed keys will be rejected.`, +) + +var MaxOperationScheduleToCloseTimeout = dynamicconfig.NewNamespaceDurationSetting( + "nexusoperation.limit.scheduleToCloseTimeout", + 0, + `Maximum allowed duration of a Nexus Operation. ScheduleOperation commands that specify no schedule-to-close timeout +or a longer timeout than permitted will have their schedule-to-close timeout capped to this value. 
0 implies no limit.`, +) + +var CallbackURLTemplate = dynamicconfig.NewGlobalTypedSettingWithConverter( + "nexusoperation.callback.endpoint.template", + func(in any) (*template.Template, error) { + s, ok := in.(string) + if !ok { + return nil, fmt.Errorf("invalid config type: %T for nexusoperation.callback.endpoint.template, expected string", in) + } + if s == "unset" { + return nil, nil + } + return template.New("NexusCallbackURL").Parse(s) + }, + nil, + `Controls the template for generating callback URLs included in Nexus operation requests, which are used to deliver +asynchronous completion for external endpoint targets. The template can be used to interpolate the {{.NamepaceName}} +and {{.NamespaceID}} parameters to construct a publicly accessible URL. +Must be set to call external endpoints.`, +) + +type RetryPolicyConfig struct { + InitialInterval time.Duration + MaxInterval time.Duration +} + +func (cfg RetryPolicyConfig) build() backoff.RetryPolicy { + return backoff.NewExponentialRetryPolicy(cfg.InitialInterval). + WithMaximumInterval(cfg.MaxInterval). + WithExpirationInterval(backoff.NoInterval) +} + +var defaultRetryPolicyConfig = RetryPolicyConfig{ + InitialInterval: time.Second, + MaxInterval: time.Hour, +} + +var RetryPolicy = dynamicconfig.NewGlobalTypedSettingWithConverter( + "nexusoperation.retryPolicy", + func(in any) (backoff.RetryPolicy, error) { + cfg, err := dynamicconfig.ConvertStructure(defaultRetryPolicyConfig)(in) + if err != nil { + return nil, err + } + return cfg.build(), nil + }, + defaultRetryPolicyConfig.build(), + `The retry policy for nexus StartOperation or CancelOperation requests for a given operation.`, +) + +var MetricTagConfiguration = dynamicconfig.NewGlobalTypedSetting( + "nexusoperation.metrics.tags", + NexusMetricTagConfig{}, + `Controls which metric tags are included with Nexus operation metrics. This configuration supports: +1. Service name tag - adds the Nexus service name as a metric dimension (IncludeServiceTag) +2. 
Operation name tag - adds the Nexus operation name as a metric dimension (IncludeOperationTag) +3. Header-based tags - maps values from request headers to metric tags (HeaderTagMappings) + +Note: default metric tags (like namespace, endpoint) are always included and not affected by this configuration. +Adding high-cardinality tags (like unique operation names) can significantly increase metric storage requirements and +query complexity. Consider the cardinality impact when enabling these tags.`, +) + +var UseSystemCallbackURL = dynamicconfig.NewGlobalBoolSetting( + "nexusoperation.useSystemCallbackURL", + true, + `Controls how the executor generates callback URLs for worker targets in Nexus Operations. +When true, uses the fixed system callback URL for all worker targets.`, +) + +var MaxReasonLength = dynamicconfig.NewNamespaceIntSetting( + "nexusoperation.limit.reasonLength", + 1000, + `Limits the maximum allowed length for a reason string in Nexus operation requests. +Uses Go's len() function to determine the length.`, +) + +var UseNewFailureWireFormat = dynamicconfig.NewNamespaceBoolSetting( + "nexusoperation.useNewFailureWireFormat", + true, + `Controls whether to use the new failure wire format via an HTTP header that is attached to StartOperation requests. +Added for safety. Defaults to true. 
Likely to be removed in future server versions.`, +) + +type Config struct { + Enabled dynamicconfig.BoolPropertyFnWithNamespaceFilter + EnableChasm dynamicconfig.BoolPropertyFnWithNamespaceFilter + EnableChasmNexusWorkflowOperations dynamicconfig.BoolPropertyFnWithNamespaceFilter + NumHistoryShards int32 + LongPollBuffer dynamicconfig.DurationPropertyFnWithNamespaceFilter + LongPollTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter + RequestTimeout dynamicconfig.DurationPropertyFnWithDestinationFilter + MinRequestTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter + MaxConcurrentOperationsPerWorkflow dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxServiceNameLength dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxOperationNameLength dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxOperationTokenLength dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxOperationHeaderSize dynamicconfig.IntPropertyFnWithNamespaceFilter + DisallowedOperationHeaders dynamicconfig.TypedPropertyFn[[]string] + MaxOperationScheduleToCloseTimeout dynamicconfig.DurationPropertyFnWithNamespaceFilter + PayloadSizeLimit dynamicconfig.IntPropertyFnWithNamespaceFilter + CallbackURLTemplate dynamicconfig.TypedPropertyFn[*template.Template] + UseSystemCallbackURL dynamicconfig.BoolPropertyFn + PayloadSizeLimitWarn dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxUserMetadataSummarySize dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxUserMetadataDetailsSize dynamicconfig.IntPropertyFnWithNamespaceFilter + UseNewFailureWireFormat dynamicconfig.BoolPropertyFnWithNamespaceFilter + RecordCancelRequestCompletionEvents dynamicconfig.BoolPropertyFn + VisibilityMaxPageSize dynamicconfig.IntPropertyFnWithNamespaceFilter + MaxIDLengthLimit dynamicconfig.IntPropertyFn + MaxReasonLength dynamicconfig.IntPropertyFnWithNamespaceFilter + RetryPolicy func() backoff.RetryPolicy +} + +func configProvider(dc *dynamicconfig.Collection, cfg *config.Persistence) *Config { + return 
&Config{ + Enabled: Enabled.Get(dc), + EnableChasm: dynamicconfig.EnableChasm.Get(dc), + EnableChasmNexusWorkflowOperations: EnableChasmWorkflowOperations.Get(dc), + NumHistoryShards: cfg.NumHistoryShards, + LongPollBuffer: LongPollBuffer.Get(dc), + LongPollTimeout: LongPollTimeout.Get(dc), + RequestTimeout: RequestTimeout.Get(dc), + MinRequestTimeout: MinRequestTimeout.Get(dc), + MaxConcurrentOperationsPerWorkflow: MaxConcurrentOperationsPerWorkflow.Get(dc), + MaxServiceNameLength: MaxServiceNameLength.Get(dc), + MaxOperationNameLength: MaxOperationNameLength.Get(dc), + MaxOperationTokenLength: MaxOperationTokenLength.Get(dc), + MaxOperationHeaderSize: MaxOperationHeaderSize.Get(dc), + DisallowedOperationHeaders: DisallowedOperationHeaders.Get(dc), + MaxOperationScheduleToCloseTimeout: MaxOperationScheduleToCloseTimeout.Get(dc), + PayloadSizeLimit: dynamicconfig.BlobSizeLimitError.Get(dc), + PayloadSizeLimitWarn: dynamicconfig.BlobSizeLimitWarn.Get(dc), + MaxUserMetadataSummarySize: dynamicconfig.MaxUserMetadataSummarySize.Get(dc), + MaxUserMetadataDetailsSize: dynamicconfig.MaxUserMetadataDetailsSize.Get(dc), + CallbackURLTemplate: CallbackURLTemplate.Get(dc), + UseSystemCallbackURL: UseSystemCallbackURL.Get(dc), + UseNewFailureWireFormat: UseNewFailureWireFormat.Get(dc), + VisibilityMaxPageSize: dynamicconfig.FrontendVisibilityMaxPageSize.Get(dc), + MaxIDLengthLimit: dynamicconfig.MaxIDLengthLimit.Get(dc), + MaxReasonLength: MaxReasonLength.Get(dc), + RetryPolicy: RetryPolicy.Get(dc), + } +} diff --git a/chasm/lib/nexusoperation/frontend.go b/chasm/lib/nexusoperation/frontend.go new file mode 100644 index 00000000000..d7e34c89983 --- /dev/null +++ b/chasm/lib/nexusoperation/frontend.go @@ -0,0 +1,325 @@ +package nexusoperation + +import ( + "context" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + nexuspb "go.temporal.io/api/nexus/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + 
"go.temporal.io/server/chasm" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/searchattribute" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// FrontendHandler provides the frontend-facing API for standalone Nexus operations. +type FrontendHandler interface { + StartNexusOperationExecution(context.Context, *workflowservice.StartNexusOperationExecutionRequest) (*workflowservice.StartNexusOperationExecutionResponse, error) + DescribeNexusOperationExecution(context.Context, *workflowservice.DescribeNexusOperationExecutionRequest) (*workflowservice.DescribeNexusOperationExecutionResponse, error) + PollNexusOperationExecution(context.Context, *workflowservice.PollNexusOperationExecutionRequest) (*workflowservice.PollNexusOperationExecutionResponse, error) + ListNexusOperationExecutions(context.Context, *workflowservice.ListNexusOperationExecutionsRequest) (*workflowservice.ListNexusOperationExecutionsResponse, error) + CountNexusOperationExecutions(context.Context, *workflowservice.CountNexusOperationExecutionsRequest) (*workflowservice.CountNexusOperationExecutionsResponse, error) + RequestCancelNexusOperationExecution(context.Context, *workflowservice.RequestCancelNexusOperationExecutionRequest) (*workflowservice.RequestCancelNexusOperationExecutionResponse, error) + TerminateNexusOperationExecution(context.Context, *workflowservice.TerminateNexusOperationExecutionRequest) (*workflowservice.TerminateNexusOperationExecutionResponse, error) + DeleteNexusOperationExecution(context.Context, *workflowservice.DeleteNexusOperationExecutionRequest) (*workflowservice.DeleteNexusOperationExecutionResponse, error) +} + +var 
ErrStandaloneNexusOperationDisabled = serviceerror.NewUnimplemented("Standalone Nexus operation is disabled") + +type frontendHandler struct { + client nexusoperationpb.NexusOperationServiceClient + config *Config + logger log.Logger + namespaceRegistry namespace.Registry + endpointRegistry commonnexus.EndpointRegistry + saMapperProvider searchattribute.MapperProvider + saValidator *searchattribute.Validator +} + +func NewFrontendHandler( + client nexusoperationpb.NexusOperationServiceClient, + config *Config, + logger log.Logger, + namespaceRegistry namespace.Registry, + endpointRegistry commonnexus.EndpointRegistry, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) FrontendHandler { + return &frontendHandler{ + client: client, + config: config, + logger: logger, + namespaceRegistry: namespaceRegistry, + endpointRegistry: endpointRegistry, + saMapperProvider: saMapperProvider, + saValidator: saValidator, + } +} + +func (h *frontendHandler) StartNexusOperationExecution( + ctx context.Context, + req *workflowservice.StartNexusOperationExecutionRequest, +) (*workflowservice.StartNexusOperationExecutionResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeStartRequest(req, h.config, h.logger, h.saMapperProvider, h.saValidator); err != nil { + return nil, err + } + + // Verify the endpoint exists before creating the operation. 
+ endpointEntry, err := h.endpointRegistry.GetByName(ctx, namespaceID, req.GetEndpoint()) + if err != nil { + return nil, err + } + + resp, err := h.client.StartNexusOperation(ctx, &nexusoperationpb.StartNexusOperationRequest{ + EndpointId: endpointEntry.GetId(), + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +func (h *frontendHandler) DescribeNexusOperationExecution( + ctx context.Context, + req *workflowservice.DescribeNexusOperationExecutionRequest, +) (*workflowservice.DescribeNexusOperationExecutionResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeDescribeRequest(req, namespaceID.String(), h.config); err != nil { + return nil, err + } + + resp, err := h.client.DescribeNexusOperation(ctx, &nexusoperationpb.DescribeNexusOperationRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +// PollNexusOperationExecution long-polls for a Nexus operation to reach a specific stage. 
+func (h *frontendHandler) PollNexusOperationExecution( + ctx context.Context, + req *workflowservice.PollNexusOperationExecutionRequest, +) (*workflowservice.PollNexusOperationExecutionResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + if err := validateAndNormalizePollRequest(req, h.config); err != nil { + return nil, err + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + resp, err := h.client.PollNexusOperation(ctx, &nexusoperationpb.PollNexusOperationRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + return resp.GetFrontendResponse(), err +} + +func (h *frontendHandler) ListNexusOperationExecutions( + ctx context.Context, + req *workflowservice.ListNexusOperationExecutionsRequest, +) (*workflowservice.ListNexusOperationExecutionsResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + pageSize := req.GetPageSize() + maxPageSize := int32(h.config.VisibilityMaxPageSize(req.GetNamespace())) + if pageSize <= 0 || pageSize > maxPageSize { + pageSize = maxPageSize + } + + resp, err := chasm.ListExecutions[*Operation, *emptypb.Empty](ctx, &chasm.ListExecutionsRequest{ + NamespaceName: req.GetNamespace(), + PageSize: int(pageSize), + NextPageToken: req.GetNextPageToken(), + Query: req.GetQuery(), + }) + if err != nil { + return nil, err + } + + operations := make([]*nexuspb.NexusOperationExecutionListInfo, 0, len(resp.Executions)) + for _, exec := range resp.Executions { + endpoint, _ := chasm.SearchAttributeValue(exec.ChasmSearchAttributes, EndpointSearchAttribute) + service, _ := chasm.SearchAttributeValue(exec.ChasmSearchAttributes, ServiceSearchAttribute) + operation, _ := chasm.SearchAttributeValue(exec.ChasmSearchAttributes, OperationSearchAttribute) + statusStr, _ := 
chasm.SearchAttributeValue(exec.ChasmSearchAttributes, StatusSearchAttribute) + status, _ := enumspb.NexusOperationExecutionStatusFromString(statusStr) + + var closeTime *timestamppb.Timestamp + var executionDuration *durationpb.Duration + if !exec.CloseTime.IsZero() { + closeTime = timestamppb.New(exec.CloseTime) + if !exec.StartTime.IsZero() { + executionDuration = durationpb.New(exec.CloseTime.Sub(exec.StartTime)) + } + } + + operations = append(operations, &nexuspb.NexusOperationExecutionListInfo{ + OperationId: exec.BusinessID, + RunId: exec.RunID, + Endpoint: endpoint, + Service: service, + Operation: operation, + Status: status, + ScheduleTime: timestamppb.New(exec.StartTime), + CloseTime: closeTime, + ExecutionDuration: executionDuration, + StateTransitionCount: exec.StateTransitionCount, + SearchAttributes: &commonpb.SearchAttributes{IndexedFields: exec.CustomSearchAttributes}, + }) + } + + return &workflowservice.ListNexusOperationExecutionsResponse{ + Operations: operations, + NextPageToken: resp.NextPageToken, + }, nil +} + +func (h *frontendHandler) CountNexusOperationExecutions( + ctx context.Context, + req *workflowservice.CountNexusOperationExecutionsRequest, +) (*workflowservice.CountNexusOperationExecutionsResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + resp, err := chasm.CountExecutions[*Operation](ctx, &chasm.CountExecutionsRequest{ + NamespaceName: req.GetNamespace(), + Query: req.GetQuery(), + }) + if err != nil { + return nil, err + } + + groups := make([]*workflowservice.CountNexusOperationExecutionsResponse_AggregationGroup, 0, len(resp.Groups)) + for _, g := range resp.Groups { + groups = append(groups, &workflowservice.CountNexusOperationExecutionsResponse_AggregationGroup{ + GroupValues: g.Values, + Count: g.Count, + }) + } + + return &workflowservice.CountNexusOperationExecutionsResponse{ + Count: resp.Count, + Groups: groups, + }, nil +} + 
+func (h *frontendHandler) RequestCancelNexusOperationExecution( + ctx context.Context, + req *workflowservice.RequestCancelNexusOperationExecutionRequest, +) (*workflowservice.RequestCancelNexusOperationExecutionResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeCancelRequest(req, h.config); err != nil { + return nil, err + } + + _, err = h.client.RequestCancelNexusOperation(ctx, &nexusoperationpb.RequestCancelNexusOperationRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.RequestCancelNexusOperationExecutionResponse{}, nil +} + +func (h *frontendHandler) TerminateNexusOperationExecution( + ctx context.Context, + req *workflowservice.TerminateNexusOperationExecutionRequest, +) (*workflowservice.TerminateNexusOperationExecutionResponse, error) { + if !h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeTerminateRequest(req, h.config); err != nil { + return nil, err + } + + _, err = h.client.TerminateNexusOperation(ctx, &nexusoperationpb.TerminateNexusOperationRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.TerminateNexusOperationExecutionResponse{}, nil +} + +func (h *frontendHandler) DeleteNexusOperationExecution( + ctx context.Context, + req *workflowservice.DeleteNexusOperationExecutionRequest, +) (*workflowservice.DeleteNexusOperationExecutionResponse, error) { + if 
!h.isStandaloneNexusOperationEnabled(req.GetNamespace()) { + return nil, ErrStandaloneNexusOperationDisabled + } + + namespaceID, err := h.namespaceRegistry.GetNamespaceID(namespace.Name(req.GetNamespace())) + if err != nil { + return nil, err + } + + if err := validateAndNormalizeDeleteRequest(req, h.config); err != nil { + return nil, err + } + + _, err = h.client.DeleteNexusOperation(ctx, &nexusoperationpb.DeleteNexusOperationRequest{ + NamespaceId: namespaceID.String(), + FrontendRequest: req, + }) + if err != nil { + return nil, err + } + + return &workflowservice.DeleteNexusOperationExecutionResponse{}, nil +} + +// isStandaloneNexusOperationEnabled checks if standalone Nexus operations are enabled for the given namespace. +func (h *frontendHandler) isStandaloneNexusOperationEnabled(namespaceName string) bool { + return h.config.EnableChasm(namespaceName) && h.config.Enabled(namespaceName) +} diff --git a/chasm/lib/nexusoperation/fx.go b/chasm/lib/nexusoperation/fx.go new file mode 100644 index 00000000000..ec5caeb11ad --- /dev/null +++ b/chasm/lib/nexusoperation/fx.go @@ -0,0 +1,178 @@ +package nexusoperation + +import ( + "context" + "fmt" + "net/http" + + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/collection" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/rpc" + "go.uber.org/fx" +) + +const nexusCallbackSourceHeader = "Nexus-Callback-Source" + +var Module = fx.Module( + 
"chasm.lib.nexusoperation", + fx.Provide(configProvider), + fx.Provide(commonnexus.NewCallbackTokenGenerator), + fx.Provide(endpointRegistryProvider), + fx.Invoke(endpointRegistryLifetimeHooks), + fx.Provide(defaultNexusTransportProvider), + fx.Provide(clientProviderFactory), + fx.Provide(newHandler), + fx.Provide(newCancellationBackoffTaskHandler), + fx.Provide(newCancellationInvocationTaskHandler), + fx.Provide(newOperationBackoffTaskHandler), + fx.Provide(newOperationInvocationTaskHandler), + fx.Provide(newOperationScheduleToCloseTimeoutTaskHandler), + fx.Provide(newOperationScheduleToStartTimeoutTaskHandler), + fx.Provide(newOperationStartToCloseTimeoutTaskHandler), + fx.Provide(newLibrary), + fx.Invoke(register), +) + +var FrontendModule = fx.Module( + "chasm.lib.nexusoperation.frontend", + fx.Provide(configProvider), + fx.Provide(nexusoperationpb.NewNexusOperationServiceLayeredClient), + fx.Provide(NewFrontendHandler), + fx.Provide(newComponentOnlyLibrary), + fx.Invoke(func(l *componentOnlyLibrary, registry *chasm.Registry) error { + // Frontend needs to register the component in order to serialize ComponentRefs, but doesn't + // need task handlers. 
+ return registry.Register(l) + }), +) + +func register( + registry *chasm.Registry, + library *Library, +) error { + return registry.Register(library) +} + +func endpointRegistryProvider( + matchingClient resource.MatchingClient, + endpointManager persistence.NexusEndpointManager, + dc *dynamicconfig.Collection, + logger log.Logger, + metricsHandler metrics.Handler, +) commonnexus.EndpointRegistry { + registryConfig := commonnexus.NewEndpointRegistryConfig(dc) + return commonnexus.NewEndpointRegistry( + registryConfig, + matchingClient, + endpointManager, + logger, + metricsHandler, + ) +} + +func endpointRegistryLifetimeHooks(lc fx.Lifecycle, registry commonnexus.EndpointRegistry) { + lc.Append(fx.StartStopHook(registry.StartLifecycle, registry.StopLifecycle)) +} + +// NexusTransportProvider allows customization of the HTTP transport used for Nexus requests. +type NexusTransportProvider func(namespaceID, serviceName string) http.RoundTripper + +func defaultNexusTransportProvider() NexusTransportProvider { + return func(namespaceID, serviceName string) http.RoundTripper { + return http.DefaultTransport + } +} + +// responseSizeLimiter wraps an http.RoundTripper to limit response body size. 
+type responseSizeLimiter struct { + rt http.RoundTripper +} + +func (r responseSizeLimiter) RoundTrip(request *http.Request) (*http.Response, error) { + response, err := r.rt.RoundTrip(request) + if err != nil { + return nil, err + } + response.Body = http.MaxBytesReader(nil, response.Body, rpc.MaxNexusAPIRequestBodyBytes) + return response, nil +} + +type clientProviderCacheKey struct { + namespaceID, endpointID string + url string +} + +func clientProviderFactory( + httpTransportProvider NexusTransportProvider, + clusterMetadata cluster.Metadata, + rpcFactory common.RPCFactory, +) (ClientProvider, error) { + cl, err := rpcFactory.CreateLocalFrontendHTTPClient() + if err != nil { + return nil, fmt.Errorf("cannot create local frontend HTTP client: %w", err) + } + var clusterID string + + if clusterInfo, ok := clusterMetadata.GetAllClusterInfo()[clusterMetadata.GetCurrentClusterName()]; ok { + clusterID = clusterInfo.ClusterID + } + m := collection.NewFallibleOnceMap(func(key clientProviderCacheKey) (*http.Client, error) { + transport := httpTransportProvider(key.namespaceID, key.endpointID) + return &http.Client{ + Transport: responseSizeLimiter{transport}, + }, nil + }) + + return func(ctx context.Context, namespaceID string, entry *persistencespb.NexusEndpointEntry, service string) (*nexusrpc.HTTPClient, error) { + var url string + var httpClient *http.Client + httpCaller := httpClient.Do + switch variant := entry.Endpoint.Spec.Target.Variant.(type) { + case *persistencespb.NexusEndpointTarget_External_: + url = variant.External.GetUrl() + var err error + httpClient, err = m.Get(clientProviderCacheKey{namespaceID, entry.Id, url}) + if err != nil { + return nil, err + } + if clusterID != "" { + httpCaller = func(r *http.Request) (*http.Response, error) { + resp, callErr := httpClient.Do(r) + commonnexus.SetFailureSourceOnContext(ctx, resp) + return resp, callErr + } + } + case *persistencespb.NexusEndpointTarget_Worker_: + url = cl.BaseURL() + "/" + 
commonnexus.RouteDispatchNexusTaskByEndpoint.Path(entry.Id) + httpClient = &cl.Client + if clusterID != "" { + httpCaller = func(r *http.Request) (*http.Response, error) { + r.Header.Set(nexusCallbackSourceHeader, clusterID) + resp, callErr := httpClient.Do(r) + commonnexus.SetFailureSourceOnContext(ctx, resp) + return resp, callErr + } + } + default: + return nil, serviceerror.NewInternal("got unexpected endpoint target") + } + return nexusrpc.NewHTTPClient(nexusrpc.HTTPClientOptions{ + BaseURL: url, + Service: service, + HTTPCaller: httpCaller, + Serializer: commonnexus.PayloadSerializer, + }) + }, nil +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/operation.go-helpers.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/operation.go-helpers.pb.go new file mode 100644 index 00000000000..f08af023576 --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/operation.go-helpers.pb.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package nexusoperationpb + +import ( + "fmt" + + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type OperationState to the protobuf v3 wire format +func (val *OperationState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type OperationState from the protobuf v3 wire format +func (val *OperationState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *OperationState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two OperationState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *OperationState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *OperationState + switch t := that.(type) { + case *OperationState: + that1 = t + case OperationState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusOperationTerminateState to the protobuf v3 wire format +func (val *NexusOperationTerminateState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusOperationTerminateState from the protobuf v3 wire format +func (val *NexusOperationTerminateState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusOperationTerminateState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusOperationTerminateState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusOperationTerminateState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusOperationTerminateState + switch t := that.(type) { + case *NexusOperationTerminateState: + that1 = t + case NexusOperationTerminateState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type OperationOutcome to the protobuf v3 wire format +func (val *OperationOutcome) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type OperationOutcome from the protobuf v3 wire format +func (val *OperationOutcome) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *OperationOutcome) Size() int { + return proto.Size(val) +} + +// Equal returns whether two OperationOutcome values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *OperationOutcome) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *OperationOutcome + switch t := that.(type) { + case *OperationOutcome: + that1 = t + case OperationOutcome: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CancellationState to the protobuf v3 wire format +func (val *CancellationState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CancellationState from the protobuf v3 wire format +func (val *CancellationState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CancellationState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CancellationState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CancellationState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CancellationState + switch t := that.(type) { + case *CancellationState: + that1 = t + case CancellationState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type OperationRequestData to the protobuf v3 wire format +func (val *OperationRequestData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type OperationRequestData from the protobuf v3 wire format +func (val *OperationRequestData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *OperationRequestData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two OperationRequestData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *OperationRequestData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *OperationRequestData + switch t := that.(type) { + case *OperationRequestData: + that1 = t + case OperationRequestData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +var ( + OperationStatus_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Scheduled": 1, + "BackingOff": 2, + "Started": 3, + "Succeeded": 4, + "Failed": 5, + "Canceled": 6, + "TimedOut": 7, + "Terminated": 8, + } +) + +// OperationStatusFromString parses a OperationStatus value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to OperationStatus +func OperationStatusFromString(s string) (OperationStatus, error) { + if v, ok := OperationStatus_value[s]; ok { + return OperationStatus(v), nil + } else if v, ok := OperationStatus_shorthandValue[s]; ok { + return OperationStatus(v), nil + } + return OperationStatus(0), fmt.Errorf("%s is not a valid OperationStatus", s) +} + +var ( + CancellationStatus_shorthandValue = map[string]int32{ + "Unspecified": 0, + "Scheduled": 1, + "BackingOff": 2, + "Succeeded": 3, + "Failed": 4, + "TimedOut": 5, + "Blocked": 6, + } +) + +// CancellationStatusFromString parses a CancellationStatus value from either the protojson +// canonical SCREAMING_CASE enum or the traditional temporal PascalCase enum to CancellationStatus +func CancellationStatusFromString(s string) (CancellationStatus, error) { + if v, ok := CancellationStatus_value[s]; ok { + return CancellationStatus(v), nil + } else if v, ok := CancellationStatus_shorthandValue[s]; ok { + return CancellationStatus(v), nil + } + return CancellationStatus(0), fmt.Errorf("%s is not a valid CancellationStatus", s) +} diff --git 
a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/operation.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/operation.pb.go new file mode 100644 index 00000000000..4156df1d5ec --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/operation.pb.go @@ -0,0 +1,1027 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/nexusoperation/proto/v1/operation.proto + +package nexusoperationpb + +import ( + reflect "reflect" + "strconv" + sync "sync" + unsafe "unsafe" + + v11 "go.temporal.io/api/common/v1" + v1 "go.temporal.io/api/failure/v1" + v12 "go.temporal.io/api/sdk/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type OperationStatus int32 + +const ( + // Default value, unspecified status. + OPERATION_STATUS_UNSPECIFIED OperationStatus = 0 + // Operation is in the queue waiting to be executed or is currently executing. + OPERATION_STATUS_SCHEDULED OperationStatus = 1 + // Operation has failed with a retryable error and is backing off before the next attempt. + OPERATION_STATUS_BACKING_OFF OperationStatus = 2 + // Operation was started and will complete asynchronously. + OPERATION_STATUS_STARTED OperationStatus = 3 + // Operation succeeded. + // This may happen either as a response to a start request or as reported via callback. 
+ OPERATION_STATUS_SUCCEEDED OperationStatus = 4 + // Operation failed either when a start request encounters a non-retryable error or as reported via callback. + OPERATION_STATUS_FAILED OperationStatus = 5 + // Operation completed as canceled (may have not ever been delivered). + // This may happen either as a response to a start request or as reported via callback. + OPERATION_STATUS_CANCELED OperationStatus = 6 + // Operation timed out - exceeded the user supplied schedule-to-close timeout. + // Any attempts to complete the operation in this status will be ignored. + OPERATION_STATUS_TIMED_OUT OperationStatus = 7 + OPERATION_STATUS_TERMINATED OperationStatus = 8 +) + +// Enum value maps for OperationStatus. +var ( + OperationStatus_name = map[int32]string{ + 0: "OPERATION_STATUS_UNSPECIFIED", + 1: "OPERATION_STATUS_SCHEDULED", + 2: "OPERATION_STATUS_BACKING_OFF", + 3: "OPERATION_STATUS_STARTED", + 4: "OPERATION_STATUS_SUCCEEDED", + 5: "OPERATION_STATUS_FAILED", + 6: "OPERATION_STATUS_CANCELED", + 7: "OPERATION_STATUS_TIMED_OUT", + 8: "OPERATION_STATUS_TERMINATED", + } + OperationStatus_value = map[string]int32{ + "OPERATION_STATUS_UNSPECIFIED": 0, + "OPERATION_STATUS_SCHEDULED": 1, + "OPERATION_STATUS_BACKING_OFF": 2, + "OPERATION_STATUS_STARTED": 3, + "OPERATION_STATUS_SUCCEEDED": 4, + "OPERATION_STATUS_FAILED": 5, + "OPERATION_STATUS_CANCELED": 6, + "OPERATION_STATUS_TIMED_OUT": 7, + "OPERATION_STATUS_TERMINATED": 8, + } +) + +func (x OperationStatus) Enum() *OperationStatus { + p := new(OperationStatus) + *p = x + return p +} + +func (x OperationStatus) String() string { + switch x { + case OPERATION_STATUS_UNSPECIFIED: + return "Unspecified" + case OPERATION_STATUS_SCHEDULED: + return "Scheduled" + case OPERATION_STATUS_BACKING_OFF: + return "BackingOff" + case OPERATION_STATUS_STARTED: + return "Started" + case OPERATION_STATUS_SUCCEEDED: + return "Succeeded" + case OPERATION_STATUS_FAILED: + return "Failed" + case OPERATION_STATUS_CANCELED: + return 
"Canceled" + case OPERATION_STATUS_TIMED_OUT: + return "TimedOut" + case OPERATION_STATUS_TERMINATED: + return "Terminated" + default: + return strconv.Itoa(int(x)) + } + +} + +func (OperationStatus) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_enumTypes[0].Descriptor() +} + +func (OperationStatus) Type() protoreflect.EnumType { + return &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_enumTypes[0] +} + +func (x OperationStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use OperationStatus.Descriptor instead. +func (OperationStatus) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{0} +} + +type CancellationStatus int32 + +const ( + // Default value, unspecified status. + CANCELLATION_STATUS_UNSPECIFIED CancellationStatus = 0 + // Cancellation request is in the queue waiting to be executed or is currently executing. + CANCELLATION_STATUS_SCHEDULED CancellationStatus = 1 + // Cancellation request has failed with a retryable error and is backing off before the next attempt. + CANCELLATION_STATUS_BACKING_OFF CancellationStatus = 2 + // Cancellation request succeeded. + CANCELLATION_STATUS_SUCCEEDED CancellationStatus = 3 + // Cancellation request failed with a non-retryable error. + CANCELLATION_STATUS_FAILED CancellationStatus = 4 + // The associated operation timed out - exceeded the user supplied schedule-to-close timeout. + CANCELLATION_STATUS_TIMED_OUT CancellationStatus = 5 + // Cancellation request is blocked (eg: by circuit breaker). + CANCELLATION_STATUS_BLOCKED CancellationStatus = 6 +) + +// Enum value maps for CancellationStatus. 
+var ( + CancellationStatus_name = map[int32]string{ + 0: "CANCELLATION_STATUS_UNSPECIFIED", + 1: "CANCELLATION_STATUS_SCHEDULED", + 2: "CANCELLATION_STATUS_BACKING_OFF", + 3: "CANCELLATION_STATUS_SUCCEEDED", + 4: "CANCELLATION_STATUS_FAILED", + 5: "CANCELLATION_STATUS_TIMED_OUT", + 6: "CANCELLATION_STATUS_BLOCKED", + } + CancellationStatus_value = map[string]int32{ + "CANCELLATION_STATUS_UNSPECIFIED": 0, + "CANCELLATION_STATUS_SCHEDULED": 1, + "CANCELLATION_STATUS_BACKING_OFF": 2, + "CANCELLATION_STATUS_SUCCEEDED": 3, + "CANCELLATION_STATUS_FAILED": 4, + "CANCELLATION_STATUS_TIMED_OUT": 5, + "CANCELLATION_STATUS_BLOCKED": 6, + } +) + +func (x CancellationStatus) Enum() *CancellationStatus { + p := new(CancellationStatus) + *p = x + return p +} + +func (x CancellationStatus) String() string { + switch x { + case CANCELLATION_STATUS_UNSPECIFIED: + return "Unspecified" + case CANCELLATION_STATUS_SCHEDULED: + return "Scheduled" + case CANCELLATION_STATUS_BACKING_OFF: + return "BackingOff" + case CANCELLATION_STATUS_SUCCEEDED: + return "Succeeded" + case CANCELLATION_STATUS_FAILED: + return "Failed" + case CANCELLATION_STATUS_TIMED_OUT: + return "TimedOut" + case CANCELLATION_STATUS_BLOCKED: + return "Blocked" + default: + return strconv.Itoa(int(x)) + } + +} + +func (CancellationStatus) Descriptor() protoreflect.EnumDescriptor { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_enumTypes[1].Descriptor() +} + +func (CancellationStatus) Type() protoreflect.EnumType { + return &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_enumTypes[1] +} + +func (x CancellationStatus) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CancellationStatus.Descriptor instead. 
+func (CancellationStatus) EnumDescriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{1} +} + +type OperationState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Current status of the operation. + Status OperationStatus `protobuf:"varint,1,opt,name=status,proto3,enum=temporal.server.chasm.lib.nexusoperation.proto.v1.OperationStatus" json:"status,omitempty"` + // Endpoint ID - used internally to avoid failing requests when endpoint is renamed. + EndpointId string `protobuf:"bytes,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` + // Endpoint name - resolved from the endpoint registry for this workflow's namespace. + Endpoint string `protobuf:"bytes,3,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + // Service name. + Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service,omitempty"` + // Operation name. + Operation string `protobuf:"bytes,5,opt,name=operation,proto3" json:"operation,omitempty"` + // The time when the operation was scheduled. + ScheduledTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=scheduled_time,json=scheduledTime,proto3" json:"scheduled_time,omitempty"` + // The time when the operation was started. Only set for asynchronous operations after a successful StartOperation + // call. Taken from the component time or the time reported in an async completion request, whichever happens first. + StartedTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=started_time,json=startedTime,proto3" json:"started_time,omitempty"` + // The time when the operation reached a terminal state. Taken from the component time or the time reported in an + // async completion request, whichever happens first. + ClosedTime *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=closed_time,json=closedTime,proto3" json:"closed_time,omitempty"` + // Schedule-to-start timeout for this operation. 
+ ScheduleToStartTimeout *durationpb.Duration `protobuf:"bytes,9,opt,name=schedule_to_start_timeout,json=scheduleToStartTimeout,proto3" json:"schedule_to_start_timeout,omitempty"` + // Start-to-close timeout for this operation. + StartToCloseTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=start_to_close_timeout,json=startToCloseTimeout,proto3" json:"start_to_close_timeout,omitempty"` + // Schedule-to-close timeout for this operation. + ScheduleToCloseTimeout *durationpb.Duration `protobuf:"bytes,11,opt,name=schedule_to_close_timeout,json=scheduleToCloseTimeout,proto3" json:"schedule_to_close_timeout,omitempty"` + // Unique request ID allocated for all retry attempts of the StartOperation request. + RequestId string `protobuf:"bytes,12,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Opaque data injected by the parent (e.g. workflow) for its own bookkeeping. + // The operation component itself does not interpret this field. + ParentData *anypb.Any `protobuf:"bytes,13,opt,name=parent_data,json=parentData,proto3" json:"parent_data,omitempty"` + // The number of attempts made to deliver the start operation request. + // This number is approximate, it is incremented when a task is added to the history queue. + // In practice, there could be more attempts if a task is executed but fails to commit, or less attempts if a task was + // never executed. + Attempt int32 `protobuf:"varint,14,opt,name=attempt,proto3" json:"attempt,omitempty"` + // The time when the last attempt completed. + LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // The last attempt's failure, if any. 
+ LastAttemptFailure *v1.Failure `protobuf:"bytes,16,opt,name=last_attempt_failure,json=lastAttemptFailure,proto3" json:"last_attempt_failure,omitempty"` + // The time when the next attempt is scheduled (only set when in BACKING_OFF state). + NextAttemptScheduleTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=next_attempt_schedule_time,json=nextAttemptScheduleTime,proto3" json:"next_attempt_schedule_time,omitempty"` + // Operation token - only set for asynchronous operations after a successful StartOperation call. + OperationToken string `protobuf:"bytes,18,opt,name=operation_token,json=operationToken,proto3" json:"operation_token,omitempty"` + // Explicit terminate request state for standalone operations. + TerminateState *NexusOperationTerminateState `protobuf:"bytes,19,opt,name=terminate_state,json=terminateState,proto3" json:"terminate_state,omitempty"` + // Links are only populated for standalone operations. Workflow-backed operations derive links from history events. + Links []*v11.Link `protobuf:"bytes,20,rep,name=links,proto3" json:"links,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OperationState) Reset() { + *x = OperationState{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OperationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationState) ProtoMessage() {} + +func (x *OperationState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationState.ProtoReflect.Descriptor instead. 
+func (*OperationState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{0} +} + +func (x *OperationState) GetStatus() OperationStatus { + if x != nil { + return x.Status + } + return OPERATION_STATUS_UNSPECIFIED +} + +func (x *OperationState) GetEndpointId() string { + if x != nil { + return x.EndpointId + } + return "" +} + +func (x *OperationState) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *OperationState) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *OperationState) GetOperation() string { + if x != nil { + return x.Operation + } + return "" +} + +func (x *OperationState) GetScheduledTime() *timestamppb.Timestamp { + if x != nil { + return x.ScheduledTime + } + return nil +} + +func (x *OperationState) GetStartedTime() *timestamppb.Timestamp { + if x != nil { + return x.StartedTime + } + return nil +} + +func (x *OperationState) GetClosedTime() *timestamppb.Timestamp { + if x != nil { + return x.ClosedTime + } + return nil +} + +func (x *OperationState) GetScheduleToStartTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToStartTimeout + } + return nil +} + +func (x *OperationState) GetStartToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.StartToCloseTimeout + } + return nil +} + +func (x *OperationState) GetScheduleToCloseTimeout() *durationpb.Duration { + if x != nil { + return x.ScheduleToCloseTimeout + } + return nil +} + +func (x *OperationState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *OperationState) GetParentData() *anypb.Any { + if x != nil { + return x.ParentData + } + return nil +} + +func (x *OperationState) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *OperationState) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return 
x.LastAttemptCompleteTime + } + return nil +} + +func (x *OperationState) GetLastAttemptFailure() *v1.Failure { + if x != nil { + return x.LastAttemptFailure + } + return nil +} + +func (x *OperationState) GetNextAttemptScheduleTime() *timestamppb.Timestamp { + if x != nil { + return x.NextAttemptScheduleTime + } + return nil +} + +func (x *OperationState) GetOperationToken() string { + if x != nil { + return x.OperationToken + } + return "" +} + +func (x *OperationState) GetTerminateState() *NexusOperationTerminateState { + if x != nil { + return x.TerminateState + } + return nil +} + +func (x *OperationState) GetLinks() []*v11.Link { + if x != nil { + return x.Links + } + return nil +} + +type NexusOperationTerminateState struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusOperationTerminateState) Reset() { + *x = NexusOperationTerminateState{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusOperationTerminateState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusOperationTerminateState) ProtoMessage() {} + +func (x *NexusOperationTerminateState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusOperationTerminateState.ProtoReflect.Descriptor instead. 
+func (*NexusOperationTerminateState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{1} +} + +func (x *NexusOperationTerminateState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type OperationOutcome struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Variant: + // + // *OperationOutcome_Successful_ + // *OperationOutcome_Failed_ + Variant isOperationOutcome_Variant `protobuf_oneof:"variant"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OperationOutcome) Reset() { + *x = OperationOutcome{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OperationOutcome) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationOutcome) ProtoMessage() {} + +func (x *OperationOutcome) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationOutcome.ProtoReflect.Descriptor instead. 
+func (*OperationOutcome) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{2} +} + +func (x *OperationOutcome) GetVariant() isOperationOutcome_Variant { + if x != nil { + return x.Variant + } + return nil +} + +func (x *OperationOutcome) GetSuccessful() *OperationOutcome_Successful { + if x != nil { + if x, ok := x.Variant.(*OperationOutcome_Successful_); ok { + return x.Successful + } + } + return nil +} + +func (x *OperationOutcome) GetFailed() *OperationOutcome_Failed { + if x != nil { + if x, ok := x.Variant.(*OperationOutcome_Failed_); ok { + return x.Failed + } + } + return nil +} + +type isOperationOutcome_Variant interface { + isOperationOutcome_Variant() +} + +type OperationOutcome_Successful_ struct { + Successful *OperationOutcome_Successful `protobuf:"bytes,1,opt,name=successful,proto3,oneof"` +} + +type OperationOutcome_Failed_ struct { + Failed *OperationOutcome_Failed `protobuf:"bytes,2,opt,name=failed,proto3,oneof"` +} + +func (*OperationOutcome_Successful_) isOperationOutcome_Variant() {} + +func (*OperationOutcome_Failed_) isOperationOutcome_Variant() {} + +type CancellationState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Current status of the cancellation request. + Status CancellationStatus `protobuf:"varint,1,opt,name=status,proto3,enum=temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationStatus" json:"status,omitempty"` + // The time when cancellation was requested. + RequestedTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=requested_time,json=requestedTime,proto3" json:"requested_time,omitempty"` + // The number of attempts made to deliver the cancel operation request. + // This number represents a minimum bound since the attempt is incremented after the request completes. + Attempt int32 `protobuf:"varint,3,opt,name=attempt,proto3" json:"attempt,omitempty"` + // The time when the last attempt completed. 
+ LastAttemptCompleteTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_attempt_complete_time,json=lastAttemptCompleteTime,proto3" json:"last_attempt_complete_time,omitempty"` + // The last attempt's failure, if any. + LastAttemptFailure *v1.Failure `protobuf:"bytes,5,opt,name=last_attempt_failure,json=lastAttemptFailure,proto3" json:"last_attempt_failure,omitempty"` + // The time when the next attempt is scheduled (only set when in BACKING_OFF state). + NextAttemptScheduleTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=next_attempt_schedule_time,json=nextAttemptScheduleTime,proto3" json:"next_attempt_schedule_time,omitempty"` + // Opaque data injected by the parent (e.g. workflow) for its own bookkeeping. + // The cancellation component itself does not interpret this field. + ParentData *anypb.Any `protobuf:"bytes,7,opt,name=parent_data,json=parentData,proto3" json:"parent_data,omitempty"` + RequestId string `protobuf:"bytes,8,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Identity string `protobuf:"bytes,9,opt,name=identity,proto3" json:"identity,omitempty"` + Reason string `protobuf:"bytes,10,opt,name=reason,proto3" json:"reason,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CancellationState) Reset() { + *x = CancellationState{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CancellationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancellationState) ProtoMessage() {} + +func (x *CancellationState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use CancellationState.ProtoReflect.Descriptor instead. +func (*CancellationState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{3} +} + +func (x *CancellationState) GetStatus() CancellationStatus { + if x != nil { + return x.Status + } + return CANCELLATION_STATUS_UNSPECIFIED +} + +func (x *CancellationState) GetRequestedTime() *timestamppb.Timestamp { + if x != nil { + return x.RequestedTime + } + return nil +} + +func (x *CancellationState) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +func (x *CancellationState) GetLastAttemptCompleteTime() *timestamppb.Timestamp { + if x != nil { + return x.LastAttemptCompleteTime + } + return nil +} + +func (x *CancellationState) GetLastAttemptFailure() *v1.Failure { + if x != nil { + return x.LastAttemptFailure + } + return nil +} + +func (x *CancellationState) GetNextAttemptScheduleTime() *timestamppb.Timestamp { + if x != nil { + return x.NextAttemptScheduleTime + } + return nil +} + +func (x *CancellationState) GetParentData() *anypb.Any { + if x != nil { + return x.ParentData + } + return nil +} + +func (x *CancellationState) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *CancellationState) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *CancellationState) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type OperationRequestData struct { + state protoimpl.MessageState `protogen:"open.v1"` + Input *v11.Payload `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"` + NexusHeader map[string]string `protobuf:"bytes,2,rep,name=nexus_header,json=nexusHeader,proto3" json:"nexus_header,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + UserMetadata *v12.UserMetadata 
`protobuf:"bytes,3,opt,name=user_metadata,json=userMetadata,proto3" json:"user_metadata,omitempty"` + Identity string `protobuf:"bytes,4,opt,name=identity,proto3" json:"identity,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OperationRequestData) Reset() { + *x = OperationRequestData{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OperationRequestData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationRequestData) ProtoMessage() {} + +func (x *OperationRequestData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationRequestData.ProtoReflect.Descriptor instead. 
+func (*OperationRequestData) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{4} +} + +func (x *OperationRequestData) GetInput() *v11.Payload { + if x != nil { + return x.Input + } + return nil +} + +func (x *OperationRequestData) GetNexusHeader() map[string]string { + if x != nil { + return x.NexusHeader + } + return nil +} + +func (x *OperationRequestData) GetUserMetadata() *v12.UserMetadata { + if x != nil { + return x.UserMetadata + } + return nil +} + +func (x *OperationRequestData) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +type OperationOutcome_Successful struct { + state protoimpl.MessageState `protogen:"open.v1"` + Result *v11.Payload `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OperationOutcome_Successful) Reset() { + *x = OperationOutcome_Successful{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OperationOutcome_Successful) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationOutcome_Successful) ProtoMessage() {} + +func (x *OperationOutcome_Successful) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationOutcome_Successful.ProtoReflect.Descriptor instead. 
+func (*OperationOutcome_Successful) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *OperationOutcome_Successful) GetResult() *v11.Payload { + if x != nil { + return x.Result + } + return nil +} + +type OperationOutcome_Failed struct { + state protoimpl.MessageState `protogen:"open.v1"` + Failure *v1.Failure `protobuf:"bytes,1,opt,name=failure,proto3" json:"failure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *OperationOutcome_Failed) Reset() { + *x = OperationOutcome_Failed{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *OperationOutcome_Failed) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OperationOutcome_Failed) ProtoMessage() {} + +func (x *OperationOutcome_Failed) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OperationOutcome_Failed.ProtoReflect.Descriptor instead. 
+func (*OperationOutcome_Failed) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP(), []int{2, 1} +} + +func (x *OperationOutcome_Failed) GetFailure() *v1.Failure { + if x != nil { + return x.Failure + } + return nil +} + +var File_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDesc = "" + + "\n" + + "Atemporal/server/chasm/lib/nexusoperation/proto/v1/operation.proto\x121temporal.server.chasm.lib.nexusoperation.proto.v1\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\x1a'temporal/api/sdk/v1/user_metadata.proto\"\xe9\t\n" + + "\x0eOperationState\x12Z\n" + + "\x06status\x18\x01 \x01(\x0e2B.temporal.server.chasm.lib.nexusoperation.proto.v1.OperationStatusR\x06status\x12\x1f\n" + + "\vendpoint_id\x18\x02 \x01(\tR\n" + + "endpointId\x12\x1a\n" + + "\bendpoint\x18\x03 \x01(\tR\bendpoint\x12\x18\n" + + "\aservice\x18\x04 \x01(\tR\aservice\x12\x1c\n" + + "\toperation\x18\x05 \x01(\tR\toperation\x12A\n" + + "\x0escheduled_time\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\rscheduledTime\x12=\n" + + "\fstarted_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\vstartedTime\x12;\n" + + "\vclosed_time\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "closedTime\x12T\n" + + "\x19schedule_to_start_timeout\x18\t \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToStartTimeout\x12N\n" + + "\x16start_to_close_timeout\x18\n" + + " \x01(\v2\x19.google.protobuf.DurationR\x13startToCloseTimeout\x12T\n" + + "\x19schedule_to_close_timeout\x18\v \x01(\v2\x19.google.protobuf.DurationR\x16scheduleToCloseTimeout\x12\x1d\n" + + "\n" + + "request_id\x18\f \x01(\tR\trequestId\x125\n" + + "\vparent_data\x18\r \x01(\v2\x14.google.protobuf.AnyR\n" + 
+ "parentData\x12\x18\n" + + "\aattempt\x18\x0e \x01(\x05R\aattempt\x12W\n" + + "\x1alast_attempt_complete_time\x18\x0f \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12R\n" + + "\x14last_attempt_failure\x18\x10 \x01(\v2 .temporal.api.failure.v1.FailureR\x12lastAttemptFailure\x12W\n" + + "\x1anext_attempt_schedule_time\x18\x11 \x01(\v2\x1a.google.protobuf.TimestampR\x17nextAttemptScheduleTime\x12'\n" + + "\x0foperation_token\x18\x12 \x01(\tR\x0eoperationToken\x12x\n" + + "\x0fterminate_state\x18\x13 \x01(\v2O.temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationTerminateStateR\x0eterminateState\x122\n" + + "\x05links\x18\x14 \x03(\v2\x1c.temporal.api.common.v1.LinkR\x05links\"=\n" + + "\x1cNexusOperationTerminateState\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\"\x82\x03\n" + + "\x10OperationOutcome\x12p\n" + + "\n" + + "successful\x18\x01 \x01(\v2N.temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.SuccessfulH\x00R\n" + + "successful\x12d\n" + + "\x06failed\x18\x02 \x01(\v2J.temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.FailedH\x00R\x06failed\x1aE\n" + + "\n" + + "Successful\x127\n" + + "\x06result\x18\x01 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x06result\x1aD\n" + + "\x06Failed\x12:\n" + + "\afailure\x18\x01 \x01(\v2 .temporal.api.failure.v1.FailureR\afailureB\t\n" + + "\avariant\"\xdf\x04\n" + + "\x11CancellationState\x12]\n" + + "\x06status\x18\x01 \x01(\x0e2E.temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationStatusR\x06status\x12A\n" + + "\x0erequested_time\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\rrequestedTime\x12\x18\n" + + "\aattempt\x18\x03 \x01(\x05R\aattempt\x12W\n" + + "\x1alast_attempt_complete_time\x18\x04 \x01(\v2\x1a.google.protobuf.TimestampR\x17lastAttemptCompleteTime\x12R\n" + + "\x14last_attempt_failure\x18\x05 \x01(\v2 .temporal.api.failure.v1.FailureR\x12lastAttemptFailure\x12W\n" + + "\x1anext_attempt_schedule_time\x18\x06 
\x01(\v2\x1a.google.protobuf.TimestampR\x17nextAttemptScheduleTime\x125\n" + + "\vparent_data\x18\a \x01(\v2\x14.google.protobuf.AnyR\n" + + "parentData\x12\x1d\n" + + "\n" + + "request_id\x18\b \x01(\tR\trequestId\x12\x1a\n" + + "\bidentity\x18\t \x01(\tR\bidentity\x12\x16\n" + + "\x06reason\x18\n" + + " \x01(\tR\x06reason\"\xee\x02\n" + + "\x14OperationRequestData\x125\n" + + "\x05input\x18\x01 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x05input\x12{\n" + + "\fnexus_header\x18\x02 \x03(\v2X.temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData.NexusHeaderEntryR\vnexusHeader\x12F\n" + + "\ruser_metadata\x18\x03 \x01(\v2!.temporal.api.sdk.v1.UserMetadataR\fuserMetadata\x12\x1a\n" + + "\bidentity\x18\x04 \x01(\tR\bidentity\x1a>\n" + + "\x10NexusHeaderEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01*\xb0\x02\n" + + "\x0fOperationStatus\x12 \n" + + "\x1cOPERATION_STATUS_UNSPECIFIED\x10\x00\x12\x1e\n" + + "\x1aOPERATION_STATUS_SCHEDULED\x10\x01\x12 \n" + + "\x1cOPERATION_STATUS_BACKING_OFF\x10\x02\x12\x1c\n" + + "\x18OPERATION_STATUS_STARTED\x10\x03\x12\x1e\n" + + "\x1aOPERATION_STATUS_SUCCEEDED\x10\x04\x12\x1b\n" + + "\x17OPERATION_STATUS_FAILED\x10\x05\x12\x1d\n" + + "\x19OPERATION_STATUS_CANCELED\x10\x06\x12\x1e\n" + + "\x1aOPERATION_STATUS_TIMED_OUT\x10\a\x12\x1f\n" + + "\x1bOPERATION_STATUS_TERMINATED\x10\b*\x88\x02\n" + + "\x12CancellationStatus\x12#\n" + + "\x1fCANCELLATION_STATUS_UNSPECIFIED\x10\x00\x12!\n" + + "\x1dCANCELLATION_STATUS_SCHEDULED\x10\x01\x12#\n" + + "\x1fCANCELLATION_STATUS_BACKING_OFF\x10\x02\x12!\n" + + "\x1dCANCELLATION_STATUS_SUCCEEDED\x10\x03\x12\x1e\n" + + "\x1aCANCELLATION_STATUS_FAILED\x10\x04\x12!\n" + + "\x1dCANCELLATION_STATUS_TIMED_OUT\x10\x05\x12\x1f\n" + + "\x1bCANCELLATION_STATUS_BLOCKED\x10\x06BVZTgo.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpbb\x06proto3" + +var ( + 
file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDescData +} + +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_goTypes = []any{ + (OperationStatus)(0), // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationStatus + (CancellationStatus)(0), // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationStatus + (*OperationState)(nil), // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState + (*NexusOperationTerminateState)(nil), // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationTerminateState + (*OperationOutcome)(nil), // 4: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome + (*CancellationState)(nil), // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState + (*OperationRequestData)(nil), // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData + (*OperationOutcome_Successful)(nil), // 7: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.Successful + 
(*OperationOutcome_Failed)(nil), // 8: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.Failed + nil, // 9: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData.NexusHeaderEntry + (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 11: google.protobuf.Duration + (*anypb.Any)(nil), // 12: google.protobuf.Any + (*v1.Failure)(nil), // 13: temporal.api.failure.v1.Failure + (*v11.Link)(nil), // 14: temporal.api.common.v1.Link + (*v11.Payload)(nil), // 15: temporal.api.common.v1.Payload + (*v12.UserMetadata)(nil), // 16: temporal.api.sdk.v1.UserMetadata +} +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.status:type_name -> temporal.server.chasm.lib.nexusoperation.proto.v1.OperationStatus + 10, // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.scheduled_time:type_name -> google.protobuf.Timestamp + 10, // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.started_time:type_name -> google.protobuf.Timestamp + 10, // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.closed_time:type_name -> google.protobuf.Timestamp + 11, // 4: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.schedule_to_start_timeout:type_name -> google.protobuf.Duration + 11, // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.start_to_close_timeout:type_name -> google.protobuf.Duration + 11, // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.schedule_to_close_timeout:type_name -> google.protobuf.Duration + 12, // 7: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.parent_data:type_name -> google.protobuf.Any + 10, // 8: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 13, // 9: 
temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.last_attempt_failure:type_name -> temporal.api.failure.v1.Failure + 10, // 10: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.next_attempt_schedule_time:type_name -> google.protobuf.Timestamp + 3, // 11: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.terminate_state:type_name -> temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationTerminateState + 14, // 12: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationState.links:type_name -> temporal.api.common.v1.Link + 7, // 13: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.successful:type_name -> temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.Successful + 8, // 14: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.failed:type_name -> temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.Failed + 1, // 15: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState.status:type_name -> temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationStatus + 10, // 16: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState.requested_time:type_name -> google.protobuf.Timestamp + 10, // 17: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState.last_attempt_complete_time:type_name -> google.protobuf.Timestamp + 13, // 18: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState.last_attempt_failure:type_name -> temporal.api.failure.v1.Failure + 10, // 19: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState.next_attempt_schedule_time:type_name -> google.protobuf.Timestamp + 12, // 20: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationState.parent_data:type_name -> google.protobuf.Any + 15, // 21: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData.input:type_name -> temporal.api.common.v1.Payload + 9, // 22: 
temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData.nexus_header:type_name -> temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData.NexusHeaderEntry + 16, // 23: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationRequestData.user_metadata:type_name -> temporal.api.sdk.v1.UserMetadata + 15, // 24: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.Successful.result:type_name -> temporal.api.common.v1.Payload + 13, // 25: temporal.server.chasm.lib.nexusoperation.proto.v1.OperationOutcome.Failed.failure:type_name -> temporal.api.failure.v1.Failure + 26, // [26:26] is the sub-list for method output_type + 26, // [26:26] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_init() } +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_init() { + if File_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto != nil { + return + } + file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes[2].OneofWrappers = []any{ + (*OperationOutcome_Successful_)(nil), + (*OperationOutcome_Failed_)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_rawDesc)), + NumEnums: 2, + NumMessages: 8, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_depIdxs, + EnumInfos: 
file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_enumTypes, + MessageInfos: file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto = out.File + file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_goTypes = nil + file_temporal_server_chasm_lib_nexusoperation_proto_v1_operation_proto_depIdxs = nil +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/request_response.go-helpers.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..42bc21bf6d0 --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/request_response.go-helpers.pb.go @@ -0,0 +1,450 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package nexusoperationpb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type StartNexusOperationRequest to the protobuf v3 wire format +func (val *StartNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartNexusOperationRequest from the protobuf v3 wire format +func (val *StartNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartNexusOperationRequest + switch t := that.(type) { + case *StartNexusOperationRequest: + that1 = t + case StartNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartNexusOperationResponse to the protobuf v3 wire format +func (val *StartNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartNexusOperationResponse from the protobuf v3 wire format +func (val *StartNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartNexusOperationResponse + switch t := that.(type) { + case *StartNexusOperationResponse: + that1 = t + case StartNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeNexusOperationRequest to the protobuf v3 wire format +func (val *DescribeNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeNexusOperationRequest from the protobuf v3 wire format +func (val *DescribeNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeNexusOperationRequest + switch t := that.(type) { + case *DescribeNexusOperationRequest: + that1 = t + case DescribeNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeNexusOperationResponse to the protobuf v3 wire format +func (val *DescribeNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeNexusOperationResponse from the protobuf v3 wire format +func (val *DescribeNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeNexusOperationResponse + switch t := that.(type) { + case *DescribeNexusOperationResponse: + that1 = t + case DescribeNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RequestCancelNexusOperationRequest to the protobuf v3 wire format +func (val *RequestCancelNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestCancelNexusOperationRequest from the protobuf v3 wire format +func (val *RequestCancelNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestCancelNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestCancelNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestCancelNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestCancelNexusOperationRequest + switch t := that.(type) { + case *RequestCancelNexusOperationRequest: + that1 = t + case RequestCancelNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type RequestCancelNexusOperationResponse to the protobuf v3 wire format +func (val *RequestCancelNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type RequestCancelNexusOperationResponse from the protobuf v3 wire format +func (val *RequestCancelNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *RequestCancelNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two RequestCancelNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *RequestCancelNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *RequestCancelNexusOperationResponse + switch t := that.(type) { + case *RequestCancelNexusOperationResponse: + that1 = t + case RequestCancelNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TerminateNexusOperationRequest to the protobuf v3 wire format +func (val *TerminateNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TerminateNexusOperationRequest from the protobuf v3 wire format +func (val *TerminateNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TerminateNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TerminateNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TerminateNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TerminateNexusOperationRequest + switch t := that.(type) { + case *TerminateNexusOperationRequest: + that1 = t + case TerminateNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TerminateNexusOperationResponse to the protobuf v3 wire format +func (val *TerminateNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TerminateNexusOperationResponse from the protobuf v3 wire format +func (val *TerminateNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TerminateNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TerminateNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TerminateNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TerminateNexusOperationResponse + switch t := that.(type) { + case *TerminateNexusOperationResponse: + that1 = t + case TerminateNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteNexusOperationRequest to the protobuf v3 wire format +func (val *DeleteNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteNexusOperationRequest from the protobuf v3 wire format +func (val *DeleteNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteNexusOperationRequest + switch t := that.(type) { + case *DeleteNexusOperationRequest: + that1 = t + case DeleteNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteNexusOperationResponse to the protobuf v3 wire format +func (val *DeleteNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteNexusOperationResponse from the protobuf v3 wire format +func (val *DeleteNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteNexusOperationResponse + switch t := that.(type) { + case *DeleteNexusOperationResponse: + that1 = t + case DeleteNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollNexusOperationRequest to the protobuf v3 wire format +func (val *PollNexusOperationRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollNexusOperationRequest from the protobuf v3 wire format +func (val *PollNexusOperationRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollNexusOperationRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollNexusOperationRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollNexusOperationRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollNexusOperationRequest + switch t := that.(type) { + case *PollNexusOperationRequest: + that1 = t + case PollNexusOperationRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PollNexusOperationResponse to the protobuf v3 wire format +func (val *PollNexusOperationResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PollNexusOperationResponse from the protobuf v3 wire format +func (val *PollNexusOperationResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PollNexusOperationResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PollNexusOperationResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PollNexusOperationResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PollNexusOperationResponse + switch t := that.(type) { + case *PollNexusOperationResponse: + that1 = t + case PollNexusOperationResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/request_response.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/request_response.pb.go new file mode 100644 index 00000000000..91104136907 --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/request_response.pb.go @@ -0,0 +1,696 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/nexusoperation/proto/v1/request_response.proto + +package nexusoperationpb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/workflowservice/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StartNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + EndpointId string `protobuf:"bytes,2,opt,name=endpoint_id,json=endpointId,proto3" json:"endpoint_id,omitempty"` + FrontendRequest *v1.StartNexusOperationExecutionRequest `protobuf:"bytes,3,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartNexusOperationRequest) Reset() { + *x = StartNexusOperationRequest{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartNexusOperationRequest) ProtoMessage() {} + +func (x *StartNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use StartNexusOperationRequest.ProtoReflect.Descriptor instead. +func (*StartNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *StartNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *StartNexusOperationRequest) GetEndpointId() string { + if x != nil { + return x.EndpointId + } + return "" +} + +func (x *StartNexusOperationRequest) GetFrontendRequest() *v1.StartNexusOperationExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type StartNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.StartNexusOperationExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartNexusOperationResponse) Reset() { + *x = StartNexusOperationResponse{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartNexusOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartNexusOperationResponse) ProtoMessage() {} + +func (x *StartNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*StartNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *StartNexusOperationResponse) GetFrontendResponse() *v1.StartNexusOperationExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type DescribeNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DescribeNexusOperationExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeNexusOperationRequest) Reset() { + *x = DescribeNexusOperationRequest{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeNexusOperationRequest) ProtoMessage() {} + +func (x *DescribeNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *DescribeNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DescribeNexusOperationRequest) GetFrontendRequest() *v1.DescribeNexusOperationExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DescribeNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.DescribeNexusOperationExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeNexusOperationResponse) Reset() { + *x = DescribeNexusOperationResponse{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeNexusOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeNexusOperationResponse) ProtoMessage() {} + +func (x *DescribeNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*DescribeNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{3} +} + +func (x *DescribeNexusOperationResponse) GetFrontendResponse() *v1.DescribeNexusOperationExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type RequestCancelNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.RequestCancelNexusOperationExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelNexusOperationRequest) Reset() { + *x = RequestCancelNexusOperationRequest{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelNexusOperationRequest) ProtoMessage() {} + +func (x *RequestCancelNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*RequestCancelNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{4} +} + +func (x *RequestCancelNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *RequestCancelNexusOperationRequest) GetFrontendRequest() *v1.RequestCancelNexusOperationExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type RequestCancelNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RequestCancelNexusOperationResponse) Reset() { + *x = RequestCancelNexusOperationResponse{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RequestCancelNexusOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RequestCancelNexusOperationResponse) ProtoMessage() {} + +func (x *RequestCancelNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RequestCancelNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*RequestCancelNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{5} +} + +type TerminateNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.TerminateNexusOperationExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TerminateNexusOperationRequest) Reset() { + *x = TerminateNexusOperationRequest{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TerminateNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateNexusOperationRequest) ProtoMessage() {} + +func (x *TerminateNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*TerminateNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{6} +} + +func (x *TerminateNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *TerminateNexusOperationRequest) GetFrontendRequest() *v1.TerminateNexusOperationExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type TerminateNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TerminateNexusOperationResponse) Reset() { + *x = TerminateNexusOperationResponse{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TerminateNexusOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TerminateNexusOperationResponse) ProtoMessage() {} + +func (x *TerminateNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TerminateNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*TerminateNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{7} +} + +type DeleteNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DeleteNexusOperationExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteNexusOperationRequest) Reset() { + *x = DeleteNexusOperationRequest{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNexusOperationRequest) ProtoMessage() {} + +func (x *DeleteNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DeleteNexusOperationRequest) GetFrontendRequest() *v1.DeleteNexusOperationExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DeleteNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteNexusOperationResponse) Reset() { + *x = DeleteNexusOperationResponse{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteNexusOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteNexusOperationResponse) ProtoMessage() {} + +func (x *DeleteNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{9} +} + +type PollNexusOperationRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.PollNexusOperationExecutionRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollNexusOperationRequest) Reset() { + *x = PollNexusOperationRequest{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollNexusOperationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollNexusOperationRequest) ProtoMessage() {} + +func (x *PollNexusOperationRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollNexusOperationRequest.ProtoReflect.Descriptor instead. 
+func (*PollNexusOperationRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{10} +} + +func (x *PollNexusOperationRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *PollNexusOperationRequest) GetFrontendRequest() *v1.PollNexusOperationExecutionRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type PollNexusOperationResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.PollNexusOperationExecutionResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PollNexusOperationResponse) Reset() { + *x = PollNexusOperationResponse{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PollNexusOperationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PollNexusOperationResponse) ProtoMessage() {} + +func (x *PollNexusOperationResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PollNexusOperationResponse.ProtoReflect.Descriptor instead. 
+func (*PollNexusOperationResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP(), []int{11} +} + +func (x *PollNexusOperationResponse) GetFrontendResponse() *v1.PollNexusOperationExecutionResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +var File_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDesc = "" + + "\n" + + "Htemporal/server/chasm/lib/nexusoperation/proto/v1/request_response.proto\x121temporal.server.chasm.lib.nexusoperation.proto.v1\x1a6temporal/api/workflowservice/v1/request_response.proto\"\xd1\x01\n" + + "\x1aStartNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vendpoint_id\x18\x02 \x01(\tR\n" + + "endpointId\x12o\n" + + "\x10frontend_request\x18\x03 \x01(\v2D.temporal.api.workflowservice.v1.StartNexusOperationExecutionRequestR\x0ffrontendRequest\"\x91\x01\n" + + "\x1bStartNexusOperationResponse\x12r\n" + + "\x11frontend_response\x18\x01 \x01(\v2E.temporal.api.workflowservice.v1.StartNexusOperationExecutionResponseR\x10frontendResponse\"\xb6\x01\n" + + "\x1dDescribeNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12r\n" + + "\x10frontend_request\x18\x02 \x01(\v2G.temporal.api.workflowservice.v1.DescribeNexusOperationExecutionRequestR\x0ffrontendRequest\"\x97\x01\n" + + "\x1eDescribeNexusOperationResponse\x12u\n" + + "\x11frontend_response\x18\x01 \x01(\v2H.temporal.api.workflowservice.v1.DescribeNexusOperationExecutionResponseR\x10frontendResponse\"\xc0\x01\n" + + "\"RequestCancelNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12w\n" + + "\x10frontend_request\x18\x02 \x01(\v2L.temporal.api.workflowservice.v1.RequestCancelNexusOperationExecutionRequestR\x0ffrontendRequest\"%\n" + + 
"#RequestCancelNexusOperationResponse\"\xb8\x01\n" + + "\x1eTerminateNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12s\n" + + "\x10frontend_request\x18\x02 \x01(\v2H.temporal.api.workflowservice.v1.TerminateNexusOperationExecutionRequestR\x0ffrontendRequest\"!\n" + + "\x1fTerminateNexusOperationResponse\"\xb2\x01\n" + + "\x1bDeleteNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12p\n" + + "\x10frontend_request\x18\x02 \x01(\v2E.temporal.api.workflowservice.v1.DeleteNexusOperationExecutionRequestR\x0ffrontendRequest\"\x1e\n" + + "\x1cDeleteNexusOperationResponse\"\xae\x01\n" + + "\x19PollNexusOperationRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12n\n" + + "\x10frontend_request\x18\x02 \x01(\v2C.temporal.api.workflowservice.v1.PollNexusOperationExecutionRequestR\x0ffrontendRequest\"\x8f\x01\n" + + "\x1aPollNexusOperationResponse\x12q\n" + + "\x11frontend_response\x18\x01 \x01(\v2D.temporal.api.workflowservice.v1.PollNexusOperationExecutionResponseR\x10frontendResponseBVZTgo.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDesc))) + }) + return 
file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDescData +} + +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 12) +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_goTypes = []any{ + (*StartNexusOperationRequest)(nil), // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationRequest + (*StartNexusOperationResponse)(nil), // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationResponse + (*DescribeNexusOperationRequest)(nil), // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationRequest + (*DescribeNexusOperationResponse)(nil), // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationResponse + (*RequestCancelNexusOperationRequest)(nil), // 4: temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationRequest + (*RequestCancelNexusOperationResponse)(nil), // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationResponse + (*TerminateNexusOperationRequest)(nil), // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationRequest + (*TerminateNexusOperationResponse)(nil), // 7: temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationResponse + (*DeleteNexusOperationRequest)(nil), // 8: temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationRequest + (*DeleteNexusOperationResponse)(nil), // 9: temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationResponse + (*PollNexusOperationRequest)(nil), // 10: temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationRequest + (*PollNexusOperationResponse)(nil), // 11: temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationResponse + (*v1.StartNexusOperationExecutionRequest)(nil), // 12: temporal.api.workflowservice.v1.StartNexusOperationExecutionRequest + 
(*v1.StartNexusOperationExecutionResponse)(nil), // 13: temporal.api.workflowservice.v1.StartNexusOperationExecutionResponse + (*v1.DescribeNexusOperationExecutionRequest)(nil), // 14: temporal.api.workflowservice.v1.DescribeNexusOperationExecutionRequest + (*v1.DescribeNexusOperationExecutionResponse)(nil), // 15: temporal.api.workflowservice.v1.DescribeNexusOperationExecutionResponse + (*v1.RequestCancelNexusOperationExecutionRequest)(nil), // 16: temporal.api.workflowservice.v1.RequestCancelNexusOperationExecutionRequest + (*v1.TerminateNexusOperationExecutionRequest)(nil), // 17: temporal.api.workflowservice.v1.TerminateNexusOperationExecutionRequest + (*v1.DeleteNexusOperationExecutionRequest)(nil), // 18: temporal.api.workflowservice.v1.DeleteNexusOperationExecutionRequest + (*v1.PollNexusOperationExecutionRequest)(nil), // 19: temporal.api.workflowservice.v1.PollNexusOperationExecutionRequest + (*v1.PollNexusOperationExecutionResponse)(nil), // 20: temporal.api.workflowservice.v1.PollNexusOperationExecutionResponse +} +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_depIdxs = []int32{ + 12, // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.StartNexusOperationExecutionRequest + 13, // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.StartNexusOperationExecutionResponse + 14, // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DescribeNexusOperationExecutionRequest + 15, // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.DescribeNexusOperationExecutionResponse + 16, // 4: 
temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.RequestCancelNexusOperationExecutionRequest + 17, // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.TerminateNexusOperationExecutionRequest + 18, // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DeleteNexusOperationExecutionRequest + 19, // 7: temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.PollNexusOperationExecutionRequest + 20, // 8: temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.PollNexusOperationExecutionResponse + 9, // [9:9] is the sub-list for method output_type + 9, // [9:9] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_init() } +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_init() { + if File_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 12, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: 
file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto = out.File + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_goTypes = nil + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_depIdxs = nil +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service.pb.go new file mode 100644 index 00000000000..44b56e90792 --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service.pb.go @@ -0,0 +1,95 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/nexusoperation/proto/v1/service.proto + +package nexusoperationpb + +import ( + reflect "reflect" + unsafe "unsafe" + + _ "go.temporal.io/server/api/common/v1" + _ "go.temporal.io/server/api/routing/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_rawDesc = "" + + "\n" + + "?temporal/server/chasm/lib/nexusoperation/proto/v1/service.proto\x121temporal.server.chasm.lib.nexusoperation.proto.v1\x1aHtemporal/server/chasm/lib/nexusoperation/proto/v1/request_response.proto\x1a0temporal/server/api/common/v1/api_category.proto\x1a.temporal/server/api/routing/v1/extension.proto2\x90\v\n" + + "\x15NexusOperationService\x12\xdf\x01\n" + + "\x13StartNexusOperation\x12M.temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationRequest\x1aN.temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationResponse\")\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1f\x1a\x1dfrontend_request.operation_id\x12\xe8\x01\n" + + "\x16DescribeNexusOperation\x12P.temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationRequest\x1aQ.temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationResponse\")\x8a\xb5\x18\x02\b\x02\xd2\xc3\x18\x1f\x1a\x1dfrontend_request.operation_id\x12\xf7\x01\n" + + "\x1bRequestCancelNexusOperation\x12U.temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationRequest\x1aV.temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationResponse\")\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1f\x1a\x1dfrontend_request.operation_id\x12\xeb\x01\n" + + "\x17TerminateNexusOperation\x12Q.temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationRequest\x1aR.temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationResponse\")\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1f\x1a\x1dfrontend_request.operation_id\x12\xe2\x01\n" + + 
"\x14DeleteNexusOperation\x12N.temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationRequest\x1aO.temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationResponse\")\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1f\x1a\x1dfrontend_request.operation_id\x12\xdc\x01\n" + + "\x12PollNexusOperation\x12L.temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationRequest\x1aM.temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationResponse\")\x8a\xb5\x18\x02\b\x02\xd2\xc3\x18\x1f\x1a\x1dfrontend_request.operation_idBVZTgo.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpbb\x06proto3" + +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_goTypes = []any{ + (*StartNexusOperationRequest)(nil), // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationRequest + (*DescribeNexusOperationRequest)(nil), // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationRequest + (*RequestCancelNexusOperationRequest)(nil), // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationRequest + (*TerminateNexusOperationRequest)(nil), // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationRequest + (*DeleteNexusOperationRequest)(nil), // 4: temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationRequest + (*PollNexusOperationRequest)(nil), // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationRequest + (*StartNexusOperationResponse)(nil), // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationResponse + (*DescribeNexusOperationResponse)(nil), // 7: temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationResponse + (*RequestCancelNexusOperationResponse)(nil), // 8: temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationResponse + (*TerminateNexusOperationResponse)(nil), // 9: 
temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationResponse + (*DeleteNexusOperationResponse)(nil), // 10: temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationResponse + (*PollNexusOperationResponse)(nil), // 11: temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationResponse +} +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.StartNexusOperation:input_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationRequest + 1, // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.DescribeNexusOperation:input_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationRequest + 2, // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.RequestCancelNexusOperation:input_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationRequest + 3, // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.TerminateNexusOperation:input_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationRequest + 4, // 4: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.DeleteNexusOperation:input_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationRequest + 5, // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.PollNexusOperation:input_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationRequest + 6, // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.StartNexusOperation:output_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.StartNexusOperationResponse + 7, // 7: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.DescribeNexusOperation:output_type -> 
temporal.server.chasm.lib.nexusoperation.proto.v1.DescribeNexusOperationResponse + 8, // 8: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.RequestCancelNexusOperation:output_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.RequestCancelNexusOperationResponse + 9, // 9: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.TerminateNexusOperation:output_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.TerminateNexusOperationResponse + 10, // 10: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.DeleteNexusOperation:output_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.DeleteNexusOperationResponse + 11, // 11: temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService.PollNexusOperation:output_type -> temporal.server.chasm.lib.nexusoperation.proto.v1.PollNexusOperationResponse + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_init() } +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_init() { + if File_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto != nil { + return + } + file_temporal_server_chasm_lib_nexusoperation_proto_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: 
file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto = out.File + file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_goTypes = nil + file_temporal_server_chasm_lib_nexusoperation_proto_v1_service_proto_depIdxs = nil +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service_client.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service_client.pb.go new file mode 100644 index 00000000000..df6f364db49 --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service_client.pb.go @@ -0,0 +1,318 @@ +// Code generated by protoc-gen-go-chasm. DO NOT EDIT. +package nexusoperationpb + +import ( + "context" + "time" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "google.golang.org/grpc" +) + +// NexusOperationServiceLayeredClient is a client for NexusOperationService. +type NexusOperationServiceLayeredClient struct { + metricsHandler metrics.Handler + numShards int32 + redirector history.Redirector[NexusOperationServiceClient] + retryPolicy backoff.RetryPolicy +} + +// NewNexusOperationServiceLayeredClient initializes a new NexusOperationServiceLayeredClient. 
+func NewNexusOperationServiceLayeredClient( + dc *dynamicconfig.Collection, + rpcFactory common.RPCFactory, + monitor membership.Monitor, + config *config.Persistence, + logger log.Logger, + metricsHandler metrics.Handler, +) (NexusOperationServiceClient, error) { + resolver, err := monitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + connections := history.NewConnectionPool(resolver, rpcFactory, NewNexusOperationServiceClient) + var redirector history.Redirector[NexusOperationServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { + redirector = history.NewCachingRedirector( + connections, + resolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) + } else { + redirector = history.NewBasicRedirector(connections, resolver) + } + return &NexusOperationServiceLayeredClient{ + metricsHandler: metricsHandler, + redirector: redirector, + numShards: config.NumHistoryShards, + retryPolicy: common.CreateHistoryClientRetryPolicy(), + }, nil +} +func (c *NexusOperationServiceLayeredClient) callStartNexusOperationNoRetry( + ctx context.Context, + request *StartNexusOperationRequest, + opts ...grpc.CallOption, +) (*StartNexusOperationResponse, error) { + var response *StartNexusOperationResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("NexusOperationService.StartNexusOperation"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetOperationId(), c.numShards) + op := func(ctx context.Context, client NexusOperationServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.StartNexusOperation(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *NexusOperationServiceLayeredClient) StartNexusOperation( + ctx context.Context, + request *StartNexusOperationRequest, + opts ...grpc.CallOption, +) (*StartNexusOperationResponse, error) { + call := func(ctx context.Context) (*StartNexusOperationResponse, error) { + return c.callStartNexusOperationNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *NexusOperationServiceLayeredClient) callDescribeNexusOperationNoRetry( + ctx context.Context, + request *DescribeNexusOperationRequest, + opts ...grpc.CallOption, +) (*DescribeNexusOperationResponse, error) { + var response *DescribeNexusOperationResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("NexusOperationService.DescribeNexusOperation"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetOperationId(), c.numShards) + op := func(ctx context.Context, client NexusOperationServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DescribeNexusOperation(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *NexusOperationServiceLayeredClient) DescribeNexusOperation( + ctx context.Context, + request *DescribeNexusOperationRequest, + opts ...grpc.CallOption, +) (*DescribeNexusOperationResponse, error) { + call := func(ctx context.Context) (*DescribeNexusOperationResponse, error) { + return c.callDescribeNexusOperationNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *NexusOperationServiceLayeredClient) callRequestCancelNexusOperationNoRetry( + ctx context.Context, + request *RequestCancelNexusOperationRequest, + opts ...grpc.CallOption, +) (*RequestCancelNexusOperationResponse, error) { + var response *RequestCancelNexusOperationResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("NexusOperationService.RequestCancelNexusOperation"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetOperationId(), c.numShards) + op := func(ctx context.Context, client NexusOperationServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.RequestCancelNexusOperation(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *NexusOperationServiceLayeredClient) RequestCancelNexusOperation( + ctx context.Context, + request *RequestCancelNexusOperationRequest, + opts ...grpc.CallOption, +) (*RequestCancelNexusOperationResponse, error) { + call := func(ctx context.Context) (*RequestCancelNexusOperationResponse, error) { + return c.callRequestCancelNexusOperationNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *NexusOperationServiceLayeredClient) callTerminateNexusOperationNoRetry( + ctx context.Context, + request *TerminateNexusOperationRequest, + opts ...grpc.CallOption, +) (*TerminateNexusOperationResponse, error) { + var response *TerminateNexusOperationResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("NexusOperationService.TerminateNexusOperation"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetOperationId(), c.numShards) + op := func(ctx context.Context, client NexusOperationServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.TerminateNexusOperation(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *NexusOperationServiceLayeredClient) TerminateNexusOperation( + ctx context.Context, + request *TerminateNexusOperationRequest, + opts ...grpc.CallOption, +) (*TerminateNexusOperationResponse, error) { + call := func(ctx context.Context) (*TerminateNexusOperationResponse, error) { + return c.callTerminateNexusOperationNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *NexusOperationServiceLayeredClient) callDeleteNexusOperationNoRetry( + ctx context.Context, + request *DeleteNexusOperationRequest, + opts ...grpc.CallOption, +) (*DeleteNexusOperationResponse, error) { + var response *DeleteNexusOperationResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("NexusOperationService.DeleteNexusOperation"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetOperationId(), c.numShards) + op := func(ctx context.Context, client NexusOperationServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DeleteNexusOperation(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *NexusOperationServiceLayeredClient) DeleteNexusOperation( + ctx context.Context, + request *DeleteNexusOperationRequest, + opts ...grpc.CallOption, +) (*DeleteNexusOperationResponse, error) { + call := func(ctx context.Context) (*DeleteNexusOperationResponse, error) { + return c.callDeleteNexusOperationNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *NexusOperationServiceLayeredClient) callPollNexusOperationNoRetry( + ctx context.Context, + request *PollNexusOperationRequest, + opts ...grpc.CallOption, +) (*PollNexusOperationResponse, error) { + var response *PollNexusOperationResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("NexusOperationService.PollNexusOperation"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetOperationId(), c.numShards) + op := func(ctx context.Context, client NexusOperationServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.PollNexusOperation(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *NexusOperationServiceLayeredClient) PollNexusOperation( + ctx context.Context, + request *PollNexusOperationRequest, + opts ...grpc.CallOption, +) (*PollNexusOperationResponse, error) { + call := func(ctx context.Context) (*PollNexusOperationResponse, error) { + return c.callPollNexusOperationNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service_grpc.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service_grpc.pb.go new file mode 100644 index 00000000000..39b61ecc69f --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/service_grpc.pb.go @@ -0,0 +1,295 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/chasm/lib/nexusoperation/proto/v1/service.proto + +package nexusoperationpb + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + NexusOperationService_StartNexusOperation_FullMethodName = "/temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService/StartNexusOperation" + NexusOperationService_DescribeNexusOperation_FullMethodName = "/temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService/DescribeNexusOperation" + NexusOperationService_RequestCancelNexusOperation_FullMethodName = "/temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService/RequestCancelNexusOperation" + NexusOperationService_TerminateNexusOperation_FullMethodName = "/temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService/TerminateNexusOperation" + NexusOperationService_DeleteNexusOperation_FullMethodName = "/temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService/DeleteNexusOperation" + NexusOperationService_PollNexusOperation_FullMethodName = "/temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService/PollNexusOperation" +) + +// NexusOperationServiceClient is the client API for NexusOperationService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type NexusOperationServiceClient interface { + StartNexusOperation(ctx context.Context, in *StartNexusOperationRequest, opts ...grpc.CallOption) (*StartNexusOperationResponse, error) + DescribeNexusOperation(ctx context.Context, in *DescribeNexusOperationRequest, opts ...grpc.CallOption) (*DescribeNexusOperationResponse, error) + RequestCancelNexusOperation(ctx context.Context, in *RequestCancelNexusOperationRequest, opts ...grpc.CallOption) (*RequestCancelNexusOperationResponse, error) + TerminateNexusOperation(ctx context.Context, in *TerminateNexusOperationRequest, opts ...grpc.CallOption) (*TerminateNexusOperationResponse, error) + DeleteNexusOperation(ctx context.Context, in *DeleteNexusOperationRequest, opts ...grpc.CallOption) (*DeleteNexusOperationResponse, error) + PollNexusOperation(ctx context.Context, in *PollNexusOperationRequest, opts ...grpc.CallOption) (*PollNexusOperationResponse, error) +} + +type nexusOperationServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewNexusOperationServiceClient(cc grpc.ClientConnInterface) NexusOperationServiceClient { + return &nexusOperationServiceClient{cc} +} + +func (c *nexusOperationServiceClient) StartNexusOperation(ctx context.Context, in *StartNexusOperationRequest, opts ...grpc.CallOption) (*StartNexusOperationResponse, error) { + out := new(StartNexusOperationResponse) + err := c.cc.Invoke(ctx, NexusOperationService_StartNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nexusOperationServiceClient) DescribeNexusOperation(ctx context.Context, in *DescribeNexusOperationRequest, opts ...grpc.CallOption) (*DescribeNexusOperationResponse, error) { + out := new(DescribeNexusOperationResponse) + err := c.cc.Invoke(ctx, NexusOperationService_DescribeNexusOperation_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nexusOperationServiceClient) RequestCancelNexusOperation(ctx context.Context, in *RequestCancelNexusOperationRequest, opts ...grpc.CallOption) (*RequestCancelNexusOperationResponse, error) { + out := new(RequestCancelNexusOperationResponse) + err := c.cc.Invoke(ctx, NexusOperationService_RequestCancelNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nexusOperationServiceClient) TerminateNexusOperation(ctx context.Context, in *TerminateNexusOperationRequest, opts ...grpc.CallOption) (*TerminateNexusOperationResponse, error) { + out := new(TerminateNexusOperationResponse) + err := c.cc.Invoke(ctx, NexusOperationService_TerminateNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nexusOperationServiceClient) DeleteNexusOperation(ctx context.Context, in *DeleteNexusOperationRequest, opts ...grpc.CallOption) (*DeleteNexusOperationResponse, error) { + out := new(DeleteNexusOperationResponse) + err := c.cc.Invoke(ctx, NexusOperationService_DeleteNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nexusOperationServiceClient) PollNexusOperation(ctx context.Context, in *PollNexusOperationRequest, opts ...grpc.CallOption) (*PollNexusOperationResponse, error) { + out := new(PollNexusOperationResponse) + err := c.cc.Invoke(ctx, NexusOperationService_PollNexusOperation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NexusOperationServiceServer is the server API for NexusOperationService service. 
+// All implementations must embed UnimplementedNexusOperationServiceServer +// for forward compatibility +type NexusOperationServiceServer interface { + StartNexusOperation(context.Context, *StartNexusOperationRequest) (*StartNexusOperationResponse, error) + DescribeNexusOperation(context.Context, *DescribeNexusOperationRequest) (*DescribeNexusOperationResponse, error) + RequestCancelNexusOperation(context.Context, *RequestCancelNexusOperationRequest) (*RequestCancelNexusOperationResponse, error) + TerminateNexusOperation(context.Context, *TerminateNexusOperationRequest) (*TerminateNexusOperationResponse, error) + DeleteNexusOperation(context.Context, *DeleteNexusOperationRequest) (*DeleteNexusOperationResponse, error) + PollNexusOperation(context.Context, *PollNexusOperationRequest) (*PollNexusOperationResponse, error) + mustEmbedUnimplementedNexusOperationServiceServer() +} + +// UnimplementedNexusOperationServiceServer must be embedded to have forward compatible implementations. +type UnimplementedNexusOperationServiceServer struct { +} + +func (UnimplementedNexusOperationServiceServer) StartNexusOperation(context.Context, *StartNexusOperationRequest) (*StartNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartNexusOperation not implemented") +} +func (UnimplementedNexusOperationServiceServer) DescribeNexusOperation(context.Context, *DescribeNexusOperationRequest) (*DescribeNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeNexusOperation not implemented") +} +func (UnimplementedNexusOperationServiceServer) RequestCancelNexusOperation(context.Context, *RequestCancelNexusOperationRequest) (*RequestCancelNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RequestCancelNexusOperation not implemented") +} +func (UnimplementedNexusOperationServiceServer) TerminateNexusOperation(context.Context, *TerminateNexusOperationRequest) 
(*TerminateNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method TerminateNexusOperation not implemented") +} +func (UnimplementedNexusOperationServiceServer) DeleteNexusOperation(context.Context, *DeleteNexusOperationRequest) (*DeleteNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteNexusOperation not implemented") +} +func (UnimplementedNexusOperationServiceServer) PollNexusOperation(context.Context, *PollNexusOperationRequest) (*PollNexusOperationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PollNexusOperation not implemented") +} +func (UnimplementedNexusOperationServiceServer) mustEmbedUnimplementedNexusOperationServiceServer() {} + +// UnsafeNexusOperationServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NexusOperationServiceServer will +// result in compilation errors. +type UnsafeNexusOperationServiceServer interface { + mustEmbedUnimplementedNexusOperationServiceServer() +} + +func RegisterNexusOperationServiceServer(s grpc.ServiceRegistrar, srv NexusOperationServiceServer) { + s.RegisterService(&NexusOperationService_ServiceDesc, srv) +} + +func _NexusOperationService_StartNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NexusOperationServiceServer).StartNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NexusOperationService_StartNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NexusOperationServiceServer).StartNexusOperation(ctx, req.(*StartNexusOperationRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _NexusOperationService_DescribeNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NexusOperationServiceServer).DescribeNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NexusOperationService_DescribeNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NexusOperationServiceServer).DescribeNexusOperation(ctx, req.(*DescribeNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NexusOperationService_RequestCancelNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCancelNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NexusOperationServiceServer).RequestCancelNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NexusOperationService_RequestCancelNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NexusOperationServiceServer).RequestCancelNexusOperation(ctx, req.(*RequestCancelNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NexusOperationService_TerminateNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TerminateNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NexusOperationServiceServer).TerminateNexusOperation(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NexusOperationService_TerminateNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NexusOperationServiceServer).TerminateNexusOperation(ctx, req.(*TerminateNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NexusOperationService_DeleteNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NexusOperationServiceServer).DeleteNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NexusOperationService_DeleteNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NexusOperationServiceServer).DeleteNexusOperation(ctx, req.(*DeleteNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NexusOperationService_PollNexusOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PollNexusOperationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NexusOperationServiceServer).PollNexusOperation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: NexusOperationService_PollNexusOperation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NexusOperationServiceServer).PollNexusOperation(ctx, req.(*PollNexusOperationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// NexusOperationService_ServiceDesc is the grpc.ServiceDesc for NexusOperationService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var NexusOperationService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.chasm.lib.nexusoperation.proto.v1.NexusOperationService", + HandlerType: (*NexusOperationServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "StartNexusOperation", + Handler: _NexusOperationService_StartNexusOperation_Handler, + }, + { + MethodName: "DescribeNexusOperation", + Handler: _NexusOperationService_DescribeNexusOperation_Handler, + }, + { + MethodName: "RequestCancelNexusOperation", + Handler: _NexusOperationService_RequestCancelNexusOperation_Handler, + }, + { + MethodName: "TerminateNexusOperation", + Handler: _NexusOperationService_TerminateNexusOperation_Handler, + }, + { + MethodName: "DeleteNexusOperation", + Handler: _NexusOperationService_DeleteNexusOperation_Handler, + }, + { + MethodName: "PollNexusOperation", + Handler: _NexusOperationService_PollNexusOperation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/chasm/lib/nexusoperation/proto/v1/service.proto", +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/tasks.go-helpers.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/tasks.go-helpers.pb.go new file mode 100644 index 00000000000..264f3bafa48 --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/tasks.go-helpers.pb.go @@ -0,0 +1,265 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package nexusoperationpb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type ScheduleToStartTimeoutTask to the protobuf v3 wire format +func (val *ScheduleToStartTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ScheduleToStartTimeoutTask from the protobuf v3 wire format +func (val *ScheduleToStartTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ScheduleToStartTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ScheduleToStartTimeoutTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ScheduleToStartTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ScheduleToStartTimeoutTask + switch t := that.(type) { + case *ScheduleToStartTimeoutTask: + that1 = t + case ScheduleToStartTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type StartToCloseTimeoutTask to the protobuf v3 wire format +func (val *StartToCloseTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type StartToCloseTimeoutTask from the protobuf v3 wire format +func (val *StartToCloseTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *StartToCloseTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two StartToCloseTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *StartToCloseTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *StartToCloseTimeoutTask + switch t := that.(type) { + case *StartToCloseTimeoutTask: + that1 = t + case StartToCloseTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ScheduleToCloseTimeoutTask to the protobuf v3 wire format +func (val *ScheduleToCloseTimeoutTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ScheduleToCloseTimeoutTask from the protobuf v3 wire format +func (val *ScheduleToCloseTimeoutTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ScheduleToCloseTimeoutTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ScheduleToCloseTimeoutTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ScheduleToCloseTimeoutTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ScheduleToCloseTimeoutTask + switch t := that.(type) { + case *ScheduleToCloseTimeoutTask: + that1 = t + case ScheduleToCloseTimeoutTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvocationTask to the protobuf v3 wire format +func (val *InvocationTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvocationTask from the protobuf v3 wire format +func (val *InvocationTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvocationTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvocationTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvocationTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvocationTask + switch t := that.(type) { + case *InvocationTask: + that1 = t + case InvocationTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvocationBackoffTask to the protobuf v3 wire format +func (val *InvocationBackoffTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvocationBackoffTask from the protobuf v3 wire format +func (val *InvocationBackoffTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvocationBackoffTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvocationBackoffTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvocationBackoffTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvocationBackoffTask + switch t := that.(type) { + case *InvocationBackoffTask: + that1 = t + case InvocationBackoffTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CancellationTask to the protobuf v3 wire format +func (val *CancellationTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CancellationTask from the protobuf v3 wire format +func (val *CancellationTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CancellationTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CancellationTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CancellationTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CancellationTask + switch t := that.(type) { + case *CancellationTask: + that1 = t + case CancellationTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CancellationBackoffTask to the protobuf v3 wire format +func (val *CancellationBackoffTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CancellationBackoffTask from the protobuf v3 wire format +func (val *CancellationBackoffTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CancellationBackoffTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CancellationBackoffTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CancellationBackoffTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CancellationBackoffTask + switch t := that.(type) { + case *CancellationBackoffTask: + that1 = t + case CancellationBackoffTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/tasks.pb.go b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/tasks.pb.go new file mode 100644 index 00000000000..cdd2aaa62db --- /dev/null +++ b/chasm/lib/nexusoperation/gen/nexusoperationpb/v1/tasks.pb.go @@ -0,0 +1,378 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/nexusoperation/proto/v1/tasks.proto + +package nexusoperationpb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ScheduleToStartTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScheduleToStartTimeoutTask) Reset() { + *x = ScheduleToStartTimeoutTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScheduleToStartTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleToStartTimeoutTask) ProtoMessage() {} + +func (x *ScheduleToStartTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleToStartTimeoutTask.ProtoReflect.Descriptor instead. 
+func (*ScheduleToStartTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +type StartToCloseTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *StartToCloseTimeoutTask) Reset() { + *x = StartToCloseTimeoutTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *StartToCloseTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartToCloseTimeoutTask) ProtoMessage() {} + +func (x *StartToCloseTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartToCloseTimeoutTask.ProtoReflect.Descriptor instead. 
+func (*StartToCloseTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +type ScheduleToCloseTimeoutTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScheduleToCloseTimeoutTask) Reset() { + *x = ScheduleToCloseTimeoutTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScheduleToCloseTimeoutTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScheduleToCloseTimeoutTask) ProtoMessage() {} + +func (x *ScheduleToCloseTimeoutTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScheduleToCloseTimeoutTask.ProtoReflect.Descriptor instead. 
+func (*ScheduleToCloseTimeoutTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{2} +} + +type InvocationTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvocationTask) Reset() { + *x = InvocationTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvocationTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvocationTask) ProtoMessage() {} + +func (x *InvocationTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationTask.ProtoReflect.Descriptor instead. 
+func (*InvocationTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +func (x *InvocationTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type InvocationBackoffTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvocationBackoffTask) Reset() { + *x = InvocationBackoffTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvocationBackoffTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvocationBackoffTask) ProtoMessage() {} + +func (x *InvocationBackoffTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvocationBackoffTask.ProtoReflect.Descriptor instead. 
+func (*InvocationBackoffTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +func (x *InvocationBackoffTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type CancellationTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CancellationTask) Reset() { + *x = CancellationTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CancellationTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancellationTask) ProtoMessage() {} + +func (x *CancellationTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancellationTask.ProtoReflect.Descriptor instead. 
+func (*CancellationTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{5} +} + +func (x *CancellationTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +type CancellationBackoffTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + Attempt int32 `protobuf:"varint,1,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CancellationBackoffTask) Reset() { + *x = CancellationBackoffTask{} + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CancellationBackoffTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CancellationBackoffTask) ProtoMessage() {} + +func (x *CancellationBackoffTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CancellationBackoffTask.ProtoReflect.Descriptor instead. 
+func (*CancellationBackoffTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP(), []int{6} +} + +func (x *CancellationBackoffTask) GetAttempt() int32 { + if x != nil { + return x.Attempt + } + return 0 +} + +var File_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDesc = "" + + "\n" + + "=temporal/server/chasm/lib/nexusoperation/proto/v1/tasks.proto\x121temporal.server.chasm.lib.nexusoperation.proto.v1\"\x1c\n" + + "\x1aScheduleToStartTimeoutTask\"\x19\n" + + "\x17StartToCloseTimeoutTask\"\x1c\n" + + "\x1aScheduleToCloseTimeoutTask\"*\n" + + "\x0eInvocationTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"1\n" + + "\x15InvocationBackoffTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\",\n" + + "\x10CancellationTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattempt\"3\n" + + "\x17CancellationBackoffTask\x12\x18\n" + + "\aattempt\x18\x01 \x01(\x05R\aattemptBVZTgo.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDescData +} + +var 
file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_goTypes = []any{ + (*ScheduleToStartTimeoutTask)(nil), // 0: temporal.server.chasm.lib.nexusoperation.proto.v1.ScheduleToStartTimeoutTask + (*StartToCloseTimeoutTask)(nil), // 1: temporal.server.chasm.lib.nexusoperation.proto.v1.StartToCloseTimeoutTask + (*ScheduleToCloseTimeoutTask)(nil), // 2: temporal.server.chasm.lib.nexusoperation.proto.v1.ScheduleToCloseTimeoutTask + (*InvocationTask)(nil), // 3: temporal.server.chasm.lib.nexusoperation.proto.v1.InvocationTask + (*InvocationBackoffTask)(nil), // 4: temporal.server.chasm.lib.nexusoperation.proto.v1.InvocationBackoffTask + (*CancellationTask)(nil), // 5: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationTask + (*CancellationBackoffTask)(nil), // 6: temporal.server.chasm.lib.nexusoperation.proto.v1.CancellationBackoffTask +} +var file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_init() } +func file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_init() { + if File_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_rawDesc)), + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + 
NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto = out.File + file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_goTypes = nil + file_temporal_server_chasm_lib_nexusoperation_proto_v1_tasks_proto_depIdxs = nil +} diff --git a/chasm/lib/nexusoperation/handler.go b/chasm/lib/nexusoperation/handler.go new file mode 100644 index 00000000000..1ce4b077c70 --- /dev/null +++ b/chasm/lib/nexusoperation/handler.go @@ -0,0 +1,295 @@ +package nexusoperation + +import ( + "context" + "errors" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + "go.temporal.io/server/common/contextutil" + "go.temporal.io/server/common/log" +) + +type handler struct { + nexusoperationpb.UnimplementedNexusOperationServiceServer + + config *Config + logger log.Logger +} + +func newHandler(config *Config, logger log.Logger) *handler { + return &handler{ + config: config, + logger: logger, + } +} + +// StartNexusOperation creates a new standalone Nexus operation execution via CHASM. 
+func (h *handler) StartNexusOperation( + ctx context.Context, + req *nexusoperationpb.StartNexusOperationRequest, +) (response *nexusoperationpb.StartNexusOperationResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + frontendReq := req.GetFrontendRequest() + + result, err := chasm.StartExecution[*Operation]( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetOperationId(), + }, + newStandaloneOperation, + req, + chasm.WithRequestID(frontendReq.GetRequestId()), + chasm.WithBusinessIDPolicy( + idReusePolicyFromProto(frontendReq.GetIdReusePolicy()), + idConflictPolicyFromProto(frontendReq.GetIdConflictPolicy()), + ), + ) + if err != nil { + if alreadyStartedErr, ok := errors.AsType[*chasm.ExecutionAlreadyStartedError](err); ok { + return nil, serviceerror.NewNexusOperationExecutionAlreadyStartedf( + alreadyStartedErr.CurrentRequestID, + alreadyStartedErr.CurrentRunID, + "nexus operation execution already started: request_id=%s, run_id=%s", + alreadyStartedErr.CurrentRequestID, + alreadyStartedErr.CurrentRunID, + ) + } + return nil, err + } + + return &nexusoperationpb.StartNexusOperationResponse{ + FrontendResponse: &workflowservice.StartNexusOperationExecutionResponse{ + RunId: result.ExecutionKey.RunID, + Started: result.Created, + }, + }, nil +} + +// DescribeNexusOperation queries current operation state, optionally as a long-poll that waits +// for any state change. +// +// When used to long-poll, it returns an empty non-error response on context +// deadline expiry, to indicate that the state being waited for was not reached. Callers should +// interpret this as an invitation to resubmit their long-poll request. This response is sent before +// the caller's deadline (see nexusoperation.longPollBuffer) so that it is likely that the caller +// does indeed receive the non-error response. 
+func (h *handler) DescribeNexusOperation( + ctx context.Context, + req *nexusoperationpb.DescribeNexusOperationRequest, +) (response *nexusoperationpb.DescribeNexusOperationResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + ref := chasm.NewComponentRef[*Operation](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetOperationId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + + token := req.GetFrontendRequest().GetLongPollToken() + if len(token) == 0 { + // No long poll. + return chasm.ReadComponent(ctx, ref, (*Operation).buildDescribeResponse, req) + } + + // Determine the long poll timeout and buffer. + ns := req.GetFrontendRequest().GetNamespace() + ctx, cancel := contextutil.WithDeadlineBuffer( + ctx, + h.config.LongPollTimeout(ns), + h.config.LongPollBuffer(ns), + ) + defer cancel() + + // Poll for the operation state to change. + response, _, err = chasm.PollComponent(ctx, ref, func( + o *Operation, + ctx chasm.Context, + req *nexusoperationpb.DescribeNexusOperationRequest, + ) (*nexusoperationpb.DescribeNexusOperationResponse, bool, error) { + changed, err := chasm.ExecutionStateChanged(o, ctx, token) + if err != nil { + if errors.Is(err, chasm.ErrMalformedComponentRef) { + return nil, false, serviceerror.NewInvalidArgument("invalid long poll token") + } + if errors.Is(err, chasm.ErrInvalidComponentRef) { + return nil, false, serviceerror.NewInvalidArgument("long poll token does not match execution") + } + return nil, false, err + } + if changed { + response, err := o.buildDescribeResponse(ctx, req) + return response, true, err + } + return nil, false, nil + }, req) + + if err != nil && ctx.Err() != nil { + // Send empty non-error response on deadline expiry: caller should continue long-polling. 
+ return &nexusoperationpb.DescribeNexusOperationResponse{ + FrontendResponse: &workflowservice.DescribeNexusOperationExecutionResponse{}, + }, nil + } + return response, err +} + +// PollNexusOperation long-polls for a Nexus operation to reach a specific stage. +// +// It returns an empty non-error response on context deadline expiry, to indicate that the state +// being waited for was not reached. Callers should interpret this as an invitation to resubmit +// their long-poll request. This response is sent before the caller's +// deadline (see nexusoperation.longPollBuffer) so that it is likely that the caller +// does indeed receive the non-error response. +func (h *handler) PollNexusOperation( + ctx context.Context, + req *nexusoperationpb.PollNexusOperationRequest, +) (response *nexusoperationpb.PollNexusOperationResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + ref := chasm.NewComponentRef[*Operation](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetOperationId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + + // Determine the long poll timeout and buffer. + ns := req.GetFrontendRequest().GetNamespace() + ctx, cancel := contextutil.WithDeadlineBuffer( + ctx, + h.config.LongPollTimeout(ns), + h.config.LongPollBuffer(ns), + ) + defer cancel() + + // Poll for the wait stage to be reached. + waitStage := req.GetFrontendRequest().GetWaitStage() + response, _, err = chasm.PollComponent(ctx, ref, func( + o *Operation, + ctx chasm.Context, + req *nexusoperationpb.PollNexusOperationRequest, + ) (*nexusoperationpb.PollNexusOperationResponse, bool, error) { + if o.isWaitStageReached(ctx, waitStage) { + response := o.buildPollResponse(ctx) + return response, true, nil + } + return nil, false, nil + }, req) + + if err != nil && ctx.Err() != nil { + // Send an empty non-error response as an invitation to resubmit the long-poll. 
+ return &nexusoperationpb.PollNexusOperationResponse{ + FrontendResponse: &workflowservice.PollNexusOperationExecutionResponse{}, + }, nil + } + return response, err +} + +// RequestCancelNexusOperation requests cancellation of a standalone Nexus operation via CHASM. +func (h *handler) RequestCancelNexusOperation( + ctx context.Context, + req *nexusoperationpb.RequestCancelNexusOperationRequest, +) (response *nexusoperationpb.RequestCancelNexusOperationResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + ref := chasm.NewComponentRef[*Operation](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetOperationId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + + resp, _, err := chasm.UpdateComponent( + ctx, + ref, + func(o *Operation, ctx chasm.MutableContext, req *nexusoperationpb.RequestCancelNexusOperationRequest) (*nexusoperationpb.RequestCancelNexusOperationResponse, error) { + if err := o.RequestCancel(ctx, &nexusoperationpb.CancellationState{ + RequestId: req.GetFrontendRequest().GetRequestId(), + Identity: req.GetFrontendRequest().GetIdentity(), + Reason: req.GetFrontendRequest().GetReason(), + }); err != nil { + return nil, err + } + return &nexusoperationpb.RequestCancelNexusOperationResponse{}, nil + }, req) + + return resp, err +} + +// TerminateNexusOperation terminates a standalone Nexus operation via CHASM. 
+func (h *handler) TerminateNexusOperation( + ctx context.Context, + req *nexusoperationpb.TerminateNexusOperationRequest, +) (response *nexusoperationpb.TerminateNexusOperationResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + ref := chasm.NewComponentRef[*Operation](chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: req.GetFrontendRequest().GetOperationId(), + RunID: req.GetFrontendRequest().GetRunId(), + }) + + resp, _, err := chasm.UpdateComponent( + ctx, + ref, + func(o *Operation, ctx chasm.MutableContext, req *nexusoperationpb.TerminateNexusOperationRequest) (*nexusoperationpb.TerminateNexusOperationResponse, error) { + if _, err := o.Terminate(ctx, chasm.TerminateComponentRequest{ + RequestID: req.GetFrontendRequest().GetRequestId(), + Identity: req.GetFrontendRequest().GetIdentity(), + Reason: req.GetFrontendRequest().GetReason(), + }); err != nil { + return nil, err + } + return &nexusoperationpb.TerminateNexusOperationResponse{}, nil + }, req) + + return resp, err +} + +// DeleteNexusOperation terminates the nexus operation if running, then schedules it for deletion. 
+func (h *handler) DeleteNexusOperation( + ctx context.Context, + req *nexusoperationpb.DeleteNexusOperationRequest, +) (response *nexusoperationpb.DeleteNexusOperationResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + frontendReq := req.GetFrontendRequest() + + key := chasm.ExecutionKey{ + NamespaceID: req.GetNamespaceId(), + BusinessID: frontendReq.GetOperationId(), + RunID: frontendReq.GetRunId(), + } + + if err := chasm.DeleteExecution[*Operation](ctx, key, chasm.DeleteExecutionRequest{ + TerminateComponentRequest: chasm.TerminateComponentRequest{ + Reason: "Delete nexus operation execution", + }, + }); err != nil { + return nil, err + } + + return &nexusoperationpb.DeleteNexusOperationResponse{}, nil +} +func idReusePolicyFromProto(p enumspb.NexusOperationIdReusePolicy) chasm.BusinessIDReusePolicy { + switch p { + case enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_ALLOW_DUPLICATE_FAILED_ONLY: + return chasm.BusinessIDReusePolicyAllowDuplicateFailedOnly + case enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_REJECT_DUPLICATE: + return chasm.BusinessIDReusePolicyRejectDuplicate + default: + return chasm.BusinessIDReusePolicyAllowDuplicate + } +} + +func idConflictPolicyFromProto(p enumspb.NexusOperationIdConflictPolicy) chasm.BusinessIDConflictPolicy { + switch p { + case enumspb.NEXUS_OPERATION_ID_CONFLICT_POLICY_USE_EXISTING: + return chasm.BusinessIDConflictPolicyUseExisting + default: + return chasm.BusinessIDConflictPolicyFail + } +} diff --git a/chasm/lib/nexusoperation/invocation.go b/chasm/lib/nexusoperation/invocation.go new file mode 100644 index 00000000000..42f5d35b52f --- /dev/null +++ b/chasm/lib/nexusoperation/invocation.go @@ -0,0 +1,288 @@ +package nexusoperation + +import ( + "context" + "fmt" + "net/http/httptrace" + "time" + + "github.com/nexus-rpc/sdk-go/nexus" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + nexuspb "go.temporal.io/api/nexus/v1" + "go.temporal.io/api/serviceerror" + 
"go.temporal.io/server/api/historyservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + "go.temporal.io/server/common/resource" +) + +// startArgs holds the arguments needed to start a Nexus operation invocation. +type startArgs struct { + service string + operation string + requestID string + endpointName string + endpointID string + currentTime time.Time + scheduledTime time.Time + scheduleToStartTimeout time.Duration + scheduleToCloseTimeout time.Duration + startToCloseTimeout time.Duration + header map[string]string + payload *commonpb.Payload + nexusLinks []nexus.Link + serializedRef []byte +} + +// invocationTraceContext captures per-call contextual information needed to set up HTTP tracing. +type invocationTraceContext struct { + operationTag string // "StartOperation" or "CancelOperation" + namespaceName string + requestID string + operation string + endpointName string + workflowID string + runID string + attemptStart time.Time + attempt int32 +} + +type invocation interface { + Start( + ctx context.Context, + args startArgs, + options nexus.StartOperationOptions, + ) (*nexusrpc.ClientStartOperationResponse[*commonpb.Payload], error) + Cancel( + ctx context.Context, + args cancelArgs, + options nexus.CancelOperationOptions, + ) error +} + +type invocationTimeout struct { + timeoutType enumspb.TimeoutType +} + +func (i *invocationTimeout) Start( + _ context.Context, + _ startArgs, + _ nexus.StartOperationOptions, +) (*nexusrpc.ClientStartOperationResponse[*commonpb.Payload], error) { + return nil, &operationTimeoutBelowMinError{timeoutType: i.timeoutType} +} + +func (i *invocationTimeout) Cancel( + _ context.Context, + _ cancelArgs, + _ nexus.CancelOperationOptions, +) error { + return 
	&operationTimeoutBelowMinError{timeoutType: i.timeoutType}
}

// invocationHTTP sends Nexus start/cancel requests to an external endpoint over HTTP.
type invocationHTTP struct {
	client      *nexusrpc.HTTPClient
	clientTrace *httptrace.ClientTrace
}

// newInvocationHTTP builds an invocationHTTP for the given namespace/endpoint/service,
// optionally attaching an HTTP client trace for per-attempt request logging when a
// trace provider is configured.
func (b *nexusTaskHandlerBase) newInvocationHTTP(
	ctx context.Context,
	ns *namespace.Namespace,
	endpoint *persistencespb.NexusEndpointEntry,
	service string,
	traceCtx invocationTraceContext,
) (*invocationHTTP, error) {
	client, err := b.clientProvider(ctx, ns.ID().String(), endpoint, service)
	if err != nil {
		return nil, serviceerror.NewUnavailablef("failed to get a client: %v", err)
	}
	var clientTrace *httptrace.ClientTrace
	if b.httpTraceProvider != nil {
		traceLogger := log.With(b.logger,
			tag.Operation(traceCtx.operationTag),
			tag.WorkflowNamespace(traceCtx.namespaceName),
			tag.RequestID(traceCtx.requestID),
			tag.NexusOperation(traceCtx.operation),
			tag.Endpoint(traceCtx.endpointName),
			tag.WorkflowID(traceCtx.workflowID),
			tag.WorkflowRunID(traceCtx.runID),
			tag.AttemptStart(traceCtx.attemptStart),
			tag.Attempt(traceCtx.attempt),
		)
		clientTrace = b.httpTraceProvider.NewTrace(traceCtx.attempt, traceLogger)
	}
	return &invocationHTTP{client: client, clientTrace: clientTrace}, nil
}

// Start issues the StartOperation HTTP call and normalizes the raw response into a
// ClientStartOperationResponse, consuming the successful payload when present.
func (i *invocationHTTP) Start(
	ctx context.Context,
	args startArgs,
	options nexus.StartOperationOptions,
) (*nexusrpc.ClientStartOperationResponse[*commonpb.Payload], error) {
	if i.clientTrace != nil {
		ctx = httptrace.WithClientTrace(ctx, i.clientTrace)
	}
	rawResult, callErr := i.client.StartOperation(ctx, args.operation, args.payload, options)

	var result *nexusrpc.ClientStartOperationResponse[*commonpb.Payload]
	if callErr == nil {
		if rawResult.Pending != nil {
			// Async start: carry over the handle identifying the remote operation.
			result = &nexusrpc.ClientStartOperationResponse[*commonpb.Payload]{
				Pending: &nexusrpc.OperationHandle[*commonpb.Payload]{
					Operation: rawResult.Pending.Operation,
					Token:     rawResult.Pending.Token,
				},
				Links: rawResult.Links,
			}
		} else {
			var payload *commonpb.Payload
			err := rawResult.Successful.Consume(&payload)
			if err != nil {
				// Deserialization failure of a sync result is surfaced as the call error.
				callErr = err
			} else {
				result = &nexusrpc.ClientStartOperationResponse[*commonpb.Payload]{
					Successful: payload,
					Links:      rawResult.Links,
				}
			}
		}
	}
	return result, callErr
}

// Cancel issues the CancelOperation HTTP call using a handle built from the operation token.
func (i *invocationHTTP) Cancel(
	ctx context.Context,
	args cancelArgs,
	options nexus.CancelOperationOptions,
) error {
	if i.clientTrace != nil {
		ctx = httptrace.WithClientTrace(ctx, i.clientTrace)
	}
	handle, err := i.client.NewOperationHandle(args.operation, args.token)
	if err != nil {
		return serviceerror.NewUnavailablef("failed to get handle for operation: %v", err)
	}
	return handle.Cancel(ctx, options)
}

// invocationSystem routes Nexus start/cancel requests through the history service
// rather than making an outbound HTTP call.
type invocationSystem struct {
	ns            *namespace.Namespace
	chasmRegistry *chasm.Registry
	historyClient resource.HistoryClient
	config        *Config
	logger        log.Logger
}

// newInvocationSystem builds an invocationSystem bound to the given namespace.
func (b *nexusTaskHandlerBase) newInvocationSystem(
	ns *namespace.Namespace,
) *invocationSystem {
	return &invocationSystem{
		ns:            ns,
		chasmRegistry: b.chasmRegistry,
		historyClient: b.historyClient,
		config:        b.config,
		logger:        b.logger,
	}
}

// Start processes the operation input and forwards a StartNexusOperation request to the
// history service, translating the proto response variants into the Nexus client
// response shape. Failure variants are converted into Nexus operation/handler errors.
func (i *invocationSystem) Start(
	ctx context.Context,
	args startArgs,
	options nexus.StartOperationOptions,
) (*nexusrpc.ClientStartOperationResponse[*commonpb.Payload], error) {
	protoLinks := commonnexus.ConvertLinksToProto(options.Links)
	res, err := i.chasmRegistry.NexusEndpointProcessor.ProcessInput(chasm.NexusOperationProcessorContext{
		Namespace:               i.ns,
		RequestID:               args.requestID,
		Links:                   args.nexusLinks,
		ReserializeInputPayload: true,
	}, args.service, args.operation, args.payload)
	if err != nil {
		return nil, fmt.Errorf("%w: %w", errOpProcessorFailed, err)
	}
	resp, err := i.historyClient.StartNexusOperation(ctx, &historyservice.StartNexusOperationRequest{
		NamespaceId: i.ns.ID().String(),
		ShardId:     res.RoutingKey.ShardID(i.config.NumHistoryShards),
		Request: &nexuspb.StartOperationRequest{
			Service:        args.service,
			Operation:      args.operation,
			Payload:        res.ReserializedInputPayload,
			RequestId:      args.requestID,
			Callback:       options.CallbackURL,
			CallbackHeader: options.CallbackHeader,
			Links:          protoLinks,
		},
	})
	if err != nil {
		return nil, err
	}

	result := &nexusrpc.ClientStartOperationResponse[*commonpb.Payload]{}
	switch v := resp.GetResponse().GetVariant().(type) {
	case *nexuspb.StartOperationResponse_SyncSuccess:
		result.Links = commonnexus.ConvertLinksFromProto(v.SyncSuccess.GetLinks())
		result.Successful = v.SyncSuccess.Payload
	case *nexuspb.StartOperationResponse_AsyncSuccess:
		result.Links = commonnexus.ConvertLinksFromProto(v.AsyncSuccess.GetLinks())
		result.Pending = &nexusrpc.OperationHandle[*commonpb.Payload]{
			Operation: args.operation,
			Token:     v.AsyncSuccess.GetOperationToken(),
		}
	case *nexuspb.StartOperationResponse_Failure:
		// A canceled failure info means the operation was canceled, not failed.
		state := nexus.OperationStateFailed
		if v.Failure.GetCanceledFailureInfo() != nil {
			state = nexus.OperationStateCanceled
		}
		nexusFailure, convErr := commonnexus.TemporalFailureToNexusFailure(v.Failure)
		if convErr != nil {
			i.logger.Error("failed to convert temporal failure to nexus failure", tag.Error(convErr), tag.RequestID(args.requestID))
			he := nexus.NewHandlerErrorf(nexus.HandlerErrorTypeInternal, "internal error (request ID: %s)", args.requestID)
			he.RetryBehavior = nexus.HandlerErrorRetryBehaviorRetryable
			return nil, he
		}
		return nil, &nexus.OperationError{
			State:           state,
			Cause:           &nexus.FailureError{Failure: nexusFailure},
			OriginalFailure: &nexusFailure,
		}
	default:
		// Unknown variant indicates a version mismatch; report a retryable internal error.
		i.logger.Error(fmt.Sprintf("unexpected response variant type: %T", v), tag.RequestID(args.requestID))
		he := nexus.NewHandlerErrorf(nexus.HandlerErrorTypeInternal, "internal error (request ID: %s)", args.requestID)
		he.RetryBehavior = nexus.HandlerErrorRetryBehaviorRetryable
		return nil, he
	}

	return result, nil
}

// Cancel processes the operation input for routing and forwards a CancelNexusOperation
// request to the history service.
func (i *invocationSystem) Cancel(
	ctx context.Context,
	args cancelArgs,
	_ nexus.CancelOperationOptions,
) error {
	res, err := i.chasmRegistry.NexusEndpointProcessor.ProcessInput(chasm.NexusOperationProcessorContext{
		Namespace: i.ns,
		RequestID: args.requestID,
		// Links are not needed for cancellation.
	}, args.service, args.operation, args.payload)
	if err != nil {
		return fmt.Errorf("%w: %w", errOpProcessorFailed, err)
	}

	_, err = i.historyClient.CancelNexusOperation(ctx, &historyservice.CancelNexusOperationRequest{
		NamespaceId: i.ns.ID().String(),
		ShardId:     res.RoutingKey.ShardID(i.config.NumHistoryShards),
		Request: &nexuspb.CancelOperationRequest{
			Service:        args.service,
			Operation:      args.operation,
			OperationToken: args.token,
		},
	})
	return err
}
diff --git a/chasm/lib/nexusoperation/library.go b/chasm/lib/nexusoperation/library.go
new file mode 100644
index 00000000000..24f31678842
--- /dev/null
+++ b/chasm/lib/nexusoperation/library.go
@@ -0,0 +1,121 @@
package nexusoperation

import (
	"go.temporal.io/server/chasm"
	nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1"
	"go.temporal.io/server/common/dynamicconfig"
	"google.golang.org/grpc"
)

// operationContextKeyType is an unexported key type to avoid context key collisions.
type operationContextKeyType struct{}

// OperationContextKey is the context key for OperationContext, registered as a CHASM component
// context value. Exported for use in tests that need to set up MockContext.
var OperationContextKey = operationContextKeyType{}

// OperationContext holds dependencies injected into the chasm.Context for use by Operation methods.
type OperationContext struct {
	MetricTagConfig dynamicconfig.TypedPropertyFn[NexusMetricTagConfig]
}

// componentOnlyLibrary registers just the components without task executors or gRPC handlers.
// Used in the frontend to enable component ref serialization.
type componentOnlyLibrary struct {
	chasm.UnimplementedLibrary
	metricTagConfig dynamicconfig.TypedPropertyFn[NexusMetricTagConfig]
}

// newComponentOnlyLibrary builds the component-only library, resolving the metric tag
// configuration from dynamic config.
func newComponentOnlyLibrary(dc *dynamicconfig.Collection) *componentOnlyLibrary {
	return &componentOnlyLibrary{
		metricTagConfig: MetricTagConfiguration.Get(dc),
	}
}

// Name returns the CHASM library name.
func (l *componentOnlyLibrary) Name() string {
	return "nexusoperation"
}

// Components registers the Operation and Cancellation components, including Operation's
// search attributes, business ID alias, and injected context values.
func (l *componentOnlyLibrary) Components() []*chasm.RegistrableComponent {
	return []*chasm.RegistrableComponent{
		chasm.NewRegistrableComponent[*Operation](
			"operation",
			chasm.WithSearchAttributes(
				EndpointSearchAttribute,
				ServiceSearchAttribute,
				OperationSearchAttribute,
				RequestIDSearchAttribute,
				StatusSearchAttribute,
			),
			chasm.WithBusinessIDAlias("OperationId"),
			chasm.WithContextValues(map[any]any{
				OperationContextKey: &OperationContext{
					MetricTagConfig: l.metricTagConfig,
				},
			}),
		),
		chasm.NewRegistrableComponent[*Cancellation]("cancellation"),
	}
}

// Library is the full nexusoperation CHASM library: components plus task handlers and
// the gRPC service handler.
type Library struct {
	componentOnlyLibrary

	handler *handler

	operationBackoffTaskHandler                *operationBackoffTaskHandler
	operationInvocationTaskHandler             *operationInvocationTaskHandler
	operationScheduleToCloseTimeoutTaskHandler *operationScheduleToCloseTimeoutTaskHandler
	operationScheduleToStartTimeoutTaskHandler *operationScheduleToStartTimeoutTaskHandler
	operationStartToCloseTimeoutTaskHandler    *operationStartToCloseTimeoutTaskHandler

	cancellationInvocationTaskHandler *cancellationInvocationTaskHandler
	cancellationBackoffTaskHandler    *cancellationBackoffTaskHandler
}

// newLibrary wires the full library from its task handlers and dynamic config.
func newLibrary(
	handler *handler,
	operationBackoffTaskHandler *operationBackoffTaskHandler,
	operationInvocationTaskHandler *operationInvocationTaskHandler,
	operationScheduleToCloseTimeoutTaskHandler *operationScheduleToCloseTimeoutTaskHandler,
	operationScheduleToStartTimeoutTaskHandler *operationScheduleToStartTimeoutTaskHandler,
	operationStartToCloseTimeoutTaskHandler *operationStartToCloseTimeoutTaskHandler,
	cancellationInvocationTaskHandler *cancellationInvocationTaskHandler,
	cancellationBackoffTaskHandler *cancellationBackoffTaskHandler,
	dc *dynamicconfig.Collection,
) *Library {
	return &Library{
		componentOnlyLibrary: *newComponentOnlyLibrary(dc),
		handler:              handler,
		operationBackoffTaskHandler:                operationBackoffTaskHandler,
		operationInvocationTaskHandler:             operationInvocationTaskHandler,
		operationScheduleToCloseTimeoutTaskHandler: operationScheduleToCloseTimeoutTaskHandler,
		operationScheduleToStartTimeoutTaskHandler: operationScheduleToStartTimeoutTaskHandler,
		operationStartToCloseTimeoutTaskHandler:    operationStartToCloseTimeoutTaskHandler,
		cancellationInvocationTaskHandler:          cancellationInvocationTaskHandler,
		cancellationBackoffTaskHandler:             cancellationBackoffTaskHandler,
	}
}

// Tasks registers the side-effect (invocation/cancellation, grouped under TaskGroupName for
// the outbound queue) and pure (backoff/timeout) tasks.
func (l *Library) Tasks() []*chasm.RegistrableTask {
	return []*chasm.RegistrableTask{
		chasm.NewRegistrableSideEffectTask(
			"invocation",
			l.operationInvocationTaskHandler,
			chasm.WithTaskGroup(TaskGroupName),
		),
		chasm.NewRegistrablePureTask("invocationBackoff", l.operationBackoffTaskHandler),
		chasm.NewRegistrablePureTask("scheduleToStartTimeout", l.operationScheduleToStartTimeoutTaskHandler),
		chasm.NewRegistrablePureTask("startToCloseTimeout", l.operationStartToCloseTimeoutTaskHandler),
		chasm.NewRegistrablePureTask("scheduleToCloseTimeout", l.operationScheduleToCloseTimeoutTaskHandler),
		chasm.NewRegistrableSideEffectTask(
			"cancellation",
			l.cancellationInvocationTaskHandler,
			chasm.WithTaskGroup(TaskGroupName),
		),
		chasm.NewRegistrablePureTask("cancellationBackoff", l.cancellationBackoffTaskHandler),
	}
}

// RegisterServices registers the NexusOperationService gRPC handler on the given server.
func (l *Library) RegisterServices(server *grpc.Server) {
	server.RegisterService(&nexusoperationpb.NexusOperationService_ServiceDesc, l.handler)
}
diff --git a/chasm/lib/nexusoperation/metrics.go b/chasm/lib/nexusoperation/metrics.go
new file mode 100644
index 00000000000..64f09da06e5
---
/dev/null
+++ b/chasm/lib/nexusoperation/metrics.go
@@ -0,0 +1,63 @@
package nexusoperation

import (
	"go.temporal.io/server/common/metrics"
)

var OutboundRequestCounter = metrics.NewCounterDef(
	"nexus_outbound_requests",
	metrics.WithDescription("The number of Nexus outbound requests made by the history service."),
)
var OutboundRequestLatency = metrics.NewTimerDef(
	"nexus_outbound_latency",
	metrics.WithDescription("Latency of outbound Nexus requests made by the history service."),
)
var NexusOperationSuccessCount = metrics.NewCounterDef(
	"nexus_operation_success",
	metrics.WithDescription("Nexus Operations successfully completed."),
)
var NexusOperationFailedCount = metrics.NewCounterDef(
	"nexus_operation_fail",
	metrics.WithDescription("Nexus Operations failures."),
)
var NexusOperationCancelCount = metrics.NewCounterDef(
	"nexus_operation_cancel",
	metrics.WithDescription("Nexus Operations cancellations."),
)
var NexusOperationTerminateCount = metrics.NewCounterDef(
	"nexus_operation_terminate",
	metrics.WithDescription("Nexus Operations that were terminated before completion."),
)
var NexusOperationTimeoutCount = metrics.NewCounterDef(
	"nexus_operation_timeout",
	metrics.WithDescription("Nexus Operations that timed out before completion."),
)

var NexusOperationScheduleToCloseLatency = metrics.NewTimerDef(
	"nexus_operation_schedule_to_close_latency",
	metrics.WithDescription("Duration from Nexus Operation scheduled time to terminal state."),
)
var NexusOperationScheduleToStartLatency = metrics.NewTimerDef(
	"nexus_operation_schedule_to_start_latency",
	metrics.WithDescription("Duration from Nexus Operation scheduled time to started time."),
)
var NexusOperationStartToCloseLatency = metrics.NewTimerDef(
	"nexus_operation_start_to_close_latency",
	metrics.WithDescription("Duration from Nexus Operation started time to completed time. Only emitted for async operations."),
)

// NexusMetricTagConfig controls which optional tags are attached to Nexus metrics.
type NexusMetricTagConfig struct {
	// Include service name as a metric tag. Used for caller and handler metrics.
	IncludeServiceTag bool
	// Include operation name as a metric tag. Used for caller and handler metrics.
	IncludeOperationTag bool
	// Configuration for mapping request headers to metric tags. Only used for handler metrics.
	HeaderTagMappings []NexusHeaderTagMapping
}

// NexusHeaderTagMapping maps a single request header to a metric tag.
type NexusHeaderTagMapping struct {
	// Name of the request header to extract value from
	SourceHeader string
	// Name of the metric tag to set with the header value
	TargetTag string
}
diff --git a/chasm/lib/nexusoperation/operation.go b/chasm/lib/nexusoperation/operation.go
new file mode 100644
index 00000000000..3f730ebafe3
--- /dev/null
+++ b/chasm/lib/nexusoperation/operation.go
@@ -0,0 +1,699 @@
package nexusoperation

import (
	"fmt"
	"strings"
	"time"

	"github.com/google/uuid"
	"github.com/nexus-rpc/sdk-go/nexus"
	commonpb "go.temporal.io/api/common/v1"
	enumspb "go.temporal.io/api/enums/v1"
	failurepb "go.temporal.io/api/failure/v1"
	nexuspb "go.temporal.io/api/nexus/v1"
	"go.temporal.io/api/serviceerror"
	"go.temporal.io/api/workflowservice/v1"
	persistencespb "go.temporal.io/server/api/persistence/v1"
	"go.temporal.io/server/chasm"
	nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1"
	"go.temporal.io/server/common/backoff"
	"go.temporal.io/server/common/metrics"
	commonnexus "go.temporal.io/server/common/nexus"
	"go.temporal.io/server/common/primitives/timestamp"
	"go.temporal.io/server/common/softassert"
	queueserrors "go.temporal.io/server/service/history/queues/errors"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// Custom search attributes registered for the Operation component.
var (
	EndpointSearchAttribute  = chasm.NewSearchAttributeKeyword("Endpoint", chasm.SearchAttributeFieldKeyword01)
	ServiceSearchAttribute   = chasm.NewSearchAttributeKeyword("Service", chasm.SearchAttributeFieldKeyword02)
	OperationSearchAttribute = chasm.NewSearchAttributeKeyword("Operation", chasm.SearchAttributeFieldKeyword03)
	RequestIDSearchAttribute = chasm.NewSearchAttributeKeyword("RequestId", chasm.SearchAttributeFieldKeyword04)
	StatusSearchAttribute    = chasm.NewSearchAttributeKeyword("ExecutionStatus", chasm.SearchAttributeFieldLowCardinalityKeyword01)
)

// Compile-time interface assertions for Operation.
var _ chasm.Component = (*Operation)(nil)
var _ chasm.RootComponent = (*Operation)(nil)
var _ chasm.StateMachine[nexusoperationpb.OperationStatus] = (*Operation)(nil)
var _ chasm.VisibilitySearchAttributesProvider = (*Operation)(nil)
var _ chasm.NexusCompletionHandler = (*Operation)(nil)

// ErrCancellationAlreadyRequested is returned when a cancellation has already been requested for an operation.
var ErrCancellationAlreadyRequested = serviceerror.NewFailedPrecondition("cancellation already requested")

// ErrOperationAlreadyCompleted is returned when trying to cancel an operation that has already completed.
var ErrOperationAlreadyCompleted = serviceerror.NewFailedPrecondition("operation already completed")

const (
	// standaloneOperationWorkflowTypeName is the workflow type for tagging standalone operations.
	// Used as the WorkflowTypeTag in metrics emitted from standalone operations.
	// Do not change. It is exposed in metrics.
	standaloneOperationWorkflowTypeName = "__temporal_standalone_nexus_operation__"
)

// InvocationData contains data needed to invoke a Nexus operation.
type InvocationData struct {
	// Input is the operation input payload.
	Input *commonpb.Payload
	// Header contains the Nexus headers for the operation.
	Header map[string]string
	// NexusLinks are the links to the caller(s) that scheduled this operation.
	NexusLinks []nexus.Link
}

// TaskGroupName groups invocation and cancellation together for the outbound queue
const TaskGroupName = "nexus"

// OperationStore defines the interface that must be implemented by any parent component that wants to manage Nexus operations.
// It's the responsibility of the parent component to apply the appropriate state transitions to the operation.
type OperationStore interface {
	OnNexusOperationStarted(ctx chasm.MutableContext, operation *Operation, operationToken string, startTime *time.Time, links []*commonpb.Link) error
	OnNexusOperationCanceled(ctx chasm.MutableContext, operation *Operation, cause *failurepb.Failure) error
	OnNexusOperationFailed(ctx chasm.MutableContext, operation *Operation, cause *failurepb.Failure) error
	OnNexusOperationTimedOut(ctx chasm.MutableContext, operation *Operation, cause *failurepb.Failure, fromAttempt bool) error
	OnNexusOperationCompleted(ctx chasm.MutableContext, operation *Operation, result *commonpb.Payload, links []*commonpb.Link) error
	OnNexusOperationCancellationCompleted(ctx chasm.MutableContext, operation *Operation) error
	OnNexusOperationCancellationFailed(ctx chasm.MutableContext, operation *Operation, cause *failurepb.Failure) error
	// NexusOperationInvocationData loads invocation data (Input, Header, NexusLinks) from the scheduled history event.
	NexusOperationInvocationData(ctx chasm.Context, operation *Operation) (InvocationData, error)
	WorkflowTypeName() string
}

// Operation is a CHASM component that represents a Nexus operation.
type Operation struct {
	chasm.UnimplementedComponent

	// Persisted internal state
	*nexusoperationpb.OperationState

	// Pointer to an implementation of the "store". For a workflow-based Nexus operation
	// this is a parent pointer back to the workflow. For a standalone Nexus operation this is nil.
	Store chasm.ParentPtr[OperationStore]

	RequestData chasm.Field[*nexusoperationpb.OperationRequestData]
	// Cancellation is a child component that manages sending the cancel request to the Nexus endpoint.
	// Created when cancellation is requested, nil otherwise.
	Cancellation chasm.Field[*Cancellation]
	Outcome      chasm.Field[*nexusoperationpb.OperationOutcome]
	Visibility   chasm.Field[*chasm.Visibility]
}

// NewOperation creates a new Operation component with the given persisted state.
func NewOperation(state *nexusoperationpb.OperationState) *Operation {
	return &Operation{OperationState: state}
}

// newStandaloneOperation creates a store-less (standalone) Operation from a frontend start
// request, generates a fresh request ID, attaches request data and visibility, and applies
// the scheduled transition.
func newStandaloneOperation(
	ctx chasm.MutableContext,
	req *nexusoperationpb.StartNexusOperationRequest,
) (*Operation, error) {
	frontendReq := req.GetFrontendRequest()
	op := NewOperation(&nexusoperationpb.OperationState{
		EndpointId:             req.GetEndpointId(),
		Endpoint:               frontendReq.GetEndpoint(),
		Service:                frontendReq.GetService(),
		Operation:              frontendReq.GetOperation(),
		ScheduleToCloseTimeout: frontendReq.GetScheduleToCloseTimeout(),
		ScheduleToStartTimeout: frontendReq.GetScheduleToStartTimeout(),
		StartToCloseTimeout:    frontendReq.GetStartToCloseTimeout(),
		ScheduledTime:          timestamppb.New(ctx.Now(nil)),
		RequestId:              uuid.NewString(),
	})
	op.RequestData = chasm.NewDataField(ctx, &nexusoperationpb.OperationRequestData{
		Input:        frontendReq.GetInput(),
		NexusHeader:  frontendReq.GetNexusHeader(),
		UserMetadata: frontendReq.GetUserMetadata(),
		Identity:     frontendReq.GetIdentity(),
	})
	op.Visibility = chasm.NewComponentField(ctx, chasm.NewVisibilityWithData(
		ctx,
		frontendReq.GetSearchAttributes().GetIndexedFields(),
		nil,
	))
	if err := TransitionScheduled.Apply(op, ctx, EventScheduled{}); err != nil {
		return nil, err
	}
	return op, nil
}

// LifecycleState maps the operation's status to a CHASM lifecycle state.
func (o *Operation) LifecycleState(_ chasm.Context) chasm.LifecycleState {
	switch o.Status {
	case nexusoperationpb.OPERATION_STATUS_SUCCEEDED:
		return chasm.LifecycleStateCompleted
	case nexusoperationpb.OPERATION_STATUS_FAILED,
		nexusoperationpb.OPERATION_STATUS_CANCELED,
		nexusoperationpb.OPERATION_STATUS_TIMED_OUT,
		nexusoperationpb.OPERATION_STATUS_TERMINATED:
		return chasm.LifecycleStateFailed
	default:
		return chasm.LifecycleStateRunning
	}
}

// ContextMetadata returns no extra context metadata for this component.
func (o *Operation) ContextMetadata(_ chasm.Context) map[string]string {
	return nil
}

// StateMachineState returns the current operation status.
func (o *Operation) StateMachineState() nexusoperationpb.OperationStatus {
	return o.Status
}

// SetStateMachineState sets the operation status.
func (o *Operation) SetStateMachineState(status nexusoperationpb.OperationStatus) {
	o.Status = status
}

// RequestCancel requests cancellation of the operation. It creates a Cancellation child component and, if the
// operation has already started, schedules the cancellation request to be sent to the Nexus endpoint.
// A repeat call with the same request ID is idempotent; a different request ID returns
// ErrCancellationAlreadyRequested.
func (o *Operation) RequestCancel(
	ctx chasm.MutableContext,
	req *nexusoperationpb.CancellationState,
) error {
	if !TransitionCanceled.Possible(o) {
		return ErrOperationAlreadyCompleted
	}

	if existingCancellation, ok := o.Cancellation.TryGet(ctx); ok {
		existingReqID := existingCancellation.GetRequestId()
		newReqID := req.GetRequestId()
		if existingReqID != newReqID {
			return fmt.Errorf("%w with request ID %s", ErrCancellationAlreadyRequested, existingReqID)
		}
		return nil
	}

	cancel := newCancellation(req)
	o.Cancellation = chasm.NewComponentField(ctx, cancel)
	// Once started, the handler returns a token that can be used in the cancellation request.
	// Until then, no need to schedule the cancellation.
	if o.Status == nexusoperationpb.OPERATION_STATUS_STARTED {
		return TransitionCancellationScheduled.Apply(cancel, ctx, EventCancellationScheduled{
			Destination: o.GetEndpoint(),
		})
	}
	return nil
}

// onStarted applies the started transition or delegates to the store if one is present.
func (o *Operation) onStarted(ctx chasm.MutableContext, operationToken string, startTime *time.Time, links []*commonpb.Link) error {
	if store, ok := o.Store.TryGet(ctx); ok {
		return store.OnNexusOperationStarted(ctx, o, operationToken, startTime, links)
	}
	o.Links = append(o.Links, links...)
	return TransitionStarted.Apply(o, ctx, EventStarted{
		OperationToken: operationToken,
		StartTime:      startTime,
	})
}

// onCompleted applies the succeeded transition or delegates to the store if one is present.
func (o *Operation) onCompleted(ctx chasm.MutableContext, result *commonpb.Payload, links []*commonpb.Link) error {
	if store, ok := o.Store.TryGet(ctx); ok {
		return store.OnNexusOperationCompleted(ctx, o, result, links)
	}
	o.Links = append(o.Links, links...)
	return TransitionSucceeded.Apply(o, ctx, EventSucceeded{Result: result})
}

// onFailed applies the failed transition or delegates to the store if one is present.
func (o *Operation) onFailed(ctx chasm.MutableContext, cause *failurepb.Failure) error {
	if store, ok := o.Store.TryGet(ctx); ok {
		return store.OnNexusOperationFailed(ctx, o, cause)
	}
	return TransitionFailed.Apply(o, ctx, EventFailed{Failure: cause})
}

// onCanceled applies the canceled transition or delegates to the store if one is present.
func (o *Operation) onCanceled(ctx chasm.MutableContext, cause *failurepb.Failure) error {
	if store, ok := o.Store.TryGet(ctx); ok {
		return store.OnNexusOperationCanceled(ctx, o, cause)
	}
	return TransitionCanceled.Apply(o, ctx, EventCanceled{Failure: cause})
}

// onTimedOut applies the timed out transition or delegates to the store if one is present.
+func (o *Operation) onTimedOut(ctx chasm.MutableContext, cause *failurepb.Failure, fromAttempt bool) error { + if store, ok := o.Store.TryGet(ctx); ok { + return store.OnNexusOperationTimedOut(ctx, o, cause, fromAttempt) + } + return TransitionTimedOut.Apply(o, ctx, EventTimedOut{ + Failure: cause, + FromAttempt: fromAttempt, + }) +} + +// HandleNexusCompletion handles the outcome of an asynchronous completion callback. +func (o *Operation) HandleNexusCompletion( + ctx chasm.MutableContext, + completion *persistencespb.ChasmNexusCompletion, +) error { + // Request ID lets us reject a stale or misrouted completion. + if completion.GetRequestId() != "" && o.GetRequestId() != completion.GetRequestId() { + return serviceerror.NewNotFound("operation not found") + } + + links := completion.GetLinks() + + // For completion-before-start, apply the started transition first. + if o.GetStatus() == nexusoperationpb.OPERATION_STATUS_SCHEDULED { + startTime := timestamp.TimeValuePtr(completion.GetStartTime()) + if err := o.onStarted(ctx, completion.GetOperationToken(), startTime, links); err != nil { + return err + } + // Links belong only to the synthetic started event. + links = nil + } + + switch outcome := completion.Outcome.(type) { + case *persistencespb.ChasmNexusCompletion_Success: + return o.onCompleted(ctx, outcome.Success, nil) + case *persistencespb.ChasmNexusCompletion_Failure: + if outcome.Failure.GetCanceledFailureInfo() != nil { + return o.onCanceled(ctx, outcome.Failure) + } + return o.onFailed(ctx, outcome.Failure) + default: + return serviceerror.NewInvalidArgument("invalid completion outcome") + } +} + +// loadStartArgs is a ReadComponent callback that loads the start arguments from the operation. 
func (o *Operation) loadStartArgs(
	ctx chasm.Context,
	_ chasm.NoValue,
) (startArgs, error) {
	var (
		invocationData InvocationData
		err            error
	)
	// Workflow-backed operations load invocation data from the store (the scheduled
	// history event); standalone operations carry it in the RequestData field.
	if store, ok := o.Store.TryGet(ctx); ok {
		invocationData, err = store.NexusOperationInvocationData(ctx, o)
		if err != nil {
			return startArgs{}, err
		}
	} else {
		requestData := o.RequestData.Get(ctx)
		invocationData = InvocationData{
			Input:  requestData.GetInput(),
			Header: requestData.GetNexusHeader(),
		}
	}
	// Always append a link back to this operation so the handler can reference the caller.
	invocationData.NexusLinks = append(invocationData.NexusLinks,
		commonnexus.ConvertLinkNexusOperationToNexusLink(&commonpb.Link_NexusOperation{
			Namespace:   ctx.NamespaceEntry().Name().String(),
			OperationId: ctx.ExecutionKey().BusinessID,
			RunId:       ctx.ExecutionKey().RunID,
		}))

	serializedRef, err := ctx.Ref(o)
	if err != nil {
		return startArgs{}, err
	}

	return startArgs{
		endpointName:           o.GetEndpoint(),
		endpointID:             o.GetEndpointId(),
		service:                o.GetService(),
		operation:              o.GetOperation(),
		requestID:              o.GetRequestId(),
		currentTime:            ctx.Now(o),
		scheduledTime:          o.GetScheduledTime().AsTime(),
		scheduleToCloseTimeout: o.GetScheduleToCloseTimeout().AsDuration(),
		scheduleToStartTimeout: o.GetScheduleToStartTimeout().AsDuration(),
		startToCloseTimeout:    o.GetStartToCloseTimeout().AsDuration(),
		payload:                invocationData.Input,
		header:                 invocationData.Header,
		nexusLinks:             invocationData.NexusLinks,
		serializedRef:          serializedRef,
	}, nil
}

// saveInvocationResultInput is the input to the Operation.saveInvocationResult method used in UpdateComponent.
type saveInvocationResultInput struct {
	result      invocationResult
	retryPolicy backoff.RetryPolicy
}

// saveInvocationResult handles the outcome of the initial start call.
func (o *Operation) saveInvocationResult(
	ctx chasm.MutableContext,
	input saveInvocationResultInput,
) (chasm.NoValue, error) {
	switch r := input.result.(type) {
	case invocationResultOK:
		links := convertResponseLinks(r.response.Links, ctx.Logger())
		if r.response.Pending != nil {
			// An async operation transitions to STARTED here;
			// HandleNexusCompletion will apply its outcome from the completion callback.
			return nil, o.onStarted(ctx, r.response.Pending.Token, nil, links)
		}
		// Sync success: the result is already in hand.
		return nil, o.onCompleted(ctx, r.response.Successful, links)
	case invocationResultCancel:
		return nil, o.onCanceled(ctx, r.failure)
	case invocationResultFail:
		return nil, o.onFailed(ctx, r.failure)
	case invocationResultTimeout:
		return nil, o.onTimedOut(ctx, r.failure, true)
	case invocationResultRetry:
		// Record the attempt failure and back off per the retry policy.
		return nil, transitionAttemptFailed.Apply(o, ctx, EventAttemptFailed{
			Failure:     r.failure,
			RetryPolicy: input.retryPolicy,
		})
	default:
		return nil, queueserrors.NewUnprocessableTaskError(fmt.Sprintf("unrecognized invocation result %T", r))
	}
}

// resolveUnsuccessfully finalizes the operation. When fromAttempt is true, the failure is recorded as
// LastAttemptFailure. Otherwise the failure is recorded as the terminal Outcome.
func (o *Operation) resolveUnsuccessfully(ctx chasm.MutableContext, failure *failurepb.Failure, closeTime time.Time, fromAttempt bool) error {
	softassert.That(ctx.Logger(), failure != nil, "resolveUnsuccessfully called with nil failure")
	if fromAttempt {
		o.LastAttemptCompleteTime = timestamppb.New(ctx.Now(o))
		o.LastAttemptFailure = failure
	} else {
		o.getOrCreateOutcome(ctx).Variant = &nexusoperationpb.OperationOutcome_Failed_{
			Failed: &nexusoperationpb.OperationOutcome_Failed{Failure: failure},
		}
	}
	o.ClosedTime = timestamppb.New(closeTime)

	// NextAttemptScheduleTime is only valid in BACKING_OFF; clear on close
	o.NextAttemptScheduleTime = nil
	return nil
}

// getOrCreateOutcome returns the existing Outcome data field, creating an empty one if absent.
func (o *Operation) getOrCreateOutcome(ctx chasm.MutableContext) *nexusoperationpb.OperationOutcome {
	if outcome, ok := o.Outcome.TryGet(ctx); ok {
		return outcome
	}
	outcome := &nexusoperationpb.OperationOutcome{}
	o.Outcome = chasm.NewDataField(ctx, outcome)
	return outcome
}

// Terminate forcibly closes the operation. A repeat call with the same request ID is
// idempotent; a different request ID is rejected with FailedPrecondition.
func (o *Operation) Terminate(
	ctx chasm.MutableContext,
	req chasm.TerminateComponentRequest,
) (chasm.TerminateComponentResponse, error) {
	if o.GetTerminateState() != nil {
		if existingReqID := o.TerminateState.GetRequestId(); existingReqID != req.RequestID {
			return chasm.TerminateComponentResponse{},
				serviceerror.NewFailedPreconditionf("already terminated with request ID %s", existingReqID)
		}
		return chasm.TerminateComponentResponse{}, nil
	}

	return chasm.TerminateComponentResponse{}, TransitionTerminated.Apply(o, ctx, EventTerminated{
		TerminateComponentRequest: req,
	})
}

// SearchAttributes returns the current values for this component's registered search attributes.
func (o *Operation) SearchAttributes(_ chasm.Context) []chasm.SearchAttributeKeyValue {
	return []chasm.SearchAttributeKeyValue{
		EndpointSearchAttribute.Value(o.Endpoint),
		ServiceSearchAttribute.Value(o.Service),
		OperationSearchAttribute.Value(o.Operation),
		RequestIDSearchAttribute.Value(o.RequestId),
		StatusSearchAttribute.Value(operationExecutionStatus(o.Status).String()),
	}
}

// buildDescribeResponse assembles the Describe response, optionally including the input
// payload and (for closed operations) the terminal outcome.
func (o *Operation) buildDescribeResponse(
	ctx chasm.Context,
	req *nexusoperationpb.DescribeNexusOperationRequest,
) (*nexusoperationpb.DescribeNexusOperationResponse, error) {
	token, err := ctx.Ref(o)
	if err != nil {
		return nil, err
	}

	resp := &workflowservice.DescribeNexusOperationExecutionResponse{
		RunId:         ctx.ExecutionKey().RunID,
		Info:          o.buildExecutionInfo(ctx),
		LongPollToken: token,
	}
	if req.GetFrontendRequest().GetIncludeInput() {
		resp.Input = o.RequestData.Get(ctx).GetInput()
	}
	if req.GetFrontendRequest().GetIncludeOutcome() && o.isClosed() {
		if successful, failure := o.outcome(ctx); successful != nil {
			resp.Outcome = &workflowservice.DescribeNexusOperationExecutionResponse_Result{
				Result: successful,
			}
		} else if failure != nil {
			resp.Outcome = &workflowservice.DescribeNexusOperationExecutionResponse_Failure{
				Failure: failure,
			}
		}
	}
	return &nexusoperationpb.DescribeNexusOperationResponse{FrontendResponse: resp}, nil
}

// buildPollResponse assembles the long-poll response, reporting the reached wait stage
// and, for closed operations, the terminal outcome.
func (o *Operation) buildPollResponse(
	ctx chasm.Context,
) *nexusoperationpb.PollNexusOperationResponse {
	resp := &workflowservice.PollNexusOperationExecutionResponse{
		RunId:          ctx.ExecutionKey().RunID,
		OperationToken: o.OperationToken,
	}

	if o.isClosed() {
		resp.WaitStage = enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED
		if successful, failure := o.outcome(ctx); successful != nil {
			resp.Outcome = &workflowservice.PollNexusOperationExecutionResponse_Result{
				Result: successful,
			}
		} else if failure != nil {
			resp.Outcome = &workflowservice.PollNexusOperationExecutionResponse_Failure{
				Failure: failure,
			}
		}
	} else {
		resp.WaitStage = enumspb.NEXUS_OPERATION_WAIT_STAGE_STARTED
	}

	return &nexusoperationpb.PollNexusOperationResponse{
		FrontendResponse: resp,
	}
}

// outcome returns the terminal result or failure of a closed operation; both are nil while
// the operation is still running. Falls back to LastAttemptFailure when no Outcome was recorded.
func (o *Operation) outcome(ctx chasm.Context) (*commonpb.Payload, *failurepb.Failure) {
	if !o.isClosed() {
		return nil, nil
	}

	outcome, hasOutcome := o.Outcome.TryGet(ctx)

	switch {
	case !hasOutcome:
		return nil, o.LastAttemptFailure
	case outcome.GetSuccessful() != nil:
		return outcome.GetSuccessful().GetResult(), nil
	case outcome.GetFailed() != nil:
		return nil, outcome.GetFailed().GetFailure()
	default:
		softassert.Fail(ctx.Logger(), "operation outcome has no variant set")
		return nil, o.LastAttemptFailure
	}
}

// isWaitStageReached reports whether the given long-poll wait stage has been reached.
func (o *Operation) isWaitStageReached(_ chasm.Context, waitStage enumspb.NexusOperationWaitStage) bool {
	switch waitStage {
	case enumspb.NEXUS_OPERATION_WAIT_STAGE_STARTED:
		return o.Status == nexusoperationpb.OPERATION_STATUS_STARTED || o.isClosed()
	case enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED:
		return o.isClosed()
	default:
		return false
	}
}

// isClosed reports whether the operation has reached a terminal lifecycle state.
func (o *Operation) isClosed() bool {
	return o.LifecycleState(nil).IsClosed()
}

// buildExecutionInfo assembles the public execution info proto from persisted state,
// request data, visibility, and derived times (expiration, close, execution duration).
func (o *Operation) buildExecutionInfo(ctx chasm.Context) *nexuspb.NexusOperationExecutionInfo {
	requestData := o.RequestData.Get(ctx)
	key := ctx.ExecutionKey()
	info := &nexuspb.NexusOperationExecutionInfo{
		OperationId:             key.BusinessID,
		RunId:                   key.RunID,
		Endpoint:                o.Endpoint,
		Service:                 o.Service,
		Operation:               o.Operation,
		Status:                  operationExecutionStatus(o.Status),
		State:                   PendingOperationState(o.Status),
		ScheduleToCloseTimeout:  o.ScheduleToCloseTimeout,
		ScheduleToStartTimeout:  o.ScheduleToStartTimeout,
		StartToCloseTimeout:     o.StartToCloseTimeout,
		Attempt:                 o.Attempt,
		ScheduleTime:            o.ScheduledTime,
		LastAttemptCompleteTime: o.LastAttemptCompleteTime,
		LastAttemptFailure:      o.LastAttemptFailure,
		NextAttemptScheduleTime: o.NextAttemptScheduleTime,
		RequestId:               o.RequestId,
		OperationToken:          o.OperationToken,
		StateTransitionCount:    ctx.ExecutionInfo().StateTransitionCount,
		SearchAttributes: &commonpb.SearchAttributes{
			IndexedFields: o.Visibility.Get(ctx).CustomSearchAttributes(ctx),
		},
		NexusHeader:  requestData.GetNexusHeader(),
		UserMetadata: requestData.GetUserMetadata(),
		Links:        o.Links,
		Identity:     requestData.GetIdentity(),
	}

	if o.ScheduledTime != nil {
		if o.ScheduleToCloseTimeout != nil {
			info.ExpirationTime = timestamppb.New(o.ScheduledTime.AsTime().Add(o.ScheduleToCloseTimeout.AsDuration()))
		}
		if closeTime := o.closeTime(ctx); closeTime != nil {
			info.CloseTime = closeTime
			info.ExecutionDuration = durationpb.New(closeTime.AsTime().Sub(o.ScheduledTime.AsTime()))
		} else {
			// Still running: report elapsed time so far.
			info.ExecutionDuration = durationpb.New(ctx.Now(o).Sub(o.ScheduledTime.AsTime()))
		}
	}

	return info
}

// metricsHandler returns a metrics handler enriched with nexus operation tags.
func (o *Operation) metricsHandler(ctx chasm.Context) metrics.Handler {
	namespaceName := ctx.NamespaceEntry().Name().String()

	// Standalone operations use a reserved workflow type tag; store-backed ones use
	// the parent workflow's type.
	wftt := standaloneOperationWorkflowTypeName
	if store, ok := o.Store.TryGet(ctx); ok {
		wftt = store.WorkflowTypeName()
	}
	tags := []metrics.Tag{
		metrics.NamespaceTag(namespaceName),
		metrics.NexusEndpointTag(o.GetEndpoint()),
		metrics.WorkflowTypeTag(wftt),
	}

	opCtx, ok := ctx.Value(OperationContextKey).(*OperationContext)
	if !ok {
		softassert.Fail(ctx.Logger(), "operation context missing")
	} else {
		// Service/operation tags are opt-in via dynamic config to bound tag cardinality.
		conf := opCtx.MetricTagConfig()
		if conf.IncludeServiceTag {
			tags = append(tags, metrics.NexusServiceTag(o.GetService()))
		}
		if conf.IncludeOperationTag {
			tags = append(tags, metrics.NexusOperationTag(o.GetOperation()))
		}
	}

	return ctx.MetricsHandler().WithTags(tags...)
}

// emitOnSucceededMetrics records the success counter and latency metrics for a completed operation.
func (o *Operation) emitOnSucceededMetrics(ctx chasm.Context, closeTime time.Time) {
	outcomeTag := metrics.OutcomeTag(
		strings.ToLower(nexusoperationpb.OPERATION_STATUS_SUCCEEDED.String()),
	)
	handler := o.metricsHandler(ctx)
	NexusOperationSuccessCount.With(handler).Record(1)
	o.emitLatencyMetrics(handler, closeTime, outcomeTag)
}

// emitOnFailedMetrics records the failure counter and latency metrics for a failed operation.
func (o *Operation) emitOnFailedMetrics(ctx chasm.Context, closeTime time.Time) {
	outcomeTag := metrics.OutcomeTag(
		strings.ToLower(nexusoperationpb.OPERATION_STATUS_FAILED.String()),
	)
	handler := o.metricsHandler(ctx)
	NexusOperationFailedCount.With(handler).Record(1)
	o.emitLatencyMetrics(handler, closeTime, outcomeTag)
}

// emitOnCanceledMetrics records the cancel counter and latency metrics for a canceled operation.
func (o *Operation) emitOnCanceledMetrics(ctx chasm.Context, closeTime time.Time) {
	outcomeTag := metrics.OutcomeTag(
		strings.ToLower(nexusoperationpb.OPERATION_STATUS_CANCELED.String()),
	)
	handler := o.metricsHandler(ctx)
	NexusOperationCancelCount.With(handler).Record(1)
	o.emitLatencyMetrics(handler, closeTime, outcomeTag)
}

// emitOnTimedOutMetrics records the timeout counter (tagged with the timeout type) and
// latency metrics for a timed-out operation.
func (o *Operation) emitOnTimedOutMetrics(ctx chasm.Context, closeTime time.Time, timeoutType string) {
	outcomeTag := metrics.OutcomeTag(
		strings.ToLower(nexusoperationpb.OPERATION_STATUS_TIMED_OUT.String()),
	)
	handler := o.metricsHandler(ctx)
	NexusOperationTimeoutCount.With(handler).Record(1, metrics.TimeoutTypeTag(timeoutType))
	o.emitLatencyMetrics(handler, closeTime, outcomeTag)
}

// emitOnTerminatedMetrics records the terminate counter and latency metrics for a terminated operation.
func (o *Operation) emitOnTerminatedMetrics(ctx chasm.Context, closeTime time.Time) {
	outcomeTag := metrics.OutcomeTag(
		strings.ToLower(nexusoperationpb.OPERATION_STATUS_TERMINATED.String()),
	)
	handler := o.metricsHandler(ctx)
	NexusOperationTerminateCount.With(handler).Record(1)
	o.emitLatencyMetrics(handler, closeTime, outcomeTag)
}

// emitLatencyMetrics emits schedule-to-close, schedule-to-start, and start-to-close latencies.
+func (o *Operation) emitLatencyMetrics(handler metrics.Handler, closeTime time.Time, outcomeTag metrics.Tag) { + scheduledTime := o.GetScheduledTime().AsTime() + NexusOperationScheduleToCloseLatency.With(handler).Record(closeTime.Sub(scheduledTime), outcomeTag) + + startedTime := o.GetStartedTime() + if startedTime != nil { + // Async operation that was started. + // Schedule-to-start latency is emitted in TransitionStarted. + NexusOperationStartToCloseLatency.With(handler).Record(closeTime.Sub(startedTime.AsTime()), outcomeTag) + } else { + // Sync operation or operation that never started. + // For sync ops, schedule-to-start equals schedule-to-close. + NexusOperationScheduleToStartLatency.With(handler).Record(closeTime.Sub(scheduledTime)) + } +} + +func (o *Operation) closeTime(ctx chasm.Context) *timestamppb.Timestamp { + if !o.LifecycleState(ctx).IsClosed() { + return nil + } + return o.ClosedTime +} + +func operationExecutionStatus(status nexusoperationpb.OperationStatus) enumspb.NexusOperationExecutionStatus { + switch status { + case nexusoperationpb.OPERATION_STATUS_SCHEDULED, + nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + nexusoperationpb.OPERATION_STATUS_STARTED: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_RUNNING + case nexusoperationpb.OPERATION_STATUS_SUCCEEDED: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_COMPLETED + case nexusoperationpb.OPERATION_STATUS_FAILED: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_FAILED + case nexusoperationpb.OPERATION_STATUS_CANCELED: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_CANCELED + case nexusoperationpb.OPERATION_STATUS_TIMED_OUT: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_TIMED_OUT + case nexusoperationpb.OPERATION_STATUS_TERMINATED: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_TERMINATED + default: + return enumspb.NEXUS_OPERATION_EXECUTION_STATUS_UNSPECIFIED + } +} + +// PendingOperationState maps a nexus operation status to the corresponding pending API state. 
// Returns PENDING_NEXUS_OPERATION_STATE_UNSPECIFIED for non-pending or unspecified statuses.
func PendingOperationState(status nexusoperationpb.OperationStatus) enumspb.PendingNexusOperationState {
	switch status {
	case nexusoperationpb.OPERATION_STATUS_SCHEDULED:
		return enumspb.PENDING_NEXUS_OPERATION_STATE_SCHEDULED
	case nexusoperationpb.OPERATION_STATUS_BACKING_OFF:
		return enumspb.PENDING_NEXUS_OPERATION_STATE_BACKING_OFF
	case nexusoperationpb.OPERATION_STATUS_STARTED:
		return enumspb.PENDING_NEXUS_OPERATION_STATE_STARTED
	default:
		return enumspb.PENDING_NEXUS_OPERATION_STATE_UNSPECIFIED
	}
}
diff --git a/chasm/lib/nexusoperation/operation_statemachine.go b/chasm/lib/nexusoperation/operation_statemachine.go
new file mode 100644
index 00000000000..ba6f2498992
--- /dev/null
+++ b/chasm/lib/nexusoperation/operation_statemachine.go
package nexusoperation

import (
	"time"

	commonpb "go.temporal.io/api/common/v1"
	failurepb "go.temporal.io/api/failure/v1"
	"go.temporal.io/server/chasm"
	"go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1"
	"go.temporal.io/server/common/backoff"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// EventScheduled is triggered when the operation is meant to be scheduled - immediately after initialization.
type EventScheduled struct {
}

// TransitionScheduled moves a freshly-initialized operation (UNSPECIFIED) to
// SCHEDULED: increments the attempt counter, emits the first invocation task,
// and emits any configured schedule-to-start / schedule-to-close timeout tasks.
var TransitionScheduled = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{nexusoperationpb.OPERATION_STATUS_UNSPECIFIED},
	nexusoperationpb.OPERATION_STATUS_SCHEDULED,
	func(o *Operation, ctx chasm.MutableContext, event EventScheduled) error {
		o.Attempt++
		// Emit an invocation task to start the operation.
		// The destination is the endpoint name, which routes the task to the correct outbound queue.
		ctx.AddTask(o, chasm.TaskAttributes{Destination: o.GetEndpoint()}, &nexusoperationpb.InvocationTask{
			Attempt: o.Attempt,
		})

		// Emit a schedule-to-start timeout task if configured
		if o.ScheduleToStartTimeout != nil && o.ScheduleToStartTimeout.AsDuration() != 0 {
			deadline := o.ScheduledTime.AsTime().Add(o.ScheduleToStartTimeout.AsDuration())
			ctx.AddTask(o, chasm.TaskAttributes{
				ScheduledTime: deadline,
			}, &nexusoperationpb.ScheduleToStartTimeoutTask{})
		}

		// Emit a schedule-to-close timeout task if configured
		if o.ScheduleToCloseTimeout != nil && o.ScheduleToCloseTimeout.AsDuration() != 0 {
			deadline := o.ScheduledTime.AsTime().Add(o.ScheduleToCloseTimeout.AsDuration())
			ctx.AddTask(o, chasm.TaskAttributes{
				ScheduledTime: deadline,
			}, &nexusoperationpb.ScheduleToCloseTimeoutTask{})
		}

		return nil
	},
)

// EventAttemptFailed is triggered when an invocation attempt is failed with a retryable error.
type EventAttemptFailed struct {
	Failure     *failurepb.Failure
	RetryPolicy backoff.RetryPolicy
}

// transitionAttemptFailed moves SCHEDULED -> BACKING_OFF after a retryable
// attempt failure: records the failure, computes the next retry time from the
// retry policy, and emits a backoff task. Note: Attempt is NOT incremented
// here — transitionRescheduled increments it when the retry is issued.
var transitionAttemptFailed = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{nexusoperationpb.OPERATION_STATUS_SCHEDULED},
	nexusoperationpb.OPERATION_STATUS_BACKING_OFF,
	func(o *Operation, ctx chasm.MutableContext, event EventAttemptFailed) error {
		currentTime := ctx.Now(o)

		// Record the attempt
		o.LastAttemptCompleteTime = timestamppb.New(currentTime)
		o.LastAttemptFailure = event.Failure

		// Compute next retry delay
		// Use 0 for elapsed time as we don't limit the retry by time (for now)
		// The last argument (error) is ignored
		nextDelay := event.RetryPolicy.ComputeNextDelay(0, int(o.Attempt), nil)
		nextAttemptScheduleTime := currentTime.Add(nextDelay)
		o.NextAttemptScheduleTime = timestamppb.New(nextAttemptScheduleTime)

		// Emit a backoff task to retry after the delay
		ctx.AddTask(o, chasm.TaskAttributes{
			ScheduledTime: nextAttemptScheduleTime,
		}, &nexusoperationpb.InvocationBackoffTask{
			Attempt: o.Attempt,
		})

		return nil
	},
)

// EventRescheduled is triggered when the operation is meant to be rescheduled after backing off from a previous
// attempt.
type EventRescheduled struct {
}

// transitionRescheduled moves BACKING_OFF -> SCHEDULED: increments the attempt
// counter, clears the backoff schedule time, and emits a new invocation task.
var transitionRescheduled = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{nexusoperationpb.OPERATION_STATUS_BACKING_OFF},
	nexusoperationpb.OPERATION_STATUS_SCHEDULED,
	func(o *Operation, ctx chasm.MutableContext, event EventRescheduled) error {
		o.Attempt++
		// Clear the next attempt schedule time
		o.NextAttemptScheduleTime = nil

		// Emit a new invocation task for the retry attempt
		ctx.AddTask(o, chasm.TaskAttributes{Destination: o.GetEndpoint()}, &nexusoperationpb.InvocationTask{
			Attempt: o.Attempt,
		})

		return nil
	},
)

// EventStarted is triggered when an invocation attempt succeeds and the handler indicates that it started an
// asynchronous operation.
type EventStarted struct {
	OperationToken string
	// If not nil, uses the provided time instead of the current component time.
	// Used when a completion comes in before start is recorded (rare race).
	StartTime *time.Time
}

// TransitionStarted records that the handler started an asynchronous
// operation: stores the operation token and start time, emits the
// schedule-to-start latency, schedules a start-to-close timeout task if
// configured, and — if a cancellation was already requested — schedules
// sending the cancellation request.
var TransitionStarted = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{
		nexusoperationpb.OPERATION_STATUS_SCHEDULED,
		nexusoperationpb.OPERATION_STATUS_BACKING_OFF,
	},
	nexusoperationpb.OPERATION_STATUS_STARTED,
	func(o *Operation, ctx chasm.MutableContext, event EventStarted) error {
		startTime := ctx.Now(o)
		if event.StartTime != nil {
			startTime = *event.StartTime
		}

		o.StartedTime = timestamppb.New(startTime)
		// Also consider this the completion of an attempt even if the task's saveResult call lost the race with
		// the async completion.
		o.LastAttemptCompleteTime = o.StartedTime
		o.LastAttemptFailure = nil
		// Clear the next attempt schedule time when leaving BACKING_OFF state.
		// This field is only valid in BACKING_OFF state.
		o.NextAttemptScheduleTime = nil

		// Store the operation token for async completion.
		o.OperationToken = event.OperationToken

		// Emit schedule-to-start latency
		metricsHandler := o.metricsHandler(ctx)
		NexusOperationScheduleToStartLatency.With(metricsHandler).Record(startTime.Sub(o.GetScheduledTime().AsTime()))

		// Emit a start-to-close timeout task if configured.
		if o.StartToCloseTimeout != nil && o.StartToCloseTimeout.AsDuration() != 0 {
			deadline := startTime.Add(o.StartToCloseTimeout.AsDuration())
			ctx.AddTask(o, chasm.TaskAttributes{
				ScheduledTime: deadline,
			}, &nexusoperationpb.StartToCloseTimeoutTask{})
		}

		// If cancellation was already requested, schedule sending the cancellation request now that we have
		// an operation token.
		cancellation, ok := o.Cancellation.TryGet(ctx)
		if ok && cancellation.StateMachineState() == nexusoperationpb.CANCELLATION_STATUS_UNSPECIFIED {
			return TransitionCancellationScheduled.Apply(cancellation, ctx, EventCancellationScheduled{
				Destination: o.GetEndpoint(),
			})
		}

		return nil
	},
)

// EventSucceeded is triggered when an invocation attempt succeeds.
type EventSucceeded struct {
	// If not nil, uses the provided time instead of the current component time.
	// Used when a completion comes in before start is recorded (rare race).
	CompleteTime *time.Time
	Result       *commonpb.Payload
}

// TransitionSucceeded resolves the operation as successful: records the close
// time, stores the result in the outcome, and emits success metrics. Terminal
// state — no further tasks are emitted.
var TransitionSucceeded = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{
		nexusoperationpb.OPERATION_STATUS_SCHEDULED,
		nexusoperationpb.OPERATION_STATUS_STARTED,
		nexusoperationpb.OPERATION_STATUS_BACKING_OFF,
	},
	nexusoperationpb.OPERATION_STATUS_SUCCEEDED,
	func(o *Operation, ctx chasm.MutableContext, event EventSucceeded) error {
		closeTime := ctx.Now(o)
		if event.CompleteTime != nil {
			closeTime = *event.CompleteTime
		}
		// Clear the next attempt schedule time when leaving BACKING_OFF state. This field is only valid in
		// BACKING_OFF state.
		o.NextAttemptScheduleTime = nil
		o.ClosedTime = timestamppb.New(closeTime)

		o.getOrCreateOutcome(ctx).Variant = &nexusoperationpb.OperationOutcome_Successful_{
			Successful: &nexusoperationpb.OperationOutcome_Successful{Result: event.Result},
		}

		o.emitOnSucceededMetrics(ctx, closeTime)
		// Terminal state - no tasks to emit.
		return nil
	},
)

// EventFailed is triggered when an invocation attempt is failed with a non retryable error.
type EventFailed struct {
	// If not nil, uses the provided time instead of the current component time.
	// Used when a completion comes in before start is recorded (rare race).
	CompleteTime *time.Time
	Failure      *failurepb.Failure
}

// TransitionFailed resolves the operation as failed, emitting failure metrics
// and delegating terminal bookkeeping to resolveUnsuccessfully.
var TransitionFailed = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{
		nexusoperationpb.OPERATION_STATUS_SCHEDULED,
		nexusoperationpb.OPERATION_STATUS_STARTED,
		nexusoperationpb.OPERATION_STATUS_BACKING_OFF,
	},
	nexusoperationpb.OPERATION_STATUS_FAILED,
	func(o *Operation, ctx chasm.MutableContext, event EventFailed) error {
		closeTime := ctx.Now(o)
		if event.CompleteTime != nil {
			closeTime = *event.CompleteTime
		}
		// Attempts only execute in SCHEDULED, so that status identifies attempt-originated failures.
		// (Read before resolveUnsuccessfully, which may change the status.)
		fromAttempt := o.GetStatus() == nexusoperationpb.OPERATION_STATUS_SCHEDULED
		o.emitOnFailedMetrics(ctx, closeTime)
		return o.resolveUnsuccessfully(ctx, event.Failure, closeTime, fromAttempt)
	},
)

// EventCanceled is triggered when an operation is completed as canceled.
type EventCanceled struct {
	// If not nil, uses the provided time instead of the current component time.
	// Used when a completion comes in before start is recorded (rare race).
	CompleteTime *time.Time
	Failure      *failurepb.Failure
}

// TransitionCanceled resolves the operation as canceled, emitting cancel
// metrics and delegating terminal bookkeeping to resolveUnsuccessfully.
var TransitionCanceled = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{
		nexusoperationpb.OPERATION_STATUS_SCHEDULED,
		nexusoperationpb.OPERATION_STATUS_STARTED,
		nexusoperationpb.OPERATION_STATUS_BACKING_OFF,
	},
	nexusoperationpb.OPERATION_STATUS_CANCELED,
	func(o *Operation, ctx chasm.MutableContext, event EventCanceled) error {
		closeTime := ctx.Now(o)
		if event.CompleteTime != nil {
			closeTime = *event.CompleteTime
		}
		o.emitOnCanceledMetrics(ctx, closeTime)
		// Attempts only execute in SCHEDULED, so that status identifies attempt-originated cancels.
		fromAttempt := o.GetStatus() == nexusoperationpb.OPERATION_STATUS_SCHEDULED
		return o.resolveUnsuccessfully(ctx, event.Failure, closeTime, fromAttempt)
	},
)

// EventTerminated is triggered when the operation is terminated by user request.
type EventTerminated struct {
	chasm.TerminateComponentRequest
}

// TransitionTerminated resolves the operation as terminated by user request,
// recording the terminate request and synthesizing a TerminatedFailure.
// NOTE(review): CANCELED is a closed status yet is listed as a valid source
// here, unlike the other terminal transitions — confirm terminating an
// already-canceled operation is intended.
var TransitionTerminated = chasm.NewTransition(
	[]nexusoperationpb.OperationStatus{
		nexusoperationpb.OPERATION_STATUS_SCHEDULED,
		nexusoperationpb.OPERATION_STATUS_STARTED,
		nexusoperationpb.OPERATION_STATUS_BACKING_OFF,
		nexusoperationpb.OPERATION_STATUS_CANCELED,
	},
	nexusoperationpb.OPERATION_STATUS_TERMINATED,
	func(o *Operation, ctx chasm.MutableContext, event EventTerminated) error {
		closeTime := ctx.Now(o)
		o.TerminateState = &nexusoperationpb.NexusOperationTerminateState{
			RequestId: event.RequestID,
		}
		failure := &failurepb.Failure{
			Message: event.Reason,
			FailureInfo: &failurepb.Failure_TerminatedFailureInfo{
				TerminatedFailureInfo: &failurepb.TerminatedFailureInfo{
					Identity: event.Identity,
				},
			},
		}
		o.emitOnTerminatedMetrics(ctx, closeTime)
		return o.resolveUnsuccessfully(ctx, failure, closeTime, false)
	},
)

// EventTimedOut is triggered when a timeout is triggered for an operation.
+type EventTimedOut struct { + Failure *failurepb.Failure + // FromAttempt is true when the failure came from an invocation attempt. + FromAttempt bool +} + +var TransitionTimedOut = chasm.NewTransition( + []nexusoperationpb.OperationStatus{ + nexusoperationpb.OPERATION_STATUS_SCHEDULED, + nexusoperationpb.OPERATION_STATUS_STARTED, + nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + }, + nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + func(o *Operation, ctx chasm.MutableContext, event EventTimedOut) error { + closeTime := ctx.Now(o) + timeoutType := event.Failure.GetTimeoutFailureInfo().GetTimeoutType().String() + o.emitOnTimedOutMetrics(ctx, closeTime, timeoutType) + return o.resolveUnsuccessfully(ctx, event.Failure, closeTime, event.FromAttempt) + }, +) diff --git a/chasm/lib/nexusoperation/operation_statemachine_test.go b/chasm/lib/nexusoperation/operation_statemachine_test.go new file mode 100644 index 00000000000..132586046f6 --- /dev/null +++ b/chasm/lib/nexusoperation/operation_statemachine_test.go @@ -0,0 +1,871 @@ +package nexusoperation + +import ( + "context" + "maps" + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + failurepb "go.temporal.io/api/failure/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/metrics/metricstest" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/testing/protorequire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var ( + defaultTime = time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC) + defaultScheduleToCloseTimeout = 10 * time.Minute + defaultScheduleToStartTimeout = 5 * 
time.Minute +) + +func newTestOperation() *Operation { + ctx := &chasm.MockMutableContext{} + op := &Operation{ + OperationState: &nexusoperationpb.OperationState{ + Status: nexusoperationpb.OPERATION_STATUS_UNSPECIFIED, + EndpointId: "endpoint-id", + Endpoint: "test-endpoint", + Service: "test-service", + Operation: "test-operation", + ScheduledTime: timestamppb.New(defaultTime), + ScheduleToCloseTimeout: durationpb.New(defaultScheduleToCloseTimeout), + RequestId: "request-id", + Attempt: 0, + }, + } + op.Outcome = chasm.NewDataField(ctx, &nexusoperationpb.OperationOutcome{}) + return op +} + +func TestTransitionScheduled(t *testing.T) { + testCases := []struct { + name string + scheduleToCloseTimeout time.Duration + scheduleToStartTimeout time.Duration + expectedTasks []chasm.MockTask + }{ + { + name: "schedules invocation and schedule-to-close timeout tasks", + scheduleToCloseTimeout: defaultScheduleToCloseTimeout, + expectedTasks: []chasm.MockTask{ + { + Attributes: chasm.TaskAttributes{Destination: "test-endpoint"}, + Payload: &nexusoperationpb.InvocationTask{Attempt: 1}, + }, + { + Attributes: chasm.TaskAttributes{ScheduledTime: defaultTime.Add(defaultScheduleToCloseTimeout)}, + Payload: &nexusoperationpb.ScheduleToCloseTimeoutTask{}, + }, + }, + }, + { + name: "schedules invocation and schedule-to-start timeout tasks", + scheduleToStartTimeout: defaultScheduleToStartTimeout, + expectedTasks: []chasm.MockTask{ + { + Attributes: chasm.TaskAttributes{Destination: "test-endpoint"}, + Payload: &nexusoperationpb.InvocationTask{Attempt: 1}, + }, + { + Attributes: chasm.TaskAttributes{ScheduledTime: defaultTime.Add(defaultScheduleToStartTimeout)}, + Payload: &nexusoperationpb.ScheduleToStartTimeoutTask{}, + }, + }, + }, + { + name: "schedules invocation and both timeout tasks", + scheduleToCloseTimeout: defaultScheduleToCloseTimeout, + scheduleToStartTimeout: defaultScheduleToStartTimeout, + expectedTasks: []chasm.MockTask{ + { + Attributes: 
chasm.TaskAttributes{Destination: "test-endpoint"}, + Payload: &nexusoperationpb.InvocationTask{Attempt: 1}, + }, + { + Attributes: chasm.TaskAttributes{ScheduledTime: defaultTime.Add(defaultScheduleToStartTimeout)}, + Payload: &nexusoperationpb.ScheduleToStartTimeoutTask{}, + }, + { + Attributes: chasm.TaskAttributes{ScheduledTime: defaultTime.Add(defaultScheduleToCloseTimeout)}, + Payload: &nexusoperationpb.ScheduleToCloseTimeoutTask{}, + }, + }, + }, + { + name: "schedules only invocation task when no timeouts set", + expectedTasks: []chasm.MockTask{ + { + Attributes: chasm.TaskAttributes{Destination: "test-endpoint"}, + Payload: &nexusoperationpb.InvocationTask{Attempt: 1}, + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + + operation := newTestOperation() + operation.ScheduleToCloseTimeout = durationpb.New(tc.scheduleToCloseTimeout) + operation.ScheduleToStartTimeout = durationpb.New(tc.scheduleToStartTimeout) + + err := TransitionScheduled.Apply(operation, ctx, EventScheduled{}) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SCHEDULED, operation.Status) + + require.Len(t, ctx.Tasks, len(tc.expectedTasks)) + for i, expectedTask := range tc.expectedTasks { + actualTask := ctx.Tasks[i] + require.Equal(t, expectedTask.Attributes, actualTask.Attributes) + protorequire.ProtoEqual(t, expectedTask.Payload.(proto.Message), actualTask.Payload.(proto.Message)) + } + }) + } +} + +func TestTransitionAttemptFailed(t *testing.T) { + testCases := []struct { + name string + startingAttemptCount int32 + expectedAttempt int32 + minRetryInterval time.Duration + maxRetryInterval time.Duration + retryPolicy backoff.RetryPolicy + }{ + { + name: "first retry", + startingAttemptCount: 1, + expectedAttempt: 1, + minRetryInterval: 500 * time.Millisecond, // With 
jitter, minimum is ~50% of base + maxRetryInterval: 1500 * time.Millisecond, // With jitter, maximum is ~150% of base + retryPolicy: backoff.NewExponentialRetryPolicy(time.Second), + }, + { + name: "second retry", + startingAttemptCount: 2, + expectedAttempt: 2, + minRetryInterval: 1 * time.Second, + maxRetryInterval: 3 * time.Second, + retryPolicy: backoff.NewExponentialRetryPolicy(time.Second), + }, + { + name: "third retry", + startingAttemptCount: 3, + expectedAttempt: 3, + minRetryInterval: 2 * time.Second, + maxRetryInterval: 6 * time.Second, + retryPolicy: backoff.NewExponentialRetryPolicy(time.Second), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + + operation := newTestOperation() + operation.Attempt = tc.startingAttemptCount + operation.Status = nexusoperationpb.OPERATION_STATUS_SCHEDULED + + failure := &failurepb.Failure{ + Message: "test failure", + } + + event := EventAttemptFailed{ + Failure: failure, + RetryPolicy: tc.retryPolicy, + } + + err := transitionAttemptFailed.Apply(operation, ctx, event) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_BACKING_OFF, operation.Status) + require.Equal(t, tc.expectedAttempt, operation.Attempt) + require.Equal(t, defaultTime, operation.LastAttemptCompleteTime.AsTime()) + require.Equal(t, failure, operation.LastAttemptFailure) + require.NotNil(t, operation.NextAttemptScheduleTime) + require.True(t, operation.NextAttemptScheduleTime.AsTime().After(defaultTime)) + + // Verify retry interval is within expected range (due to jitter) + actualInterval := operation.NextAttemptScheduleTime.AsTime().Sub(defaultTime) + require.GreaterOrEqual(t, actualInterval, tc.minRetryInterval, "retry interval %v should be >= %v", actualInterval, tc.minRetryInterval) + require.LessOrEqual(t, actualInterval, 
tc.maxRetryInterval, "retry interval %v should be <= %v", actualInterval, tc.maxRetryInterval) + + // Verify backoff task + require.Len(t, ctx.Tasks, 1) + backoffTask, ok := ctx.Tasks[0].Payload.(*nexusoperationpb.InvocationBackoffTask) + require.True(t, ok, "expected InvocationBackoffTask") + require.Equal(t, tc.expectedAttempt, backoffTask.Attempt) + require.Equal(t, operation.NextAttemptScheduleTime.AsTime(), ctx.Tasks[0].Attributes.ScheduledTime) + }) + } +} + +func TestTransitionRescheduled(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + + operation := newTestOperation() + operation.Status = nexusoperationpb.OPERATION_STATUS_BACKING_OFF + operation.Attempt = 2 + operation.NextAttemptScheduleTime = timestamppb.New(defaultTime.Add(time.Minute)) + + event := EventRescheduled{} + + err := transitionRescheduled.Apply(operation, ctx, event) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SCHEDULED, operation.Status) + require.Equal(t, int32(3), operation.Attempt) + + // Verify NextAttemptScheduleTime was cleared + require.Nil(t, operation.NextAttemptScheduleTime) + + // Verify invocation task + require.Len(t, ctx.Tasks, 1) + invTask, ok := ctx.Tasks[0].Payload.(*nexusoperationpb.InvocationTask) + require.True(t, ok, "expected InvocationTask") + require.Equal(t, int32(3), invTask.Attempt) +} + +func TestTransitionStarted(t *testing.T) { + defaultStartToCloseTimeout := 5 * time.Minute + customStartTime := defaultTime.Add(time.Minute) + + testCases := []struct { + name string + startToCloseTimeout time.Duration + startTime *time.Time + pendingCancellation bool + expectedTasks []chasm.MockTask + }{ + { + name: "emits start-to-close timeout task", + startToCloseTimeout: defaultStartToCloseTimeout, + expectedTasks: []chasm.MockTask{ + { + Attributes: chasm.TaskAttributes{ScheduledTime: 
defaultTime.Add(defaultStartToCloseTimeout)}, + Payload: &nexusoperationpb.StartToCloseTimeoutTask{}, + }, + }, + }, + { + name: "start-to-close timeout uses event StartTime", + startToCloseTimeout: defaultStartToCloseTimeout, + startTime: &customStartTime, + expectedTasks: []chasm.MockTask{ + { + Attributes: chasm.TaskAttributes{ScheduledTime: customStartTime.Add(defaultStartToCloseTimeout)}, + Payload: &nexusoperationpb.StartToCloseTimeoutTask{}, + }, + }, + }, + { + name: "schedules pending cancellation", + pendingCancellation: true, + expectedTasks: []chasm.MockTask{ + { + Attributes: chasm.TaskAttributes{Destination: "test-endpoint"}, + Payload: &nexusoperationpb.CancellationTask{Attempt: 1}, + }, + }, + }, + { + name: "no tasks when timeout not set", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0, + ) + }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{}), + }), + }, + } + + operation := newTestOperation() + operation.Status = nexusoperationpb.OPERATION_STATUS_SCHEDULED + operation.Attempt = 1 + operation.StartToCloseTimeout = durationpb.New(tc.startToCloseTimeout) + if tc.pendingCancellation { + cancellation := newCancellation( + &nexusoperationpb.CancellationState{ + Status: nexusoperationpb.CANCELLATION_STATUS_UNSPECIFIED, + RequestedTime: timestamppb.New(defaultTime), + }, + ) + cancellation.Operation = chasm.NewMockParentPtr[*Operation](operation) + operation.Cancellation = chasm.NewComponentField[*Cancellation](nil, cancellation) + } + + err := TransitionStarted.Apply(operation, ctx, EventStarted{ + 
OperationToken: "test-token", + StartTime: tc.startTime, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_STARTED, operation.Status) + + require.Len(t, ctx.Tasks, len(tc.expectedTasks)) + for i, expectedTask := range tc.expectedTasks { + actualTask := ctx.Tasks[i] + require.Equal(t, expectedTask.Attributes, actualTask.Attributes) + protorequire.ProtoEqual(t, expectedTask.Payload.(proto.Message), actualTask.Payload.(proto.Message)) + } + }) + } +} + +func TestTransitionSucceeded(t *testing.T) { + customCompleteTime := defaultTime.Add(time.Minute) + + testCases := []struct { + name string + completeTime *time.Time + result *commonpb.Payload + expectedClosedTime time.Time + }{ + { + name: "uses default time", + result: mustToPayload(t, "result"), + expectedClosedTime: defaultTime, + }, + { + name: "uses event CompleteTime", + completeTime: &customCompleteTime, + result: mustToPayload(t, "result"), + expectedClosedTime: customCompleteTime, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0, + ) + }, + HandleMetricsHandler: func() metrics.Handler { return metricsHandler }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{ + IncludeServiceTag: true, + IncludeOperationTag: true, + }), + }), + }, + } + + operation := newTestOperation() + operation.Status = nexusoperationpb.OPERATION_STATUS_STARTED + operation.StartedTime = + 
timestamppb.New(operation.ScheduledTime.AsTime().Add(time.Second)) + + err := TransitionSucceeded.Apply(operation, ctx, EventSucceeded{ + CompleteTime: tc.completeTime, + Result: tc.result, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SUCCEEDED, operation.Status) + require.Equal(t, tc.expectedClosedTime, operation.ClosedTime.AsTime()) + + outcome, ok := operation.Outcome.TryGet(ctx) + require.True(t, ok) + require.NotNil(t, outcome.GetSuccessful()) + protorequire.ProtoEqual(t, tc.result, outcome.GetSuccessful().GetResult()) + require.Empty(t, ctx.Tasks) + + countTags := map[string]string{ + "namespace": "ns-name", + "nexus_endpoint": "test-endpoint", + "nexus_service": "test-service", + "nexus_operation": "test-operation", + "workflowType": standaloneOperationWorkflowTypeName, + } + latencyTags := maps.Clone(countTags) + latencyTags["outcome"] = "succeeded" + + require.Equal(t, metricstest.CaptureSnapshot{ + NexusOperationSuccessCount.Name(): { + {Value: int64(1), Tags: countTags}, + }, + NexusOperationScheduleToCloseLatency.Name(): { + {Value: tc.expectedClosedTime.Sub(operation.ScheduledTime.AsTime()), Tags: latencyTags}, + }, + NexusOperationStartToCloseLatency.Name(): { + {Value: tc.expectedClosedTime.Sub(operation.StartedTime.AsTime()), Tags: latencyTags}, + }, + }, capture.Snapshot()) + }) + } +} + +func TestTransitionFailed(t *testing.T) { + customCompleteTime := defaultTime.Add(time.Minute) + failure := &failurepb.Failure{Message: "test failure"} + + for _, tc := range []struct { + name string + fromStatus nexusoperationpb.OperationStatus + event EventFailed + prepare func(*Operation) + assert func(*testing.T, *chasm.MockMutableContext, *Operation) + }{ + { + name: "from scheduled records last attempt failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + event: EventFailed{Failure: failure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, 
failure, operation.LastAttemptFailure) + require.Nil(t, operation.Outcome.Get(ctx).GetVariant()) + }, + }, + { + name: "from non-scheduled stores outcome failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventFailed{Failure: failure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, failure, operation.Outcome.Get(ctx).GetFailed().GetFailure()) + require.Nil(t, operation.LastAttemptFailure) + }, + }, + { + name: "uses default time", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventFailed{Failure: failure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Equal(t, defaultTime, operation.ClosedTime.AsTime()) + }, + }, + { + name: "uses event CompleteTime", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventFailed{Failure: failure, CompleteTime: &customCompleteTime}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Equal(t, customCompleteTime, operation.ClosedTime.AsTime()) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0, + ) + }, + HandleMetricsHandler: func() metrics.Handler { return metricsHandler }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{ + IncludeServiceTag: true, + IncludeOperationTag: true, + }), + }), + }, + } + + operation := newTestOperation() + operation.Status = tc.fromStatus + 
if tc.prepare != nil { + tc.prepare(operation) + } + + err := TransitionFailed.Apply(operation, ctx, tc.event) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, operation.Status) + require.Nil(t, operation.NextAttemptScheduleTime) + require.Empty(t, ctx.Tasks) + tc.assert(t, ctx, operation) + + countTags := map[string]string{ + "namespace": "ns-name", + "nexus_endpoint": "test-endpoint", + "nexus_service": "test-service", + "nexus_operation": "test-operation", + "workflowType": standaloneOperationWorkflowTypeName, + } + latencyTags := maps.Clone(countTags) + latencyTags["outcome"] = "failed" + latency := operation.ClosedTime.AsTime().Sub(operation.ScheduledTime.AsTime()) + require.Equal(t, metricstest.CaptureSnapshot{ + NexusOperationFailedCount.Name(): { + {Value: int64(1), Tags: countTags}, + }, + NexusOperationScheduleToCloseLatency.Name(): { + {Value: latency, Tags: latencyTags}, + }, + NexusOperationScheduleToStartLatency.Name(): { + {Value: latency, Tags: countTags}, + }, + }, capture.Snapshot()) + }) + } +} + +func TestTransitionCanceled(t *testing.T) { + customCompleteTime := defaultTime.Add(time.Minute) + failure := &failurepb.Failure{Message: "canceled"} + + for _, tc := range []struct { + name string + fromStatus nexusoperationpb.OperationStatus + event EventCanceled + prepare func(*Operation) + assert func(*testing.T, *chasm.MockMutableContext, *Operation) + }{ + { + name: "from scheduled records last attempt failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + event: EventCanceled{Failure: failure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, failure, operation.LastAttemptFailure) + require.Nil(t, operation.Outcome.Get(ctx).GetVariant()) + }, + }, + { + name: "from non-scheduled stores outcome failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventCanceled{Failure: failure}, + assert: func(t *testing.T, 
ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, failure, operation.Outcome.Get(ctx).GetFailed().GetFailure()) + require.Nil(t, operation.LastAttemptFailure) + }, + }, + { + name: "uses default time", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventCanceled{Failure: failure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Equal(t, defaultTime, operation.ClosedTime.AsTime()) + }, + }, + { + name: "uses event CompleteTime", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventCanceled{Failure: failure, CompleteTime: &customCompleteTime}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Equal(t, customCompleteTime, operation.ClosedTime.AsTime()) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0, + ) + }, + HandleMetricsHandler: func() metrics.Handler { return metricsHandler }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{ + IncludeServiceTag: true, + IncludeOperationTag: true, + }), + }), + }, + } + + operation := newTestOperation() + operation.Status = tc.fromStatus + if tc.prepare != nil { + tc.prepare(operation) + } + + err := TransitionCanceled.Apply(operation, ctx, tc.event) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_CANCELED, operation.Status) + require.Nil(t, operation.NextAttemptScheduleTime) + 
require.Empty(t, ctx.Tasks) + tc.assert(t, ctx, operation) + + countTags := map[string]string{ + "namespace": "ns-name", + "nexus_endpoint": "test-endpoint", + "nexus_service": "test-service", + "nexus_operation": "test-operation", + "workflowType": standaloneOperationWorkflowTypeName, + } + latencyTags := maps.Clone(countTags) + latencyTags["outcome"] = "canceled" + latency := operation.ClosedTime.AsTime().Sub(operation.ScheduledTime.AsTime()) + require.Equal(t, metricstest.CaptureSnapshot{ + NexusOperationCancelCount.Name(): { + {Value: int64(1), Tags: countTags}, + }, + NexusOperationScheduleToCloseLatency.Name(): { + {Value: latency, Tags: latencyTags}, + }, + NexusOperationScheduleToStartLatency.Name(): { + {Value: latency, Tags: countTags}, + }, + }, capture.Snapshot()) + }) + } +} + +func TestTransitionTimedOut(t *testing.T) { + timeoutFailure := &failurepb.Failure{Message: "operation timed out"} + attemptFailure := &failurepb.Failure{Message: "attempt timed out"} + + for _, tc := range []struct { + name string + fromStatus nexusoperationpb.OperationStatus + event EventTimedOut + prepare func(*Operation) + assert func(*testing.T, *chasm.MockMutableContext, *Operation) + }{ + { + name: "when not from attempt stores outcome failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventTimedOut{Failure: timeoutFailure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, timeoutFailure, operation.Outcome.Get(ctx).GetFailed().GetFailure()) + require.Nil(t, operation.LastAttemptFailure) + require.Nil(t, operation.LastAttemptCompleteTime) + }, + }, + { + name: "when from attempt records last attempt failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + event: EventTimedOut{Failure: attemptFailure, FromAttempt: true}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, attemptFailure, 
operation.LastAttemptFailure) + require.Nil(t, operation.Outcome.Get(ctx).GetFailed()) + }, + }, + { + name: "uses default time", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + event: EventTimedOut{Failure: timeoutFailure}, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Equal(t, defaultTime, operation.ClosedTime.AsTime()) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0, + ) + }, + HandleMetricsHandler: func() metrics.Handler { return metricsHandler }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{ + IncludeServiceTag: true, + IncludeOperationTag: true, + }), + }), + }, + } + + operation := newTestOperation() + operation.Status = tc.fromStatus + if tc.prepare != nil { + tc.prepare(operation) + } + + err := TransitionTimedOut.Apply(operation, ctx, tc.event) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_TIMED_OUT, operation.Status) + require.Nil(t, operation.NextAttemptScheduleTime) + require.Empty(t, ctx.Tasks) + tc.assert(t, ctx, operation) + + countTags := map[string]string{ + "namespace": "ns-name", + "nexus_endpoint": "test-endpoint", + "nexus_service": "test-service", + "nexus_operation": "test-operation", + "workflowType": standaloneOperationWorkflowTypeName, + } + timeoutCountTags := maps.Clone(countTags) + timeoutCountTags["timeout_type"] = tc.event.Failure.GetTimeoutFailureInfo().GetTimeoutType().String() + 
latencyTags := maps.Clone(countTags) + latencyTags["outcome"] = "timedout" + latency := operation.ClosedTime.AsTime().Sub(operation.ScheduledTime.AsTime()) + require.Equal(t, metricstest.CaptureSnapshot{ + NexusOperationTimeoutCount.Name(): { + {Value: int64(1), Tags: timeoutCountTags}, + }, + NexusOperationScheduleToCloseLatency.Name(): { + {Value: latency, Tags: latencyTags}, + }, + NexusOperationScheduleToStartLatency.Name(): { + {Value: latency, Tags: countTags}, + }, + }, capture.Snapshot()) + }) + } +} + +func TestTransitionTerminated(t *testing.T) { + event := EventTerminated{TerminateComponentRequest: chasm.TerminateComponentRequest{ + RequestID: "terminate-request-id", + Reason: "test reason", + Identity: "test-identity", + }} + + for _, tc := range []struct { + name string + fromStatus nexusoperationpb.OperationStatus + prepare func(*Operation) + assert func(*testing.T, *chasm.MockMutableContext, *Operation) + }{ + { + name: "without prior last attempt failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Nil(t, operation.LastAttemptFailure) + }, + }, + { + name: "preserves prior last attempt failure", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + prepare: func(operation *Operation) { + operation.LastAttemptFailure = &failurepb.Failure{Message: "prior attempt failure"} + }, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + protorequire.ProtoEqual(t, &failurepb.Failure{Message: "prior attempt failure"}, operation.LastAttemptFailure) + }, + }, + { + name: "uses default time", + fromStatus: nexusoperationpb.OPERATION_STATUS_STARTED, + assert: func(t *testing.T, ctx *chasm.MockMutableContext, operation *Operation) { + require.Equal(t, defaultTime, operation.ClosedTime.AsTime()) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + metricsHandler := metricstest.NewCaptureHandler() + capture := 
metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest( + &persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0, + ) + }, + HandleMetricsHandler: func() metrics.Handler { return metricsHandler }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{ + IncludeServiceTag: true, + IncludeOperationTag: true, + }), + }), + }, + } + operation := newTestOperation() + operation.Status = tc.fromStatus + if tc.prepare != nil { + tc.prepare(operation) + } + + err := TransitionTerminated.Apply(operation, ctx, event) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_TERMINATED, operation.Status) + protorequire.ProtoEqual(t, &nexusoperationpb.NexusOperationTerminateState{ + RequestId: "terminate-request-id", + }, operation.TerminateState) + tc.assert(t, ctx, operation) + + protorequire.ProtoEqual(t, &nexusoperationpb.OperationOutcome{ + Variant: &nexusoperationpb.OperationOutcome_Failed_{ + Failed: &nexusoperationpb.OperationOutcome_Failed{ + Failure: &failurepb.Failure{ + Message: "test reason", + FailureInfo: &failurepb.Failure_TerminatedFailureInfo{ + TerminatedFailureInfo: &failurepb.TerminatedFailureInfo{ + Identity: "test-identity", + }, + }, + }, + }, + }, + }, operation.Outcome.Get(ctx)) + require.Empty(t, ctx.Tasks) + + countTags := map[string]string{ + "namespace": "ns-name", + "nexus_endpoint": "test-endpoint", + "nexus_service": "test-service", + "nexus_operation": "test-operation", + "workflowType": standaloneOperationWorkflowTypeName, + } + latencyTags := maps.Clone(countTags) + latencyTags["outcome"] = "terminated" + latency := 
// operationTaskHandlerOptions is the fx parameter object for common options supplied to all operation task handlers.
type operationTaskHandlerOptions struct {
	fx.In

	// Config holds this library's dynamic configuration.
	Config *Config

	MetricsHandler metrics.Handler
	Logger         log.Logger
}
// operationInvocationTaskHandlerOptions is the fx parameter object for the invocation task executor.
type operationInvocationTaskHandlerOptions struct {
	fx.In

	Config                 *Config
	NamespaceRegistry      namespace.Registry
	MetricsHandler         metrics.Handler
	Logger                 log.Logger
	CallbackTokenGenerator *commonnexus.CallbackTokenGenerator
	ClientProvider         ClientProvider
	EndpointRegistry       commonnexus.EndpointRegistry
	HTTPTraceProvider      commonnexus.HTTPClientTraceProvider
	HistoryClient          resource.HistoryClient
	ChasmRegistry          *chasm.Registry
}

// operationInvocationTaskHandler handles InvocationTask side-effect tasks:
// it issues the outbound Nexus StartOperation call for a scheduled operation
// and saves the invocation result back onto the component.
type operationInvocationTaskHandler struct {
	chasm.SideEffectTaskHandlerBase[*nexusoperationpb.InvocationTask]

	config                 *Config
	namespaceRegistry      namespace.Registry
	metricsHandler         metrics.Handler
	logger                 log.Logger
	callbackTokenGenerator *commonnexus.CallbackTokenGenerator
	clientProvider         ClientProvider
	endpointRegistry       commonnexus.EndpointRegistry
	httpTraceProvider      commonnexus.HTTPClientTraceProvider
	historyClient          resource.HistoryClient
	chasmRegistry          *chasm.Registry
}

// newOperationInvocationTaskHandler constructs the handler from its fx options.
func newOperationInvocationTaskHandler(opts operationInvocationTaskHandlerOptions) *operationInvocationTaskHandler {
	return &operationInvocationTaskHandler{
		config:                 opts.Config,
		namespaceRegistry:      opts.NamespaceRegistry,
		metricsHandler:         opts.MetricsHandler,
		logger:                 opts.Logger,
		callbackTokenGenerator: opts.CallbackTokenGenerator,
		clientProvider:         opts.ClientProvider,
		endpointRegistry:       opts.EndpointRegistry,
		httpTraceProvider:      opts.HTTPTraceProvider,
		historyClient:          opts.HistoryClient,
		chasmRegistry:          opts.ChasmRegistry,
	}
}

// Validate reports whether the task is still relevant: the operation must
// still be in SCHEDULED status and the task's attempt must match the
// operation's current attempt (a stale attempt means the task was superseded).
func (h *operationInvocationTaskHandler) Validate(
	_ chasm.Context,
	op *Operation,
	_ chasm.TaskAttributes,
	task *nexusoperationpb.InvocationTask,
) (bool, error) {
	isValid := op.Status == nexusoperationpb.OPERATION_STATUS_SCHEDULED && op.GetAttempt() == task.GetAttempt()
	return isValid, nil
}
// Execute performs the outbound Nexus StartOperation call for an invocation
// task: it loads the start arguments from the component, resolves the target
// endpoint, computes the effective call/operation timeouts, issues the request,
// records metrics, and saves the invocation result back onto the component.
// A destination-down call error is wrapped so the queue can back off the
// destination.
func (h *operationInvocationTaskHandler) Execute(
	ctx context.Context,
	opRef chasm.ComponentRef,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.InvocationTask,
) error {
	ns, err := h.namespaceRegistry.GetNamespaceByID(namespace.ID(opRef.NamespaceID))
	if err != nil {
		return serviceerror.NewNotFoundf("failed to get namespace by ID: %v", err)
	}

	args, err := chasm.ReadComponent(ctx, opRef, (*Operation).loadStartArgs, nil)
	if err != nil {
		return err
	}

	endpoint, err := h.resolveEndpoint(ctx, ns, args)
	if err != nil {
		// A missing endpoint is recorded as a not-found handler error result
		// on the operation rather than retried indefinitely.
		if _, ok := errors.AsType[*serviceerror.NotFound](err); ok {
			h.logger.Error("endpoint not found while processing invocation task", tag.Error(err))
			handlerErr := nexus.NewHandlerErrorf(nexus.HandlerErrorTypeNotFound, "endpoint not registered")
			result, err := newInvocationResult(nil, handlerErr)
			if err != nil {
				return fmt.Errorf("failed to construct invocation result: %w", err)
			}
			_, _, err = chasm.UpdateComponent(ctx, opRef, (*Operation).saveInvocationResult, saveInvocationResultInput{
				result:      result,
				retryPolicy: h.config.RetryPolicy(),
			})
			return err
		}
		return err
	}

	callbackURL, err := buildCallbackURL(h.config.UseSystemCallbackURL(), h.config.CallbackURLTemplate(), ns, endpoint)
	if err != nil {
		return fmt.Errorf("failed to build callback URL: %w", err)
	}

	token, err := h.generateCallbackToken(args.serializedRef, args.requestID)
	if err != nil {
		return err
	}

	elapsed := args.currentTime.Sub(args.scheduledTime)
	callTimeout := h.config.RequestTimeout(ns.Name().String(), attrs.Destination)
	var timeoutType enumspb.TimeoutType
	// Adjust timeout based on remaining operation timeouts.
	// ScheduleToStart takes precedence over ScheduleToClose since it is already capped by it.
	if args.scheduleToStartTimeout > 0 {
		callTimeout = min(callTimeout, args.scheduleToStartTimeout-elapsed)
		timeoutType = enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START
	} else if args.scheduleToCloseTimeout > 0 {
		callTimeout = min(callTimeout, args.scheduleToCloseTimeout-elapsed)
		timeoutType = enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE
	}

	// Inform the handler of the operation timeout via header.
	// StartToClose takes precedence over ScheduleToClose since it is already capped by it.
	opTimeout := maxDuration
	if args.startToCloseTimeout > 0 {
		opTimeout = args.startToCloseTimeout
	}
	if args.scheduleToCloseTimeout > 0 {
		opTimeout = min(args.scheduleToCloseTimeout-elapsed, opTimeout)
	}
	header := buildRequestHeader(args.header)
	// Set the operation timeout header if not already set.
	if opTimeoutHeader := header.Get(nexus.HeaderOperationTimeout); opTimeout != maxDuration && opTimeoutHeader == "" {
		header.Set(nexus.HeaderOperationTimeout, commonnexus.FormatDuration(opTimeout))
	}
	// If this request is handled by a newer server that supports Nexus failure serialization, trigger that behavior.
	if h.config.UseNewFailureWireFormat(ns.Name().String()) {
		header.Set(nexusrpc.HeaderTemporalNexusFailureSupport, "true")
	}

	callCtx, cancel := context.WithTimeout(ctx, callTimeout)
	defer cancel()
	// Set this value on the parent context so that our custom HTTP caller can mutate it since we cannot
	// access response headers directly.
	callCtx = context.WithValue(callCtx, commonnexus.FailureSourceContextKey, &atomic.Value{})

	options := nexus.StartOperationOptions{
		Header:      header,
		CallbackURL: callbackURL,
		RequestID:   args.requestID,
		CallbackHeader: nexus.Header{
			commonnexus.CallbackTokenHeader: token,
		},
		Links: args.nexusLinks,
	}

	invocation, err := h.newInvocation(callCtx, ns, endpoint, opRef, args, task, callTimeout, timeoutType)
	if err != nil {
		return fmt.Errorf("failed to construct invocation: %w", err)
	}
	startTime := time.Now() // nolint:forbidigo // Time can be used for timing metrics.
	response, callErr := invocation.Start(callCtx, args, options)
	callDuration := time.Since(startTime)
	// A response that violates server limits (token length / payload size)
	// is treated as a call failure.
	if validationErr := h.validateStartResult(ns, response); validationErr != nil {
		callErr = validationErr
	}
	failureSource := failureSourceFromContext(callCtx)

	h.recordStartCallOutcome(callCtx, ns, endpoint, args, response, callErr, callDuration, failureSource)

	result, err := newInvocationResult(response, callErr)
	if err != nil {
		return fmt.Errorf("failed to construct invocation result: %w", err)
	}
	// Note: the result is saved regardless of callErr; the result itself
	// encodes success vs. failure of the call.
	_, _, saveErr := chasm.UpdateComponent(ctx, opRef, (*Operation).saveInvocationResult, saveInvocationResultInput{
		result:      result,
		retryPolicy: h.config.RetryPolicy(),
	})

	if callErr != nil && isDestinationDown(callErr) {
		saveErr = queueserrors.NewDestinationDownError(callErr.Error(), saveErr)
	}

	return saveErr
}

// buildRequestHeader converts the stored header map into a nexus.Header,
// cloning it so later Set calls do not mutate the component state.
func buildRequestHeader(header map[string]string) nexus.Header {
	if header == nil {
		return make(nexus.Header, 2) // To set the failure support and timeout headers.
	}
	return nexus.Header(maps.Clone(header))
}
// resolveEndpoint resolves the target Nexus endpoint for the operation.
// Returns (nil, nil) for system-internal operations, a not-found handler
// error when no endpoint ID was recorded, and otherwise looks the endpoint
// up in the registry.
func (h *operationInvocationTaskHandler) resolveEndpoint(
	ctx context.Context,
	ns *namespace.Namespace,
	args startArgs,
) (*persistencespb.NexusEndpointEntry, error) {
	// Skip endpoint lookup for system-internal operations.
	if args.endpointName == commonnexus.SystemEndpoint {
		return nil, nil
	}
	// This happens when we accept the ScheduleNexusOperation command when the endpoint is not found in the
	// registry as indicated by the EndpointNotFoundAlwaysNonRetryable dynamic config.
	// The config has been removed but we keep this check for backward compatibility.
	if args.endpointID == "" {
		return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeNotFound, "endpoint not registered")
	}
	return lookupEndpoint(ctx, h.endpointRegistry, ns.ID(), args.endpointID, args.endpointName)
}

// newInvocation builds the invocation object used to perform the outbound
// StartOperation call, threading through the handler's dependencies and the
// trace context describing this attempt.
func (h *operationInvocationTaskHandler) newInvocation(
	ctx context.Context,
	ns *namespace.Namespace,
	endpoint *persistencespb.NexusEndpointEntry,
	opRef chasm.ComponentRef,
	args startArgs,
	task *nexusoperationpb.InvocationTask,
	callTimeout time.Duration,
	timeoutType enumspb.TimeoutType,
) (invocation, error) {
	base := nexusTaskHandlerBase{
		config:            h.config,
		namespaceRegistry: h.namespaceRegistry,
		metricsHandler:    h.metricsHandler,
		logger:            h.logger,
		clientProvider:    h.clientProvider,
		endpointRegistry:  h.endpointRegistry,
		httpTraceProvider: h.httpTraceProvider,
		historyClient:     h.historyClient,
		chasmRegistry:     h.chasmRegistry,
	}
	return base.newInvocation(
		ctx,
		ns,
		endpoint,
		args.endpointName,
		args.service,
		callTimeout,
		timeoutType,
		invocationTraceContext{
			operationTag:  "StartOperation",
			namespaceName: ns.Name().String(),
			requestID:     args.requestID,
			operation:     args.operation,
			endpointName:  args.endpointName,
			workflowID:    opRef.BusinessID,
			runID:         opRef.RunID,
			attemptStart:  args.currentTime.UTC(),
			attempt:       task.GetAttempt(),
		},
	)
}
// validateStartResult enforces per-namespace limits on a StartOperation
// response: the pending-operation token length and the successful-result
// payload size. Returns nil when the response is nil or within limits.
func (h *operationInvocationTaskHandler) validateStartResult(
	ns *namespace.Namespace,
	result *nexusrpc.ClientStartOperationResponse[*commonpb.Payload],
) error {
	if result == nil {
		return nil
	}
	tokenLimit := h.config.MaxOperationTokenLength(ns.Name().String())
	if result.Pending != nil && len(result.Pending.Token) > tokenLimit {
		return fmt.Errorf("%w: length exceeds allowed limit (%d/%d)", ErrInvalidOperationToken, len(result.Pending.Token), tokenLimit)
	}
	if result.Successful != nil && result.Successful.Size() > h.config.PayloadSizeLimit(ns.Name().String()) {
		return ErrResponseBodyTooLarge
	}
	return nil
}

// recordStartCallOutcome emits outbound request count/latency metrics for a
// StartOperation call and logs call failures (at debug level for
// worker-sourced failures and below-minimum timeouts, error level otherwise).
func (h *operationInvocationTaskHandler) recordStartCallOutcome(
	callCtx context.Context,
	ns *namespace.Namespace,
	endpoint *persistencespb.NexusEndpointEntry,
	args startArgs,
	response *nexusrpc.ClientStartOperationResponse[*commonpb.Payload],
	callErr error,
	callDuration time.Duration,
	failureSource string,
) {
	methodTag := metrics.NexusMethodTag("StartOperation")
	namespaceTag := metrics.NamespaceTag(ns.Name().String())
	// endpoint is nil for system-internal operations; fall back to the
	// endpoint name captured in the start args.
	var destTag metrics.Tag
	if endpoint != nil {
		destTag = metrics.DestinationTag(endpoint.Endpoint.Spec.GetName())
	} else {
		destTag = metrics.DestinationTag(args.endpointName)
	}
	outcomeTag := metrics.OutcomeTag(startCallOutcomeTag(callCtx, response, callErr))
	failureSourceTag := metrics.FailureSourceTag(failureSource)
	OutboundRequestCounter.With(h.metricsHandler).Record(1, namespaceTag, destTag, methodTag, outcomeTag, failureSourceTag)
	OutboundRequestLatency.With(h.metricsHandler).Record(callDuration, namespaceTag, destTag, methodTag, outcomeTag, failureSourceTag)

	if callErr != nil {
		_, isTimeoutBelowMin := errors.AsType[*operationTimeoutBelowMinError](callErr)
		if failureSource == commonnexus.FailureSourceWorker || isTimeoutBelowMin {
			h.logger.Debug("Nexus StartOperation request failed", tag.Error(callErr))
		} else {
			h.logger.Error("Nexus StartOperation request failed", tag.Error(callErr))
		}
	}
}

// operationBackoffTaskHandler handles InvocationBackoffTask pure tasks, which
// reschedule an operation once its retry backoff has elapsed.
type operationBackoffTaskHandler struct {
	chasm.PureTaskHandlerBase
	config *Config

	metricsHandler metrics.Handler
	logger         log.Logger
}
// newOperationBackoffTaskHandler constructs the backoff task handler from the
// shared handler options.
func newOperationBackoffTaskHandler(opts operationTaskHandlerOptions) *operationBackoffTaskHandler {
	return &operationBackoffTaskHandler{
		config:         opts.Config,
		metricsHandler: opts.MetricsHandler,
		logger:         opts.Logger,
	}
}

// Validate reports whether the backoff task is still relevant: the operation
// must still be backing off and the task's attempt must match the operation's.
func (h *operationBackoffTaskHandler) Validate(
	ctx chasm.Context,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.InvocationBackoffTask,
) (bool, error) {
	return op.Status == nexusoperationpb.OPERATION_STATUS_BACKING_OFF && op.GetAttempt() == task.GetAttempt(), nil
}

// Execute moves the operation back to a schedulable state once the backoff
// period has elapsed.
func (h *operationBackoffTaskHandler) Execute(
	ctx chasm.MutableContext,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.InvocationBackoffTask,
) error {
	return transitionRescheduled.Apply(op, ctx, EventRescheduled{})
}

// operationScheduleToStartTimeoutTaskHandler handles ScheduleToStartTimeoutTask
// pure tasks, timing out operations that never started.
type operationScheduleToStartTimeoutTaskHandler struct {
	chasm.PureTaskHandlerBase
	config *Config

	metricsHandler metrics.Handler
	logger         log.Logger
}

// newOperationScheduleToStartTimeoutTaskHandler constructs the handler from
// the shared handler options.
func newOperationScheduleToStartTimeoutTaskHandler(opts operationTaskHandlerOptions) *operationScheduleToStartTimeoutTaskHandler {
	return &operationScheduleToStartTimeoutTaskHandler{
		config:         opts.Config,
		metricsHandler: opts.MetricsHandler,
		logger:         opts.Logger,
	}
}

// Validate reports whether the timeout still applies: the operation must still
// be in a state from which it could start.
func (h *operationScheduleToStartTimeoutTaskHandler) Validate(
	ctx chasm.Context,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.ScheduleToStartTimeoutTask,
) (bool, error) {
	return TransitionStarted.Possible(op), nil
}

// Execute times the operation out with a schedule-to-start timeout failure.
func (h *operationScheduleToStartTimeoutTaskHandler) Execute(
	ctx chasm.MutableContext,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.ScheduleToStartTimeoutTask,
) error {
	return op.onTimedOut(ctx, &failurepb.Failure{
		Message: "operation timed out",
		FailureInfo: &failurepb.Failure_TimeoutFailureInfo{
			TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{
				TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_START,
			},
		},
	}, false)
}
// operationStartToCloseTimeoutTaskHandler handles StartToCloseTimeoutTask pure
// tasks, timing out operations that started but never completed.
type operationStartToCloseTimeoutTaskHandler struct {
	chasm.PureTaskHandlerBase
	config *Config

	metricsHandler metrics.Handler
	logger         log.Logger
}

// newOperationStartToCloseTimeoutTaskHandler constructs the handler from the
// shared handler options.
func newOperationStartToCloseTimeoutTaskHandler(opts operationTaskHandlerOptions) *operationStartToCloseTimeoutTaskHandler {
	return &operationStartToCloseTimeoutTaskHandler{
		config:         opts.Config,
		metricsHandler: opts.MetricsHandler,
		logger:         opts.Logger,
	}
}

// Validate reports whether the timeout still applies: the operation must still
// be in STARTED status.
func (h *operationStartToCloseTimeoutTaskHandler) Validate(
	ctx chasm.Context,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.StartToCloseTimeoutTask,
) (bool, error) {
	return op.Status == nexusoperationpb.OPERATION_STATUS_STARTED, nil
}

// Execute times the operation out with a start-to-close timeout failure.
func (h *operationStartToCloseTimeoutTaskHandler) Execute(
	ctx chasm.MutableContext,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.StartToCloseTimeoutTask,
) error {
	return op.onTimedOut(ctx, &failurepb.Failure{
		Message: "operation timed out",
		FailureInfo: &failurepb.Failure_TimeoutFailureInfo{
			TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{
				TimeoutType: enumspb.TIMEOUT_TYPE_START_TO_CLOSE,
			},
		},
	}, false)
}

// operationScheduleToCloseTimeoutTaskHandler handles ScheduleToCloseTimeoutTask
// pure tasks, timing out operations that did not close within the overall limit.
type operationScheduleToCloseTimeoutTaskHandler struct {
	chasm.PureTaskHandlerBase
	config *Config

	metricsHandler metrics.Handler
	logger         log.Logger
}

// newOperationScheduleToCloseTimeoutTaskHandler constructs the handler from the
// shared handler options.
func newOperationScheduleToCloseTimeoutTaskHandler(opts operationTaskHandlerOptions) *operationScheduleToCloseTimeoutTaskHandler {
	return &operationScheduleToCloseTimeoutTaskHandler{
		config:         opts.Config,
		metricsHandler: opts.MetricsHandler,
		logger:         opts.Logger,
	}
}

// Validate reports whether the timeout still applies: the operation must still
// be in a state from which a timed-out transition is possible.
func (h *operationScheduleToCloseTimeoutTaskHandler) Validate(
	ctx chasm.Context,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.ScheduleToCloseTimeoutTask,
) (bool, error) {
	return TransitionTimedOut.Possible(op), nil
}

// Execute times the operation out with a schedule-to-close timeout failure.
func (h *operationScheduleToCloseTimeoutTaskHandler) Execute(
	ctx chasm.MutableContext,
	op *Operation,
	attrs chasm.TaskAttributes,
	task *nexusoperationpb.ScheduleToCloseTimeoutTask,
) error {
	return op.onTimedOut(ctx, &failurepb.Failure{
		Message: "operation timed out",
		FailureInfo: &failurepb.Failure_TimeoutFailureInfo{
			TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{
				TimeoutType: enumspb.TIMEOUT_TYPE_SCHEDULE_TO_CLOSE,
			},
		},
	}, false)
}
test infrastructure for invocation task handler tests. +type invocationTaskTestEnv struct { + t *testing.T + ctrl *gomock.Controller + handler *operationInvocationTaskHandler + op *Operation + mockEngine *chasm.MockEngine + timeSource *clock.EventTimeSource + nsRegistry *namespace.MockRegistry +} + +func newInvocationTaskTestEnv( + t *testing.T, + op *Operation, + invocationData InvocationData, + endpointReg nexustest.FakeEndpointRegistry, + clientProvider ClientProvider, + metricsHandler metrics.Handler, + requestTimeout time.Duration, +) *invocationTaskTestEnv { + t.Helper() + + ctrl := gomock.NewController(t) + timeSource := clock.NewEventTimeSource() + timeSource.Update(time.Now()) + + nsRegistry := namespace.NewMockRegistry(ctrl) + nsRegistry.EXPECT().GetNamespaceByID(namespace.ID("ns-id")).Return( + namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0), nil) + nsRegistry.EXPECT().GetNamespaceName(namespace.ID("ns-id")).Return(namespace.Name("ns-name"), nil).AnyTimes() + + callbackTmpl, err := template.New("callback").Parse("http://localhost/callback") + require.NoError(t, err) + + handler := &operationInvocationTaskHandler{ + config: &Config{ + RequestTimeout: dynamicconfig.GetDurationPropertyFnFilteredByDestination(requestTimeout), + MaxOperationTokenLength: dynamicconfig.GetIntPropertyFnFilteredByNamespace(10), + MinRequestTimeout: dynamicconfig.GetDurationPropertyFnFilteredByNamespace(time.Millisecond), + PayloadSizeLimit: dynamicconfig.GetIntPropertyFnFilteredByNamespace(2 * 1024 * 1024), + CallbackURLTemplate: dynamicconfig.GetTypedPropertyFn(callbackTmpl), + UseSystemCallbackURL: dynamicconfig.GetBoolPropertyFn(false), + UseNewFailureWireFormat: dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true), + RetryPolicy: dynamicconfig.GetTypedPropertyFn[backoff.RetryPolicy]( + backoff.NewExponentialRetryPolicy(time.Second), + ), + }, + namespaceRegistry: nsRegistry, + metricsHandler: metricsHandler, + logger: 
log.NewNoopLogger(), + clientProvider: clientProvider, + endpointRegistry: endpointReg, + callbackTokenGenerator: commonnexus.NewCallbackTokenGenerator(), + } + + // Set up CHASM tree with mock store as parent of the operation. + logger := log.NewNoopLogger() + registry := chasm.NewRegistry(logger) + require.NoError(t, registry.Register(&chasm.CoreLibrary{})) + require.NoError(t, registry.Register(&mockStoreLibrary{})) + require.NoError(t, registry.Register(&Library{})) + + nodeBackend := &chasm.MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 2 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleCurrentVersionedTransition: func() *persistencespb.VersionedTransition { + return &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + } + }, + } + + root := chasm.NewEmptyTree(registry, timeSource, nodeBackend, chasm.DefaultPathEncoder, logger, metrics.NoopMetricsHandler) + ctx := chasm.NewMutableContext(context.Background(), root) + require.NoError(t, root.SetRootComponent(&mockStoreComponent{ + invocationData: invocationData, + Op: chasm.NewComponentField(ctx, op), + })) + _, err = root.CloseTransaction() + require.NoError(t, err) + + mockEngine := chasm.NewMockEngine(ctrl) + + return &invocationTaskTestEnv{ + t: t, + ctrl: ctrl, + handler: handler, + op: op, + mockEngine: mockEngine, + timeSource: timeSource, + nsRegistry: nsRegistry, + } +} + +func (e *invocationTaskTestEnv) setupReadComponent() { + e.mockEngine.EXPECT().ReadComponent( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).DoAndReturn(func(_ context.Context, _ chasm.ComponentRef, readFn func(chasm.Context, chasm.Component) error, _ ...chasm.TransitionOption) error { + executionKey := chasm.ExecutionKey{ + NamespaceID: "ns-id", + BusinessID: "wf-id", + RunID: "run-id", + } + mockCtx := &chasm.MockContext{ + HandleExecutionKey: func() chasm.ExecutionKey { + return executionKey + }, + HandleNow: func(_ chasm.Component) time.Time { + 
return e.timeSource.Now() + }, + HandleRef: func(_ chasm.Component) ([]byte, error) { + return []byte{}, nil + }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0) + }, + } + return readFn(mockCtx, e.op) + }) +} + +func (e *invocationTaskTestEnv) setupUpdateComponent() { + e.mockEngine.EXPECT().UpdateComponent( + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).DoAndReturn(func(_ context.Context, _ chasm.ComponentRef, updateFn func(chasm.MutableContext, chasm.Component) error, _ ...chasm.TransitionOption) ([]byte, error) { + mockCtx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(_ chasm.Component) time.Time { + return e.timeSource.Now() + }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{ + NamespaceID: "ns-id", + BusinessID: "wf-id", + RunID: "run-id", + } + }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0) + }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{}), + }), + }, + } + err := updateFn(mockCtx, e.op) + return nil, err + }) +} + +func (e *invocationTaskTestEnv) execute(task *nexusoperationpb.InvocationTask) error { + ref := chasm.NewComponentRef[*Operation](chasm.ExecutionKey{ + NamespaceID: "ns-id", + BusinessID: "wf-id", + RunID: "run-id", + }) + engineCtx := chasm.NewEngineContext(context.Background(), e.mockEngine) + return e.handler.Execute(engineCtx, ref, chasm.TaskAttributes{Destination: "endpoint"}, task) +} + +func TestInvocationTaskHandler_HTTP(t *testing.T) { + handlerLink := &commonpb.Link_WorkflowEvent{ + Namespace: "handler-ns", + WorkflowId: "handler-wf-id", + RunId: "handler-run-id", + Reference: &commonpb.Link_WorkflowEvent_EventRef{ + 
// TestInvocationTaskHandler_HTTP drives the invocation task handler against a
// real in-process Nexus HTTP test server. Each case customizes the server's
// StartOperation behavior and the operation's timeout fields, then asserts
// the resulting operation status, last-attempt failure, and emitted metrics.
func TestInvocationTaskHandler_HTTP(t *testing.T) {
	// Link the fake handler attaches to async responses.
	handlerLink := &commonpb.Link_WorkflowEvent{
		Namespace:  "handler-ns",
		WorkflowId: "handler-wf-id",
		RunId:      "handler-run-id",
		Reference: &commonpb.Link_WorkflowEvent_EventRef{
			EventRef: &commonpb.Link_WorkflowEvent_EventReference{
				EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED,
			},
		},
	}
	handlerNexusLink := commonnexus.ConvertLinkWorkflowEventToNexusLink(handlerLink)

	cases := []struct {
		name                  string
		header                nexus.Header
		onStartOperation      func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error)
		expectedMetricOutcome string
		checkOutcome          func(t *testing.T, op *Operation)
		requestTimeout        time.Duration
		schedToCloseTimeout   time.Duration
		startToCloseTimeout   time.Duration
		schedToStartTimeout   time.Duration
		// destinationDown: the handler is expected to return a
		// DestinationDownError instead of nil.
		destinationDown  bool
		endpointNotFound bool
	}{
		{
			// Async success: validates the caller links sent on the request,
			// attaches a handler link, and returns an operation token.
			name: "async start",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				if len(options.Links) != 2 {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "expected 2 links, got %d", len(options.Links))
				}
				workflowEventLinkIdx := slices.IndexFunc(options.Links, func(link nexus.Link) bool {
					return link.Type == string((&commonpb.Link_WorkflowEvent{}).ProtoReflect().Descriptor().FullName())
				})
				if workflowEventLinkIdx == -1 {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "missing workflow event link")
				}
				link, err := commonnexus.ConvertNexusLinkToLinkWorkflowEvent(options.Links[workflowEventLinkIdx])
				if err != nil {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "failed to convert link: %v", err)
				}
				expectedLink := &commonpb.Link_WorkflowEvent{
					Namespace:  "ns-name",
					WorkflowId: "wf-id",
					RunId:      "run-id",
					Reference: &commonpb.Link_WorkflowEvent_EventRef{
						EventRef: &commonpb.Link_WorkflowEvent_EventReference{
							EventId:   1,
							EventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_SCHEDULED,
						},
					},
				}
				if !proto.Equal(expectedLink, link) {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "link mismatch: got %v, want %v", link, expectedLink)
				}
				protoLinks := commonnexus.ConvertLinksToProto(options.Links)
				if protoLinks[1].GetType() != "temporal.api.common.v1.Link.NexusOperation" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "unexpected nexus operation link type: %v", protoLinks[1].GetType())
				}
				if protoLinks[1].GetUrl() != "temporal:///namespaces/ns-name/nexus-operations/wf-id?runID=run-id" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "unexpected nexus operation link URL: %v", protoLinks[1].GetUrl())
				}
				nexus.AddHandlerLinks(ctx, handlerNexusLink)
				return &nexus.HandlerStartOperationResultAsync{
					OperationToken: "op-token",
				}, nil
			},
			expectedMetricOutcome: "pending",
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_STARTED, op.Status)
				require.Equal(t, "op-token", op.OperationToken)
			},
		},
		{
			// Sync success: validates service/operation names, callback token,
			// callback URL, the caller-provided operation-timeout header, and
			// the serialized input before returning a sync result.
			name:                "sync start",
			schedToCloseTimeout: time.Hour,
			header:              nexus.Header{nexus.HeaderOperationTimeout: commonnexus.FormatDuration(time.Millisecond)},
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				if service != "service" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid service name")
				}
				if operation != "operation" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid operation name")
				}
				if options.CallbackHeader.Get("temporal-callback-token") == "" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "empty callback token")
				}
				if options.CallbackURL != "http://localhost/callback" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid callback URL")
				}
				if options.Header.Get(nexus.HeaderOperationTimeout) != "1ms" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid operation timeout header: %s", options.Header.Get(nexus.HeaderOperationTimeout))
				}
				var v string
				if err := input.Consume(&v); err != nil || v != "input" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid input")
				}
				return &nexus.HandlerStartOperationResultSync[any]{Value: "result"}, nil
			},
			expectedMetricOutcome: "successful",
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_SUCCEEDED, op.Status)
			},
		},
		{
			// Handler reports the operation as failed; expect the failure and
			// its cause to be converted to the proto failure wire format.
			name: "sync failed",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				return nil, &nexus.OperationError{
					State:   nexus.OperationStateFailed,
					Message: "operation failed from handler",
					Cause: &nexus.FailureError{
						Failure: nexus.Failure{Message: "cause", Metadata: map[string]string{"encoding": "json/plain"}, Details: json.RawMessage("\"details\"")},
					},
				}
			},
			expectedMetricOutcome: "operation-unsuccessful:failed",
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.Status)
				protorequire.ProtoEqual(t, &failurepb.Failure{
					Message: "operation failed from handler",
					FailureInfo: &failurepb.Failure_ApplicationFailureInfo{
						ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{
							Type:         "OperationError",
							NonRetryable: true,
						},
					},
					Cause: &failurepb.Failure{
						Message: "cause",
						FailureInfo: &failurepb.Failure_ApplicationFailureInfo{
							ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{
								Type: "NexusFailure",
								Details: &commonpb.Payloads{Payloads: []*commonpb.Payload{{
									Metadata: map[string][]byte{"encoding": []byte("json/plain")},
									Data:     []byte(`{"metadata":{"encoding":"json/plain"},"details":"details"}`),
								}}},
							},
						},
					},
				}, op.LastAttemptFailure)
			},
		},
		{
			// Handler reports the operation as canceled; expect a canceled
			// failure info with the cause preserved.
			name: "sync canceled",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				return nil, &nexus.OperationError{
					State:   nexus.OperationStateCanceled,
					Message: "operation canceled from handler",
					Cause: &nexus.FailureError{
						Failure: nexus.Failure{Message: "cause", Metadata: map[string]string{"encoding": "json/plain"}, Details: json.RawMessage("\"details\"")},
					},
				}
			},
			expectedMetricOutcome: "operation-unsuccessful:canceled",
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_CANCELED, op.Status)
				protorequire.ProtoEqual(t, &failurepb.Failure{
					Message: "operation canceled from handler",
					FailureInfo: &failurepb.Failure_CanceledFailureInfo{
						CanceledFailureInfo: &failurepb.CanceledFailureInfo{},
					},
					Cause: &failurepb.Failure{
						Message: "cause",
						FailureInfo: &failurepb.Failure_ApplicationFailureInfo{
							ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{
								Type: "NexusFailure",
								Details: &commonpb.Payloads{Payloads: []*commonpb.Payload{{
									Metadata: map[string][]byte{"encoding": []byte("json/plain")},
									Data:     []byte(`{"metadata":{"encoding":"json/plain"},"details":"details"}`),
								}}},
							},
						},
					},
				}, op.LastAttemptFailure)
			},
		},
		{
			// Retryable handler error: operation backs off and the handler
			// surfaces a DestinationDownError.
			name:            "transient error",
			destinationDown: true,
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeInternal, "internal server error")
			},
			expectedMetricOutcome: "handler-error:INTERNAL",
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_BACKING_OFF, op.Status)
				require.Equal(t, string(nexus.HandlerErrorTypeInternal), op.LastAttemptFailure.GetNexusHandlerFailureInfo().GetType())
				require.Equal(t, "internal server error", op.LastAttemptFailure.Message)
			},
		},
		{
			// The configured RequestTimeout (2ms) fires while the server
			// sleeps; the attempt fails as retryable.
			name:                  "invocation timeout by request timeout",
			requestTimeout:        2 * time.Millisecond,
			schedToCloseTimeout:   time.Hour,
			destinationDown:       true,
			expectedMetricOutcome: "request-timeout",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				time.Sleep(time.Millisecond * 100) //nolint:forbidigo
				return &nexus.HandlerStartOperationResultAsync{OperationToken: "op-token"}, nil
			},
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_BACKING_OFF, op.Status)
				require.NotNil(t, op.LastAttemptFailure.GetServerFailureInfo())
				require.Equal(t, "request timed out", op.LastAttemptFailure.Message)
			},
		},
		{
			// ScheduleToCloseTimeout bounds the request deadline and is also
			// propagated in the operation-timeout header.
			name:                  "invocation timeout by ScheduleToCloseTimeout",
			schedToCloseTimeout:   10 * time.Millisecond,
			destinationDown:       true,
			expectedMetricOutcome: "request-timeout",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				opTimeout, err := time.ParseDuration(options.Header.Get(nexus.HeaderOperationTimeout))
				if err != nil || opTimeout > 10*time.Millisecond {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid operation timeout header: %s", options.Header.Get(nexus.HeaderOperationTimeout))
				}
				time.Sleep(time.Millisecond * 100) //nolint:forbidigo
				return &nexus.HandlerStartOperationResultAsync{OperationToken: "op-token"}, nil
			},
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_BACKING_OFF, op.Status)
				require.NotNil(t, op.LastAttemptFailure.GetServerFailureInfo())
				require.Equal(t, "request timed out", op.LastAttemptFailure.Message)
			},
		},
		{
			// ScheduleToStartTimeout bounds the request deadline but must NOT
			// be forwarded as the operation-timeout header.
			name:                  "invocation timeout by ScheduleToStartTimeout",
			schedToStartTimeout:   10 * time.Millisecond,
			destinationDown:       true,
			expectedMetricOutcome: "request-timeout",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				if options.Header.Get(nexus.HeaderOperationTimeout) != "" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "operation timeout header should not be set, got: %s", options.Header.Get(nexus.HeaderOperationTimeout))
				}
				time.Sleep(time.Millisecond * 100) //nolint:forbidigo
				return &nexus.HandlerStartOperationResultAsync{OperationToken: "op-token"}, nil
			},
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_BACKING_OFF, op.Status)
				require.NotNil(t, op.LastAttemptFailure.GetServerFailureInfo())
				require.Equal(t, "request timed out", op.LastAttemptFailure.Message)
			},
		},
		{
			// StartToCloseTimeout is translated into the operation-timeout
			// header ("60000ms" for one minute).
			name:                  "operation timeout header set by StartToCloseTimeout",
			startToCloseTimeout:   1 * time.Minute,
			expectedMetricOutcome: "pending",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				if options.Header.Get(nexus.HeaderOperationTimeout) != "60000ms" {
					return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "invalid operation timeout header: %s", options.Header.Get(nexus.HeaderOperationTimeout))
				}
				return &nexus.HandlerStartOperationResultAsync{OperationToken: "op-token"}, nil
			},
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_STARTED, op.Status)
			},
		},
		{
			// Remaining time below MinRequestTimeout: the operation times out
			// without any request being sent.
			name:                  "ScheduleToCloseTimeout less than MinRequestTimeout",
			schedToCloseTimeout:   time.Microsecond,
			expectedMetricOutcome: "operation-timeout",
			onStartOperation:      nil, // Should not be called.
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_TIMED_OUT, op.Status)
			},
		},
		{
			// Endpoint registry returns NotFound: the operation fails
			// without any request being sent.
			name:             "endpoint not found",
			endpointNotFound: true,
			onStartOperation: nil, // Should not be called.
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.Status)
			},
		},
		{
			// Token exceeds MaxOperationTokenLength (10): non-retryable
			// server failure.
			name: "token too long",
			onStartOperation: func(ctx context.Context, service, operation string, input *nexus.LazyValue, options nexus.StartOperationOptions) (nexus.HandlerStartOperationResult[any], error) {
				return &nexus.HandlerStartOperationResultAsync{OperationToken: "12345678901"}, nil
			},
			expectedMetricOutcome: "invalid-operation-token",
			checkOutcome: func(t *testing.T, op *Operation) {
				require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.Status)
				require.NotNil(t, op.LastAttemptFailure.GetServerFailureInfo())
				require.True(t, op.LastAttemptFailure.GetServerFailureInfo().GetNonRetryable())
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			t.Parallel()
			// Start a per-subtest Nexus HTTP server with the case's handler.
			listenAddr := nexustest.AllocListenAddress()
			h := nexustest.Handler{}
			if tc.onStartOperation != nil {
				h.OnStartOperation = tc.onStartOperation
			}
			nexustest.NewNexusServer(t, listenAddr, h)

			// Scheduled operation with the case's timeout fields.
			op := &Operation{
				OperationState: &nexusoperationpb.OperationState{
					Status:                 nexusoperationpb.OPERATION_STATUS_SCHEDULED,
					EndpointId:             "endpoint-id",
					Endpoint:               "endpoint",
					Service:                "service",
					Operation:              "operation",
					ScheduledTime:          timestamppb.Now(),
					ScheduleToCloseTimeout: durationpb.New(tc.schedToCloseTimeout),
					ScheduleToStartTimeout: durationpb.New(tc.schedToStartTimeout),
					StartToCloseTimeout:    durationpb.New(tc.startToCloseTimeout),
					RequestId:              "request-id",
					Attempt:                1,
				},
			}
			endpointReg := nexustest.FakeEndpointRegistry{
				OnGetByID: func(ctx context.Context, endpointID string) (*persistencespb.NexusEndpointEntry, error) {
					if tc.endpointNotFound {
						return nil, serviceerror.NewNotFound("endpoint not found")
					}
					return endpointEntry, nil
				},
				OnGetByName: func(ctx context.Context, namespaceID namespace.ID, endpointName string) (*persistencespb.NexusEndpointEntry, error) {
					if tc.endpointNotFound {
						return nil, serviceerror.NewNotFound("endpoint not found")
					}
					return endpointEntry, nil
				},
			}

			metricsHandler := metricstest.NewCaptureHandler()
			capture := metricsHandler.StartCapture()
			defer metricsHandler.StopCapture(capture)

			// Route all client traffic to the per-subtest test server.
			clientProvider := func(ctx context.Context, namespaceID string, entry *persistencespb.NexusEndpointEntry, service string) (*nexusrpc.HTTPClient, error) {
				return nexusrpc.NewHTTPClient(nexusrpc.HTTPClientOptions{
					BaseURL:    "http://" + listenAddr,
					Service:    service,
					Serializer: commonnexus.PayloadSerializer,
				})
			}

			callerLink := commonnexus.ConvertLinkWorkflowEventToNexusLink(&commonpb.Link_WorkflowEvent{
				Namespace:  "ns-name",
				WorkflowId: "wf-id",
				RunId:      "run-id",
				Reference: &commonpb.Link_WorkflowEvent_EventRef{
					EventRef: &commonpb.Link_WorkflowEvent_EventReference{
						EventId:   1,
						EventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_SCHEDULED,
					},
				},
			})

			// cmp.Or: default the request timeout to an hour when the case
			// does not set one.
			env := newInvocationTaskTestEnv(t, op,
				InvocationData{
					Input:      mustToPayload(t, "input"),
					Header:     tc.header,
					NexusLinks: []nexus.Link{callerLink},
				},
				endpointReg, clientProvider, metricsHandler, cmp.Or(tc.requestTimeout, time.Hour))

			env.setupReadComponent()
			env.setupUpdateComponent()

			err := env.execute(&nexusoperationpb.InvocationTask{Attempt: 1})
			if tc.destinationDown {
				var destinationDownErr *queueserrors.DestinationDownError
				require.ErrorAs(t, err, &destinationDownErr)
			} else {
				require.NoError(t, err)
			}
			tc.checkOutcome(t, op)

			// Metric assertions: one counter and one latency recording with
			// the expected outcome tag.
			if tc.expectedMetricOutcome != "" {
				snap := capture.Snapshot()
				counterRecordings := snap[OutboundRequestCounter.Name()]
				require.Len(t, counterRecordings, 1)
				require.Equal(t, int64(1), counterRecordings[0].Value)
				require.Equal(t, "ns-name", counterRecordings[0].Tags["namespace"])
				require.Equal(t, "endpoint", counterRecordings[0].Tags["destination"])
				require.Equal(t, "StartOperation", counterRecordings[0].Tags["method"])
				require.Equal(t, tc.expectedMetricOutcome, counterRecordings[0].Tags["outcome"])
				require.Equal(t, "_unknown_", counterRecordings[0].Tags["failure_source"])

				timerRecordings := snap[OutboundRequestLatency.Name()]
				require.Len(t, timerRecordings, 1)
				require.Equal(t, tc.expectedMetricOutcome, timerRecordings[0].Tags["outcome"])
			}
		})
	}
}
counterRecordings := snap[OutboundRequestCounter.Name()] + require.Len(t, counterRecordings, 1) + require.Equal(t, int64(1), counterRecordings[0].Value) + require.Equal(t, "ns-name", counterRecordings[0].Tags["namespace"]) + require.Equal(t, "endpoint", counterRecordings[0].Tags["destination"]) + require.Equal(t, "StartOperation", counterRecordings[0].Tags["method"]) + require.Equal(t, tc.expectedMetricOutcome, counterRecordings[0].Tags["outcome"]) + require.Equal(t, "_unknown_", counterRecordings[0].Tags["failure_source"]) + + timerRecordings := snap[OutboundRequestLatency.Name()] + require.Len(t, timerRecordings, 1) + require.Equal(t, tc.expectedMetricOutcome, timerRecordings[0].Tags["outcome"]) + } + }) + } +} + +func TestInvocationTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name string + status nexusoperationpb.OperationStatus + opAttempt int32 + taskAttempt int32 + valid bool + }{ + { + name: "valid when scheduled and attempt matches", + status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + opAttempt: 1, + taskAttempt: 1, + valid: true, + }, + { + name: "invalid when scheduled but attempt mismatches", + status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + opAttempt: 2, + taskAttempt: 1, + valid: false, + }, + { + name: "invalid when started", + status: nexusoperationpb.OPERATION_STATUS_STARTED, + opAttempt: 1, + taskAttempt: 1, + valid: false, + }, + } + + handler := &operationInvocationTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + op := newTestOperation() + op.Status = tc.status + op.Attempt = tc.opAttempt + + valid, err := handler.Validate(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.InvocationTask{Attempt: tc.taskAttempt}) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestBackoffTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name string + status nexusoperationpb.OperationStatus + attempt int32 + task 
*nexusoperationpb.InvocationBackoffTask + valid bool + }{ + { + name: "valid when backing off and attempt matches", + status: nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + attempt: 2, + task: &nexusoperationpb.InvocationBackoffTask{Attempt: 2}, + valid: true, + }, + { + name: "invalid when backing off but attempt mismatches", + status: nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + attempt: 2, + task: &nexusoperationpb.InvocationBackoffTask{Attempt: 1}, + valid: false, + }, + { + name: "invalid when scheduled", + status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + attempt: 1, + task: &nexusoperationpb.InvocationBackoffTask{Attempt: 1}, + valid: false, + }, + } + + handler := &operationBackoffTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + op := newTestOperation() + op.Status = tc.status + op.Attempt = tc.attempt + + valid, err := handler.Validate(ctx, op, chasm.TaskAttributes{}, tc.task) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestBackoffTaskHandler_Execute(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + }, + } + + op := newTestOperation() + op.Status = nexusoperationpb.OPERATION_STATUS_BACKING_OFF + op.Attempt = 2 + + handler := &operationBackoffTaskHandler{} + err := handler.Execute(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.InvocationBackoffTask{Attempt: 2}) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SCHEDULED, op.Status) + // Verify invocation task was emitted + require.Len(t, ctx.Tasks, 1) + _, ok := ctx.Tasks[0].Payload.(*nexusoperationpb.InvocationTask) + require.True(t, ok, "expected InvocationTask") +} + +func TestScheduleToStartTimeoutTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name string + status nexusoperationpb.OperationStatus + valid bool + }{ + { + name: 
"valid when scheduled", + status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + valid: true, + }, + { + name: "valid when backing off", + status: nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + valid: true, + }, + { + name: "invalid when started", + status: nexusoperationpb.OPERATION_STATUS_STARTED, + valid: false, + }, + { + name: "invalid when succeeded", + status: nexusoperationpb.OPERATION_STATUS_SUCCEEDED, + valid: false, + }, + { + name: "invalid when timed out", + status: nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + valid: false, + }, + } + + handler := &operationScheduleToStartTimeoutTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + op := newTestOperation() + op.Status = tc.status + + valid, err := handler.Validate(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.ScheduleToStartTimeoutTask{}) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestScheduleToStartTimeoutTaskHandler_Execute(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{NamespaceID: "ns-id"} + }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0) + }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{}), + }), + }, + } + + op := newTestOperation() + op.Status = nexusoperationpb.OPERATION_STATUS_SCHEDULED + + handler := &operationScheduleToStartTimeoutTaskHandler{} + err := handler.Execute(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.ScheduleToStartTimeoutTask{}) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_TIMED_OUT, op.Status) + require.Empty(t, 
ctx.Tasks) +} + +func TestStartToCloseTimeoutTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name string + status nexusoperationpb.OperationStatus + valid bool + }{ + { + name: "valid when started", + status: nexusoperationpb.OPERATION_STATUS_STARTED, + valid: true, + }, + { + name: "invalid when scheduled", + status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + valid: false, + }, + { + name: "invalid when backing off", + status: nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + valid: false, + }, + { + name: "invalid when succeeded", + status: nexusoperationpb.OPERATION_STATUS_SUCCEEDED, + valid: false, + }, + { + name: "invalid when timed out", + status: nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + valid: false, + }, + } + + handler := &operationStartToCloseTimeoutTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + op := newTestOperation() + op.Status = tc.status + + valid, err := handler.Validate(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.StartToCloseTimeoutTask{}) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestStartToCloseTimeoutTaskHandler_Execute(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{NamespaceID: "ns-id"} + }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0) + }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{}), + }), + }, + } + + op := newTestOperation() + op.Status = nexusoperationpb.OPERATION_STATUS_STARTED + + handler := &operationStartToCloseTimeoutTaskHandler{} + err := handler.Execute(ctx, op, 
chasm.TaskAttributes{}, &nexusoperationpb.StartToCloseTimeoutTask{}) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_TIMED_OUT, op.Status) + require.Empty(t, ctx.Tasks) +} + +func TestScheduleToCloseTimeoutTaskHandler_Validate(t *testing.T) { + testCases := []struct { + name string + status nexusoperationpb.OperationStatus + valid bool + }{ + { + name: "valid when scheduled", + status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + valid: true, + }, + { + name: "valid when started", + status: nexusoperationpb.OPERATION_STATUS_STARTED, + valid: true, + }, + { + name: "invalid when succeeded", + status: nexusoperationpb.OPERATION_STATUS_SUCCEEDED, + valid: false, + }, + { + name: "invalid when timed out", + status: nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + valid: false, + }, + } + + handler := &operationScheduleToCloseTimeoutTaskHandler{} + ctx := &chasm.MockContext{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + op := newTestOperation() + op.Status = tc.status + + valid, err := handler.Validate(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.ScheduleToCloseTimeoutTask{}) + require.NoError(t, err) + require.Equal(t, tc.valid, valid) + }) + } +} + +func TestScheduleToCloseTimeoutTaskHandler_Execute(t *testing.T) { + ctx := &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{NamespaceID: "ns-id"} + }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0) + }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{}), + }), + }, + } + + op := newTestOperation() + op.Status = nexusoperationpb.OPERATION_STATUS_SCHEDULED + + handler := 
&operationScheduleToCloseTimeoutTaskHandler{} + err := handler.Execute(ctx, op, chasm.TaskAttributes{}, &nexusoperationpb.ScheduleToCloseTimeoutTask{}) + require.NoError(t, err) + + require.Equal(t, nexusoperationpb.OPERATION_STATUS_TIMED_OUT, op.Status) + require.Empty(t, ctx.Tasks) +} + +// testStartProcessor implements chasm.NexusOperationProcessor[string] for system endpoint start tests. +type testStartProcessor struct{} + +func (p *testStartProcessor) ProcessInput( + _ chasm.NexusOperationProcessorContext, + _ string, +) (*chasm.NexusOperationProcessorResult, error) { + return &chasm.NexusOperationProcessorResult{ + RoutingKey: chasm.NexusOperationRoutingKeyRandom{}, + }, nil +} + +// testStartProcessorWithInput implements chasm.NexusOperationProcessor[*testProcessorInput] +// for system endpoint tests that verify input re-serialization. +type testProcessorInput struct { + Value string +} + +type testStartProcessorWithInput struct{} + +func (p *testStartProcessorWithInput) ProcessInput( + _ chasm.NexusOperationProcessorContext, + input *testProcessorInput, +) (*chasm.NexusOperationProcessorResult, error) { + input.Value = "processed:" + input.Value + return &chasm.NexusOperationProcessorResult{ + RoutingKey: chasm.NexusOperationRoutingKeyRandom{}, + }, nil +} + +func TestInvocationTaskHandler_SystemEndpoint(t *testing.T) { + handlerLink := &commonpb.Link_WorkflowEvent{ + Namespace: "handler-ns", + WorkflowId: "handler-wf-id", + RunId: "handler-run-id", + Reference: &commonpb.Link_WorkflowEvent_EventRef{ + EventRef: &commonpb.Link_WorkflowEvent_EventReference{ + EventType: enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_STARTED, + }, + }, + } + + cases := []struct { + name string + setupHistoryClient func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient + setupChasmRegistry func() *chasm.Registry + input *commonpb.Payload + expectedMetricOutcome string + checkOutcome func(t *testing.T, op *Operation) + }{ + { + name: "async start", + 
setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().StartNexusOperation(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(_ context.Context, request *historyservice.StartNexusOperationRequest, _ ...grpc.CallOption) (*historyservice.StartNexusOperationResponse, error) { + require.Len(t, request.GetRequest().GetLinks(), 2) + require.Equal(t, "temporal.api.common.v1.Link.WorkflowEvent", request.GetRequest().GetLinks()[0].GetType()) + require.Equal(t, "temporal.api.common.v1.Link.NexusOperation", request.GetRequest().GetLinks()[1].GetType()) + + return &historyservice.StartNexusOperationResponse{ + Response: &nexuspb.StartOperationResponse{ + Variant: &nexuspb.StartOperationResponse_AsyncSuccess{ + AsyncSuccess: &nexuspb.StartOperationResponse_Async{ + OperationToken: "system-op-token", + Links: commonnexus.ConvertLinksToProto([]nexus.Link{ + commonnexus.ConvertLinkWorkflowEventToNexusLink(handlerLink), + }), + }, + }, + }, + }, nil + }) + return client + }, + setupChasmRegistry: func() *chasm.Registry { + reg := chasm.NewRegistry(log.NewNoopLogger()) + serviceProc := chasm.NewNexusServiceProcessor("service") + serviceProc.MustRegisterOperation("operation", + chasm.NewRegisterableNexusOperationProcessor(&testStartProcessor{})) + reg.NexusEndpointProcessor.MustRegisterServiceProcessor(serviceProc) + return reg + }, + expectedMetricOutcome: "pending", + checkOutcome: func(t *testing.T, op *Operation) { + require.Equal(t, nexusoperationpb.OPERATION_STATUS_STARTED, op.Status) + require.Equal(t, "system-op-token", op.OperationToken) + }, + }, + { + name: "sync start", + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().StartNexusOperation(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(_ 
context.Context, request *historyservice.StartNexusOperationRequest, opts ...grpc.CallOption) (*historyservice.StartNexusOperationResponse, error) { + require.Len(t, request.GetRequest().GetLinks(), 2) + require.Equal(t, "temporal.api.common.v1.Link.WorkflowEvent", request.GetRequest().GetLinks()[0].GetType()) + require.Equal(t, "temporal.api.common.v1.Link.NexusOperation", request.GetRequest().GetLinks()[1].GetType()) + + var input testProcessorInput + if err := payloads.Decode(&commonpb.Payloads{Payloads: []*commonpb.Payload{request.Request.Payload}}, &input); err != nil { + return nil, err + } + if input.Value != "processed:test" { + return nil, fmt.Errorf("unexpected input: %v", input.Value) + } + return &historyservice.StartNexusOperationResponse{ + Response: &nexuspb.StartOperationResponse{ + Variant: &nexuspb.StartOperationResponse_SyncSuccess{ + SyncSuccess: &nexuspb.StartOperationResponse_Sync{ + Payload: mustToPayload(t, "result"), + }, + }, + }, + }, nil + }) + return client + }, + setupChasmRegistry: func() *chasm.Registry { + reg := chasm.NewRegistry(log.NewNoopLogger()) + serviceProc := chasm.NewNexusServiceProcessor("service") + serviceProc.MustRegisterOperation("operation", + chasm.NewRegisterableNexusOperationProcessor(&testStartProcessorWithInput{})) + reg.NexusEndpointProcessor.MustRegisterServiceProcessor(serviceProc) + return reg + }, + input: mustToPayload(t, testProcessorInput{"test"}), + expectedMetricOutcome: "successful", + checkOutcome: func(t *testing.T, op *Operation) { + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SUCCEEDED, op.Status) + }, + }, + { + name: "operation error", + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().StartNexusOperation(gomock.Any(), gomock.Any(), gomock.Any()).Return( + &historyservice.StartNexusOperationResponse{ + Response: &nexuspb.StartOperationResponse{ + Variant: 
&nexuspb.StartOperationResponse_Failure{ + Failure: &failurepb.Failure{ + Message: "operation failed", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ + ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{}, + }, + }, + }, + }, + }, nil, + ) + return client + }, + setupChasmRegistry: func() *chasm.Registry { + reg := chasm.NewRegistry(log.NewNoopLogger()) + serviceProc := chasm.NewNexusServiceProcessor("service") + serviceProc.MustRegisterOperation("operation", + chasm.NewRegisterableNexusOperationProcessor(&testStartProcessor{})) + reg.NexusEndpointProcessor.MustRegisterServiceProcessor(serviceProc) + return reg + }, + expectedMetricOutcome: "operation-unsuccessful:failed", + checkOutcome: func(t *testing.T, op *Operation) { + require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "operation failed", + FailureInfo: &failurepb.Failure_ApplicationFailureInfo{ + ApplicationFailureInfo: &failurepb.ApplicationFailureInfo{}, + }, + }, op.LastAttemptFailure) + }, + }, + { + name: "history service error - retryable", + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + client := historyservicemock.NewMockHistoryServiceClient(ctrl) + client.EXPECT().StartNexusOperation(gomock.Any(), gomock.Any(), gomock.Any()).Return( + nil, serviceerror.NewUnavailable("service unavailable"), + ) + return client + }, + setupChasmRegistry: func() *chasm.Registry { + reg := chasm.NewRegistry(log.NewNoopLogger()) + serviceProc := chasm.NewNexusServiceProcessor("service") + serviceProc.MustRegisterOperation("operation", + chasm.NewRegisterableNexusOperationProcessor(&testStartProcessor{})) + reg.NexusEndpointProcessor.MustRegisterServiceProcessor(serviceProc) + return reg + }, + expectedMetricOutcome: "service-error:Unavailable", + checkOutcome: func(t *testing.T, op *Operation) { + require.Equal(t, nexusoperationpb.OPERATION_STATUS_BACKING_OFF, op.Status) 
+ protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: "Unavailable: service unavailable", + FailureInfo: &failurepb.Failure_ServerFailureInfo{ + ServerFailureInfo: &failurepb.ServerFailureInfo{}, + }, + }, op.LastAttemptFailure) + }, + }, + { + name: "chasm processor error", + setupHistoryClient: func(ctrl *gomock.Controller) *historyservicemock.MockHistoryServiceClient { + // Should not be called if processor fails. + return historyservicemock.NewMockHistoryServiceClient(ctrl) + }, + setupChasmRegistry: func() *chasm.Registry { + // Don't register a processor so ProcessInput fails. + return chasm.NewRegistry(log.NewNoopLogger()) + }, + expectedMetricOutcome: "operation-processor-failed", + checkOutcome: func(t *testing.T, op *Operation) { + require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.Status) + protorequire.ProtoEqual(t, &failurepb.Failure{ + Message: `service "service" not found`, + FailureInfo: &failurepb.Failure_NexusHandlerFailureInfo{ + NexusHandlerFailureInfo: &failurepb.NexusHandlerFailureInfo{ + Type: string(nexus.HandlerErrorTypeNotFound), + }, + }, + }, op.LastAttemptFailure) + }, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + op := &Operation{ + OperationState: &nexusoperationpb.OperationState{ + Status: nexusoperationpb.OPERATION_STATUS_SCHEDULED, + Endpoint: commonnexus.SystemEndpoint, + Service: "service", + Operation: "operation", + ScheduledTime: timestamppb.Now(), + ScheduleToCloseTimeout: durationpb.New(time.Hour), + RequestId: "request-id", + Attempt: 1, + }, + } + + metricsHandler := metricstest.NewCaptureHandler() + capture := metricsHandler.StartCapture() + defer metricsHandler.StopCapture(capture) + + input := tc.input + if input == nil { + input = mustToPayload(t, "test") + } + + callerLink := commonnexus.ConvertLinkWorkflowEventToNexusLink(&commonpb.Link_WorkflowEvent{ + Namespace: "ns-name", + WorkflowId: "wf-id", + RunId: "run-id", + Reference: 
&commonpb.Link_WorkflowEvent_EventRef{ + EventRef: &commonpb.Link_WorkflowEvent_EventReference{ + EventId: 1, + EventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_SCHEDULED, + }, + }, + }) + + env := newInvocationTaskTestEnv(t, op, + InvocationData{Input: input, NexusLinks: []nexus.Link{callerLink}}, + nexustest.FakeEndpointRegistry{}, nil, metricsHandler, time.Hour) + + // Set up system endpoint dependencies. + historyClient := tc.setupHistoryClient(env.ctrl) + env.handler.historyClient = historyClient + env.handler.config.NumHistoryShards = 4 + env.handler.config.MaxOperationTokenLength = dynamicconfig.GetIntPropertyFnFilteredByNamespace(1000) + env.handler.chasmRegistry = tc.setupChasmRegistry() + + env.setupReadComponent() + env.setupUpdateComponent() + + err := env.execute(&nexusoperationpb.InvocationTask{Attempt: 1}) + var destinationDownErr *queueserrors.DestinationDownError + require.NotErrorAs(t, err, &destinationDownErr) + require.NoError(t, err) + + tc.checkOutcome(t, op) + + snap := capture.Snapshot() + counterRecordings := snap[OutboundRequestCounter.Name()] + require.Len(t, counterRecordings, 1) + require.Equal(t, int64(1), counterRecordings[0].Value) + require.Equal(t, "ns-name", counterRecordings[0].Tags["namespace"]) + require.Equal(t, commonnexus.SystemEndpoint, counterRecordings[0].Tags["destination"]) + require.Equal(t, "StartOperation", counterRecordings[0].Tags["method"]) + require.Equal(t, tc.expectedMetricOutcome, counterRecordings[0].Tags["outcome"]) + + timerRecordings := snap[OutboundRequestLatency.Name()] + require.Len(t, timerRecordings, 1) + require.Equal(t, tc.expectedMetricOutcome, timerRecordings[0].Tags["outcome"]) + }) + } +} diff --git a/chasm/lib/nexusoperation/operation_test.go b/chasm/lib/nexusoperation/operation_test.go new file mode 100644 index 00000000000..9e65ceb4de9 --- /dev/null +++ b/chasm/lib/nexusoperation/operation_test.go @@ -0,0 +1,334 @@ +package nexusoperation + +import ( + "context" + "slices" + "testing" + "time" + 
+ "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/testing/protorequire" + "go.temporal.io/server/common/testing/protoutils" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestIsWaitStageReached(t *testing.T) { + t.Parallel() + + ctx := &chasm.MockContext{} + allStatuses := protoutils.EnumValues[nexusoperationpb.OperationStatus]() + + tests := []struct { + name string + waitStage enumspb.NexusOperationWaitStage + reached []nexusoperationpb.OperationStatus + notReached []nexusoperationpb.OperationStatus + }{ + { + name: "Unspecified", + waitStage: enumspb.NEXUS_OPERATION_WAIT_STAGE_UNSPECIFIED, + notReached: allStatuses, + }, + { + name: "Started", + waitStage: enumspb.NEXUS_OPERATION_WAIT_STAGE_STARTED, + reached: []nexusoperationpb.OperationStatus{ + nexusoperationpb.OPERATION_STATUS_STARTED, + nexusoperationpb.OPERATION_STATUS_SUCCEEDED, + nexusoperationpb.OPERATION_STATUS_FAILED, + nexusoperationpb.OPERATION_STATUS_CANCELED, + nexusoperationpb.OPERATION_STATUS_TERMINATED, + nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + }, + notReached: []nexusoperationpb.OperationStatus{ + nexusoperationpb.OPERATION_STATUS_UNSPECIFIED, + nexusoperationpb.OPERATION_STATUS_SCHEDULED, + nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + }, + }, + { + name: "Closed", + waitStage: enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED, + reached: []nexusoperationpb.OperationStatus{ + nexusoperationpb.OPERATION_STATUS_SUCCEEDED, + nexusoperationpb.OPERATION_STATUS_FAILED, + 
nexusoperationpb.OPERATION_STATUS_CANCELED, + nexusoperationpb.OPERATION_STATUS_TERMINATED, + nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + }, + notReached: []nexusoperationpb.OperationStatus{ + nexusoperationpb.OPERATION_STATUS_UNSPECIFIED, + nexusoperationpb.OPERATION_STATUS_SCHEDULED, + nexusoperationpb.OPERATION_STATUS_BACKING_OFF, + nexusoperationpb.OPERATION_STATUS_STARTED, + }, + }, + } + + coveredWaitStages := []enumspb.NexusOperationWaitStage{} + for _, tt := range tests { + coveredWaitStages = append(coveredWaitStages, tt.waitStage) + t.Run(tt.name, func(t *testing.T) { + op := newTestOperation() + + coveredStatuses := append(slices.Clone(tt.reached), tt.notReached...) + require.ElementsMatch(t, allStatuses, coveredStatuses) + + for _, status := range tt.reached { + op.Status = status + require.Truef(t, op.isWaitStageReached(ctx, tt.waitStage), "expected %s to match %s", status, tt.waitStage) + } + + for _, status := range tt.notReached { + op.Status = status + require.Falsef(t, op.isWaitStageReached(ctx, tt.waitStage), "expected %s not to match %s", status, tt.waitStage) + } + }) + } + + allWaitStages := protoutils.EnumValues[enumspb.NexusOperationWaitStage]() + require.ElementsMatch(t, allWaitStages, coveredWaitStages) +} + +func newScheduledTestOperation(t *testing.T, ctx *chasm.MockMutableContext) *Operation { + t.Helper() + op := newTestOperation() + require.NoError(t, TransitionScheduled.Apply(op, ctx, EventScheduled{})) + return op +} + +func TestHandleNexusCompletion(t *testing.T) { + newStartedOp := func(t *testing.T, ctx *chasm.MockMutableContext) *Operation { + t.Helper() + op := newScheduledTestOperation(t, ctx) + require.NoError(t, TransitionStarted.Apply(op, ctx, EventStarted{OperationToken: "tok"})) + return op + } + ctrl := gomock.NewController(t) + nsRegistry := namespace.NewMockRegistry(ctrl) + nsRegistry.EXPECT().GetNamespaceName(namespace.ID("ns-id")).Return(namespace.Name("ns-name"), nil).AnyTimes() + + newCtx := func() 
*chasm.MockMutableContext { + return &chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNow: func(chasm.Component) time.Time { return defaultTime }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{NamespaceID: "ns-id"} + }, + HandleNamespaceEntry: func() *namespace.Namespace { + return namespace.NewNamespaceForTest(&persistencespb.NamespaceInfo{Name: "ns-name"}, nil, false, nil, 0) + }, + GoCtx: context.WithValue(context.Background(), OperationContextKey, &OperationContext{ + MetricTagConfig: dynamicconfig.GetTypedPropertyFn(NexusMetricTagConfig{}), + }), + }, + } + } + + t.Run("Success", func(t *testing.T) { + t.Run("AfterStarted", func(t *testing.T) { + ctx := newCtx() + op := newStartedOp(t, ctx) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + RequestId: op.GetRequestId(), + Outcome: &persistencespb.ChasmNexusCompletion_Success{ + Success: mustToPayload(t, "result"), + }, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SUCCEEDED, op.GetStatus()) + }) + + t.Run("CompletionBeforeStart", func(t *testing.T) { + ctx := newCtx() + op := newScheduledTestOperation(t, ctx) + startTime := defaultTime.Add(-time.Second) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + StartTime: timestamppb.New(startTime), + RequestId: op.GetRequestId(), + OperationToken: "tok", + Outcome: &persistencespb.ChasmNexusCompletion_Success{ + Success: mustToPayload(t, "result"), + }, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SUCCEEDED, op.GetStatus()) + require.Equal(t, "tok", op.GetOperationToken()) + require.Equal(t, startTime, op.GetStartedTime().AsTime()) + }) + + t.Run("CompletionBeforeStartWithoutStartTime", func(t *testing.T) { + ctx := newCtx() + op := newScheduledTestOperation(t, ctx) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + RequestId: op.GetRequestId(), + 
OperationToken: "tok", + Outcome: &persistencespb.ChasmNexusCompletion_Success{ + Success: mustToPayload(t, "result"), + }, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SUCCEEDED, op.GetStatus()) + require.Equal(t, "tok", op.GetOperationToken()) + require.Equal(t, defaultTime, op.GetStartedTime().AsTime()) + }) + }) + + t.Run("Failure", func(t *testing.T) { + t.Run("AfterStarted", func(t *testing.T) { + ctx := newCtx() + op := newStartedOp(t, ctx) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + RequestId: op.GetRequestId(), + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{Message: "oops"}, + }, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.GetStatus()) + }) + + t.Run("CompletionBeforeStart", func(t *testing.T) { + ctx := newCtx() + op := newScheduledTestOperation(t, ctx) + startTime := defaultTime.Add(-time.Second) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + StartTime: timestamppb.New(startTime), + RequestId: op.GetRequestId(), + OperationToken: "tok", + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{Message: "oops"}, + }, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_FAILED, op.GetStatus()) + require.Equal(t, "tok", op.GetOperationToken()) + require.Equal(t, startTime, op.GetStartedTime().AsTime()) + }) + }) + + t.Run("Canceled", func(t *testing.T) { + t.Run("AfterStarted", func(t *testing.T) { + ctx := newCtx() + op := newStartedOp(t, ctx) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + RequestId: op.GetRequestId(), + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{ + Message: "canceled", + FailureInfo: &failurepb.Failure_CanceledFailureInfo{ + CanceledFailureInfo: &failurepb.CanceledFailureInfo{}, + }, + }, + }, + }) + 
require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_CANCELED, op.GetStatus()) + }) + + t.Run("CompletionBeforeStart", func(t *testing.T) { + ctx := newCtx() + op := newScheduledTestOperation(t, ctx) + startTime := defaultTime.Add(-time.Second) + err := op.HandleNexusCompletion(ctx, &persistencespb.ChasmNexusCompletion{ + StartTime: timestamppb.New(startTime), + RequestId: op.GetRequestId(), + OperationToken: "tok", + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{ + Message: "canceled", + FailureInfo: &failurepb.Failure_CanceledFailureInfo{ + CanceledFailureInfo: &failurepb.CanceledFailureInfo{}, + }, + }, + }, + }) + require.NoError(t, err) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_CANCELED, op.GetStatus()) + require.Equal(t, "tok", op.GetOperationToken()) + require.Equal(t, startTime, op.GetStartedTime().AsTime()) + }) + }) +} + +func TestDescribeOutcome(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + status nexusoperationpb.OperationStatus + outcome *nexusoperationpb.OperationOutcome // nil means no outcome set + lastAttemptFail *failurepb.Failure + expectedResult *commonpb.Payload + expectedFailure *failurepb.Failure + }{ + { + name: "Successful", + status: nexusoperationpb.OPERATION_STATUS_SUCCEEDED, + outcome: &nexusoperationpb.OperationOutcome{ + Variant: &nexusoperationpb.OperationOutcome_Successful_{ + Successful: &nexusoperationpb.OperationOutcome_Successful{Result: payload.EncodeString("result")}, + }, + }, + expectedResult: payload.EncodeString("result"), + }, + { + name: "Failed", + status: nexusoperationpb.OPERATION_STATUS_FAILED, + outcome: &nexusoperationpb.OperationOutcome{ + Variant: &nexusoperationpb.OperationOutcome_Failed_{ + Failed: &nexusoperationpb.OperationOutcome_Failed{ + Failure: &failurepb.Failure{Message: "outcome failure"}, + }, + }, + }, + expectedFailure: &failurepb.Failure{Message: "outcome failure"}, + }, + { + name: 
"NoOutcome_FallsBackToLastAttemptFailure", + status: nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + lastAttemptFail: &failurepb.Failure{Message: "last attempt failure"}, + expectedFailure: &failurepb.Failure{Message: "last attempt failure"}, + }, + { + name: "Outcome_PreferredOverLastAttemptFailure", + status: nexusoperationpb.OPERATION_STATUS_TIMED_OUT, + outcome: &nexusoperationpb.OperationOutcome{ + Variant: &nexusoperationpb.OperationOutcome_Failed_{ + Failed: &nexusoperationpb.OperationOutcome_Failed{ + Failure: &failurepb.Failure{Message: "operation timed out"}, + }, + }, + }, + lastAttemptFail: &failurepb.Failure{Message: "last attempt failure"}, + expectedFailure: &failurepb.Failure{Message: "operation timed out"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := &chasm.MockMutableContext{} + op := NewOperation(&nexusoperationpb.OperationState{ + Status: tc.status, + LastAttemptFailure: tc.lastAttemptFail, + }) + if tc.outcome != nil { + op.Outcome = chasm.NewDataField(ctx, tc.outcome) + } + + result, failure := op.outcome(ctx) + protorequire.ProtoEqual(t, tc.expectedResult, result) + protorequire.ProtoEqual(t, tc.expectedFailure, failure) + }) + } +} diff --git a/chasm/lib/nexusoperation/proto/v1/operation.proto b/chasm/lib/nexusoperation/proto/v1/operation.proto new file mode 100644 index 00000000000..48b03c98f58 --- /dev/null +++ b/chasm/lib/nexusoperation/proto/v1/operation.proto @@ -0,0 +1,149 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.nexusoperation.proto.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/sdk/v1/user_metadata.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpb"; + +message OperationState { + // Current status of the 
operation. + OperationStatus status = 1; + // Endpoint ID - used internally to avoid failing requests when endpoint is renamed. + string endpoint_id = 2; + // Endpoint name - resolved from the endpoint registry for this workflow's namespace. + string endpoint = 3; + // Service name. + string service = 4; + // Operation name. + string operation = 5; + // The time when the operation was scheduled. + google.protobuf.Timestamp scheduled_time = 6; + // The time when the operation was started. Only set for asynchronous operations after a successful StartOperation + // call. Taken from the component time or the time reported in an async completion request, whichever happens first. + google.protobuf.Timestamp started_time = 7; + // The time when the operation reached a terminal state. Taken from the component time or the time reported in an + // async completion request, whichever happens first. + google.protobuf.Timestamp closed_time = 8; + // Schedule-to-start timeout for this operation. + google.protobuf.Duration schedule_to_start_timeout = 9; + // Start-to-close timeout for this operation. + google.protobuf.Duration start_to_close_timeout = 10; + // Schedule-to-close timeout for this operation. + google.protobuf.Duration schedule_to_close_timeout = 11; + // Unique request ID allocated for all retry attempts of the StartOperation request. + string request_id = 12; + // Opaque data injected by the parent (e.g. workflow) for its own bookkeeping. + // The operation component itself does not interpret this field. + google.protobuf.Any parent_data = 13; + // The number of attempts made to deliver the start operation request. + // This number is approximate, it is incremented when a task is added to the history queue. + // In practice, there could be more attempts if a task is executed but fails to commit, or less attempts if a task was + // never executed. + int32 attempt = 14; + // The time when the last attempt completed. 
+ google.protobuf.Timestamp last_attempt_complete_time = 15; + // The last attempt's failure, if any. + temporal.api.failure.v1.Failure last_attempt_failure = 16; + // The time when the next attempt is scheduled (only set when in BACKING_OFF state). + google.protobuf.Timestamp next_attempt_schedule_time = 17; + // Operation token - only set for asynchronous operations after a successful StartOperation call. + string operation_token = 18; + // Explicit terminate request state for standalone operations. + NexusOperationTerminateState terminate_state = 19; + // Links are only populated for standalone operations. Workflow-backed operations derive links from history events. + repeated temporal.api.common.v1.Link links = 20; +} + +message NexusOperationTerminateState { + string request_id = 1; +} + +message OperationOutcome { + message Successful { + temporal.api.common.v1.Payload result = 1; + } + + message Failed { + temporal.api.failure.v1.Failure failure = 1; + } + + oneof variant { + Successful successful = 1; + Failed failed = 2; + } +} + +enum OperationStatus { + // Default value, unspecified status. + OPERATION_STATUS_UNSPECIFIED = 0; + // Operation is in the queue waiting to be executed or is currently executing. + OPERATION_STATUS_SCHEDULED = 1; + // Operation has failed with a retryable error and is backing off before the next attempt. + OPERATION_STATUS_BACKING_OFF = 2; + // Operation was started and will complete asynchronously. + OPERATION_STATUS_STARTED = 3; + // Operation succeeded. + // This may happen either as a response to a start request or as reported via callback. + OPERATION_STATUS_SUCCEEDED = 4; + // Operation failed either when a start request encounters a non-retryable error or as reported via callback. + OPERATION_STATUS_FAILED = 5; + // Operation completed as canceled (may have not ever been delivered). + // This may happen either as a response to a start request or as reported via callback. 
+ OPERATION_STATUS_CANCELED = 6; + // Operation timed out - exceeded the user supplied schedule-to-close timeout. + // Any attempts to complete the operation in this status will be ignored. + OPERATION_STATUS_TIMED_OUT = 7; + OPERATION_STATUS_TERMINATED = 8; +} + +message CancellationState { + // Current status of the cancellation request. + CancellationStatus status = 1; + // The time when cancellation was requested. + google.protobuf.Timestamp requested_time = 2; + // The number of attempts made to deliver the cancel operation request. + // This number represents a minimum bound since the attempt is incremented after the request completes. + int32 attempt = 3; + // The time when the last attempt completed. + google.protobuf.Timestamp last_attempt_complete_time = 4; + // The last attempt's failure, if any. + temporal.api.failure.v1.Failure last_attempt_failure = 5; + // The time when the next attempt is scheduled (only set when in BACKING_OFF state). + google.protobuf.Timestamp next_attempt_schedule_time = 6; + // Opaque data injected by the parent (e.g. workflow) for its own bookkeeping. + // The cancellation component itself does not interpret this field. + google.protobuf.Any parent_data = 7; + string request_id = 8; + string identity = 9; + string reason = 10; +} + +enum CancellationStatus { + // Default value, unspecified status. + CANCELLATION_STATUS_UNSPECIFIED = 0; + // Cancellation request is in the queue waiting to be executed or is currently executing. + CANCELLATION_STATUS_SCHEDULED = 1; + // Cancellation request has failed with a retryable error and is backing off before the next attempt. + CANCELLATION_STATUS_BACKING_OFF = 2; + // Cancellation request succeeded. + CANCELLATION_STATUS_SUCCEEDED = 3; + // Cancellation request failed with a non-retryable error. + CANCELLATION_STATUS_FAILED = 4; + // The associated operation timed out - exceeded the user supplied schedule-to-close timeout. 
+ CANCELLATION_STATUS_TIMED_OUT = 5; + // Cancellation request is blocked (e.g. by circuit breaker). + CANCELLATION_STATUS_BLOCKED = 6; +} + +message OperationRequestData { + temporal.api.common.v1.Payload input = 1; + map<string, string> nexus_header = 2; + temporal.api.sdk.v1.UserMetadata user_metadata = 3; + string identity = 4; +} diff --git a/chasm/lib/nexusoperation/proto/v1/request_response.proto b/chasm/lib/nexusoperation/proto/v1/request_response.proto new file mode 100644 index 00000000000..31fee790e63 --- /dev/null +++ b/chasm/lib/nexusoperation/proto/v1/request_response.proto @@ -0,0 +1,62 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.nexusoperation.proto.v1; + +import "temporal/api/workflowservice/v1/request_response.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpb"; + +message StartNexusOperationRequest { + string namespace_id = 1; + string endpoint_id = 2; + + temporal.api.workflowservice.v1.StartNexusOperationExecutionRequest frontend_request = 3; +} + +message StartNexusOperationResponse { + temporal.api.workflowservice.v1.StartNexusOperationExecutionResponse frontend_response = 1; +} + +message DescribeNexusOperationRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.DescribeNexusOperationExecutionRequest frontend_request = 2; +} + +message DescribeNexusOperationResponse { + temporal.api.workflowservice.v1.DescribeNexusOperationExecutionResponse frontend_response = 1; +} + +message RequestCancelNexusOperationRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.RequestCancelNexusOperationExecutionRequest frontend_request = 2; +} + +message RequestCancelNexusOperationResponse {} + +message TerminateNexusOperationRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.TerminateNexusOperationExecutionRequest frontend_request = 2; +} + +message TerminateNexusOperationResponse {} + +message DeleteNexusOperationRequest { + string 
namespace_id = 1; + + temporal.api.workflowservice.v1.DeleteNexusOperationExecutionRequest frontend_request = 2; +} + +message DeleteNexusOperationResponse {} + +message PollNexusOperationRequest { + string namespace_id = 1; + + temporal.api.workflowservice.v1.PollNexusOperationExecutionRequest frontend_request = 2; +} + +message PollNexusOperationResponse { + temporal.api.workflowservice.v1.PollNexusOperationExecutionResponse frontend_response = 1; +} diff --git a/chasm/lib/nexusoperation/proto/v1/service.proto b/chasm/lib/nexusoperation/proto/v1/service.proto new file mode 100644 index 00000000000..f7e767e484c --- /dev/null +++ b/chasm/lib/nexusoperation/proto/v1/service.proto @@ -0,0 +1,41 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.nexusoperation.proto.v1; + +import "chasm/lib/nexusoperation/proto/v1/request_response.proto"; +import "temporal/server/api/common/v1/api_category.proto"; +import "temporal/server/api/routing/v1/extension.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpb"; + +service NexusOperationService { + rpc StartNexusOperation(StartNexusOperationRequest) returns (StartNexusOperationResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.operation_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc DescribeNexusOperation(DescribeNexusOperationRequest) returns (DescribeNexusOperationResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.operation_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_LONG_POLL; + } + + rpc RequestCancelNexusOperation(RequestCancelNexusOperationRequest) returns (RequestCancelNexusOperationResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.operation_id"; + option (temporal.server.api.common.v1.api_category).category = 
API_CATEGORY_STANDARD; + } + + rpc TerminateNexusOperation(TerminateNexusOperationRequest) returns (TerminateNexusOperationResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.operation_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc DeleteNexusOperation(DeleteNexusOperationRequest) returns (DeleteNexusOperationResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.operation_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc PollNexusOperation(PollNexusOperationRequest) returns (PollNexusOperationResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.operation_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_LONG_POLL; + } +} diff --git a/chasm/lib/nexusoperation/proto/v1/tasks.proto b/chasm/lib/nexusoperation/proto/v1/tasks.proto new file mode 100644 index 00000000000..e67a1c630ee --- /dev/null +++ b/chasm/lib/nexusoperation/proto/v1/tasks.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.nexusoperation.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb;nexusoperationpb"; + +message ScheduleToStartTimeoutTask {} + +message StartToCloseTimeoutTask {} + +message ScheduleToCloseTimeoutTask {} + +message InvocationTask { + int32 attempt = 1; +} + +message InvocationBackoffTask { + int32 attempt = 1; +} + +message CancellationTask { + int32 attempt = 1; +} + +message CancellationBackoffTask { + int32 attempt = 1; +} diff --git a/chasm/lib/nexusoperation/task_handler_base.go b/chasm/lib/nexusoperation/task_handler_base.go new file mode 100644 index 00000000000..2d3d3a28a70 --- /dev/null +++ b/chasm/lib/nexusoperation/task_handler_base.go @@ -0,0 +1,201 @@ +package nexusoperation + +import ( + "context" + "errors" + "fmt" + 
"strings" + "sync/atomic" + "text/template" + "time" + + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + "go.temporal.io/server/common/resource" + "go.uber.org/fx" +) + +// ClientProvider provides a nexus client for a given endpoint. +type ClientProvider func(ctx context.Context, namespaceID string, entry *persistencespb.NexusEndpointEntry, service string) (*nexusrpc.HTTPClient, error) + +// commonTaskHandlerOptions is the fx parameter object for common options supplied to common task handlers. +type commonTaskHandlerOptions struct { + fx.In + + Config *Config + + MetricsHandler metrics.Handler + Logger log.Logger +} + +// InvocationTaskHandlerOptions groups the common dependencies shared by the invocation and cancellation task +// handlers. It embeds fx.In so that dig can flatten its fields when embedded in other dig.In structs. 
+type InvocationTaskHandlerOptions struct { + fx.In + + Config *Config + NamespaceRegistry namespace.Registry + MetricsHandler metrics.Handler + Logger log.Logger + ClientProvider ClientProvider + EndpointRegistry commonnexus.EndpointRegistry + HTTPTraceProvider commonnexus.HTTPClientTraceProvider + HistoryClient resource.HistoryClient + ChasmRegistry *chasm.Registry +} + +func (o InvocationTaskHandlerOptions) toBase() nexusTaskHandlerBase { + return nexusTaskHandlerBase{ + config: o.Config, + namespaceRegistry: o.NamespaceRegistry, + metricsHandler: o.MetricsHandler, + logger: o.Logger, + clientProvider: o.ClientProvider, + endpointRegistry: o.EndpointRegistry, + httpTraceProvider: o.HTTPTraceProvider, + historyClient: o.HistoryClient, + chasmRegistry: o.ChasmRegistry, + } +} + +// nexusTaskHandlerBase contains common dependencies shared by the invocation and cancellation task handlers. +type nexusTaskHandlerBase struct { + config *Config + namespaceRegistry namespace.Registry + metricsHandler metrics.Handler + logger log.Logger + clientProvider ClientProvider + endpointRegistry commonnexus.EndpointRegistry + httpTraceProvider commonnexus.HTTPClientTraceProvider + historyClient resource.HistoryClient + chasmRegistry *chasm.Registry +} + +func (b *nexusTaskHandlerBase) buildCallbackURL( + ns *namespace.Namespace, + endpoint *persistencespb.NexusEndpointEntry, +) (string, error) { + // endpoint is nil for system-internal operations where endpoint lookup is skipped. + // These always use the system callback URL since the callback is handled internally. 
+ if endpoint == nil { + return commonnexus.SystemCallbackURL, nil + } + target := endpoint.GetEndpoint().GetSpec().GetTarget().GetVariant() + if !b.config.UseSystemCallbackURL() { + return buildCallbackFromTemplate(b.config.CallbackURLTemplate(), ns) + } + switch target.(type) { + case *persistencespb.NexusEndpointTarget_Worker_: + return commonnexus.SystemCallbackURL, nil + case *persistencespb.NexusEndpointTarget_External_: + return buildCallbackFromTemplate(b.config.CallbackURLTemplate(), ns) + default: + return "", fmt.Errorf("unknown endpoint target type: %T", target) + } +} + +func buildCallbackFromTemplate(callbackTemplate *template.Template, ns *namespace.Namespace) (string, error) { + if callbackTemplate == nil { + return "", serviceerror.NewInternalf("dynamic config %q is unset", CallbackURLTemplate.Key().String()) + } + builder := &strings.Builder{} + err := callbackTemplate.Execute(builder, struct{ NamespaceName, NamespaceID string }{ + NamespaceName: ns.Name().String(), + NamespaceID: ns.ID().String(), + }) + if err != nil { + return "", fmt.Errorf("failed to format callback URL: %w", err) + } + return builder.String(), nil +} + +// newInvocation creates an invocation for the given endpoint, selecting the appropriate implementation +// based on the call timeout and endpoint type. 
+func (b *nexusTaskHandlerBase) newInvocation( + ctx context.Context, + ns *namespace.Namespace, + endpoint *persistencespb.NexusEndpointEntry, + endpointName string, + service string, + callTimeout time.Duration, + timeoutType enumspb.TimeoutType, + traceCtx invocationTraceContext, +) (invocation, error) { + if callTimeout < b.config.MinRequestTimeout(ns.Name().String()) { + return &invocationTimeout{timeoutType}, nil + } + if endpointName == commonnexus.SystemEndpoint { + return b.newInvocationSystem(ns), nil + } + return b.newInvocationHTTP(ctx, ns, endpoint, service, traceCtx) +} + +// lookupEndpoint gets an endpoint from the registry, preferring to look up by ID and falling back to name lookup. +// The fallback is needed because endpoints may be deleted and recreated with the same name but a different ID. +// In that case, the ID stored in the operation state becomes stale, but the name-based lookup still resolves correctly. +// Returns a nil entry if the endpoint name is the system nexus endpoint. +func (b *nexusTaskHandlerBase) lookupEndpoint(ctx context.Context, namespaceID namespace.ID, endpointID, endpointName string) (*persistencespb.NexusEndpointEntry, error) { + // Skip endpoint lookup for system-internal operations. + if endpointName == commonnexus.SystemEndpoint { + return nil, nil + } + + entry, err := b.endpointRegistry.GetByID(ctx, endpointID) + if err != nil { + if _, ok := errors.AsType[*serviceerror.NotFound](err); ok { + // Endpoint was not found by ID, fall back to name lookup. + return b.endpointRegistry.GetByName(ctx, namespaceID, endpointName) + } + return nil, err + } + return entry, nil +} + +// setupCallContext creates a context with a timeout and attaches the failure source tracking value. 
+func (b *nexusTaskHandlerBase) setupCallContext(ctx context.Context, timeout time.Duration) (context.Context, context.CancelFunc) { + callCtx, cancel := context.WithTimeout(ctx, timeout) + callCtx = context.WithValue(callCtx, commonnexus.FailureSourceContextKey, &atomic.Value{}) + return callCtx, cancel +} + +// recordCallOutcome records metrics and logs errors for the outcome of an outbound Nexus call. +func (b *nexusTaskHandlerBase) recordCallOutcome( + ns *namespace.Namespace, + endpoint *persistencespb.NexusEndpointEntry, + endpointName string, + methodName string, + outcomeTag string, + callErr error, + callDuration time.Duration, + failureSource string, +) { + methodTag := metrics.NexusMethodTag(methodName) + namespaceTag := metrics.NamespaceTag(ns.Name().String()) + var destTag metrics.Tag + if endpoint != nil { + destTag = metrics.DestinationTag(endpoint.Endpoint.Spec.GetName()) + } else { + destTag = metrics.DestinationTag(endpointName) + } + outcomeMetricTag := metrics.OutcomeTag(outcomeTag) + failureSourceTag := metrics.FailureSourceTag(failureSource) + OutboundRequestCounter.With(b.metricsHandler).Record(1, namespaceTag, destTag, methodTag, outcomeMetricTag, failureSourceTag) + OutboundRequestLatency.With(b.metricsHandler).Record(callDuration, namespaceTag, destTag, methodTag, outcomeMetricTag, failureSourceTag) + + if callErr != nil { + _, isTimeoutBelowMin := errors.AsType[*operationTimeoutBelowMinError](callErr) + if failureSource == commonnexus.FailureSourceWorker || isTimeoutBelowMin { + b.logger.Debug(fmt.Sprintf("Nexus %s request failed", methodName), tag.Error(callErr)) + } else { + b.logger.Error(fmt.Sprintf("Nexus %s request failed", methodName), tag.Error(callErr)) + } + } +} diff --git a/chasm/lib/nexusoperation/task_handler_helpers.go b/chasm/lib/nexusoperation/task_handler_helpers.go new file mode 100644 index 00000000000..060c1398812 --- /dev/null +++ b/chasm/lib/nexusoperation/task_handler_helpers.go @@ -0,0 +1,407 @@ +package 
nexusoperation + +import ( + "context" + "errors" + "fmt" + "strings" + "sync/atomic" + "text/template" + "time" + + "github.com/nexus-rpc/sdk-go/nexus" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" + tokenspb "go.temporal.io/server/api/token/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/namespace" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + queueserrors "go.temporal.io/server/service/history/queues/errors" +) + +var ( + ErrResponseBodyTooLarge = errors.New("http: response body too large") + ErrInvalidOperationToken = errors.New("invalid operation token") + errRequestTimedOut = errors.New("request timed out") + errOpProcessorFailed = errors.New("nexus operation processor failed") +) + +const maxDuration = time.Duration(1<<63 - 1) + +type operationTimeoutBelowMinError struct { + timeoutType enumspb.TimeoutType +} + +func (o *operationTimeoutBelowMinError) Error() string { + return fmt.Sprintf("not enough time to execute another request before %s timeout", o.timeoutType.String()) +} + +func convertResponseLinks(links []nexus.Link, logger log.Logger) []*commonpb.Link { + var result []*commonpb.Link + for _, nexusLink := range links { + switch nexusLink.Type { + case string((&commonpb.Link_WorkflowEvent{}).ProtoReflect().Descriptor().FullName()): + link, err := commonnexus.ConvertNexusLinkToLinkWorkflowEvent(nexusLink) + if err != nil { + logger.Error( + fmt.Sprintf("failed to parse link to %q: %s", nexusLink.Type, nexusLink.URL), + tag.Error(err), + ) + continue + } + result = append(result, &commonpb.Link{ + Variant: &commonpb.Link_WorkflowEvent_{ + WorkflowEvent: link, + }, + }) + default: + logger.Error(fmt.Sprintf("invalid link data 
type: %q", nexusLink.Type)) + } + } + return result +} + +func isDestinationDown(err error) bool { + if _, ok := errors.AsType[serviceerror.ServiceError](err); ok { + return false + } + if _, ok := errors.AsType[*nexus.OperationError](err); ok { + return false + } + if handlerError, ok := errors.AsType[*nexus.HandlerError](err); ok { + return handlerError.Retryable() + } + if errors.Is(err, errOpProcessorFailed) { + return false + } + if errors.Is(err, ErrResponseBodyTooLarge) { + return false + } + if errors.Is(err, ErrInvalidOperationToken) { + return false + } + _, ok := errors.AsType[*operationTimeoutBelowMinError](err) + return !ok +} + +func failureSourceFromContext(ctx context.Context) string { + ctxVal := ctx.Value(commonnexus.FailureSourceContextKey) + if ctxVal == nil { + return "" + } + val, ok := ctxVal.(*atomic.Value) + if !ok { + return "" + } + src := val.Load() + if src == nil { + return "" + } + source, ok := src.(string) + if !ok { + return "" + } + return source +} + +func startCallOutcomeTag(callCtx context.Context, result *nexusrpc.ClientStartOperationResponse[*commonpb.Payload], callErr error) string { + if callErr != nil { + if _, ok := errors.AsType[*operationTimeoutBelowMinError](callErr); ok { + return "operation-timeout" + } + if errors.Is(callErr, ErrInvalidOperationToken) { + return "invalid-operation-token" + } + if errors.Is(callErr, errOpProcessorFailed) { + return "operation-processor-failed" + } + if callCtx.Err() != nil { + return "request-timeout" + } + if serviceErr, ok := errors.AsType[serviceerror.ServiceError](callErr); ok { + return "service-error:" + strings.Replace(fmt.Sprintf("%T", serviceErr), "*serviceerror.", "", 1) + } + if opFailedError, ok := errors.AsType[*nexus.OperationError](callErr); ok { + return "operation-unsuccessful:" + string(opFailedError.State) + } + if handlerError, ok := errors.AsType[*nexus.HandlerError](callErr); ok { + return "handler-error:" + string(handlerError.Type) + } + return "unknown-error" 
+ } + if result.Pending != nil { + return "pending" + } + return "successful" +} + +// cancelCallOutcomeTag returns a metric tag for the outcome of a cancel call. +func cancelCallOutcomeTag(callCtx context.Context, callErr error) string { + if callErr != nil { + if errors.Is(callErr, errOpProcessorFailed) { + return "operation-processor-failed" + } + if _, ok := errors.AsType[*operationTimeoutBelowMinError](callErr); ok { + return "operation-timeout" + } + if callCtx.Err() != nil { + return "request-timeout" + } + if handlerErr, ok := errors.AsType[*nexus.HandlerError](callErr); ok { + return "handler-error:" + string(handlerErr.Type) + } + if serviceErr, ok := errors.AsType[serviceerror.ServiceError](callErr); ok { + return "service-error:" + strings.Replace(fmt.Sprintf("%T", serviceErr), "*serviceerror.", "", 1) + } + return "unknown-error" + } + return "successful" +} + +// callErrorToFailure converts a Nexus call error to a Temporal failure with retryability. +// Always returns a non-nil failure. 
func callErrorToFailure(callErr error) (*failurepb.Failure, bool, error) {
	// NOTE(review): the doc comment above claims this "always returns a non-nil
	// failure", but the failure-converter error paths below return (nil, false, err).
	// Confirm callers tolerate a nil failure when err != nil, or soften the comment.
	// NOTE(review): this logic substantially duplicates newInvocationResult in
	// task_handler_helpers.go — consider sharing the classification; verify first.

	// RPC-layer service errors: strip the "*serviceerror." type prefix for the
	// message and mirror retryability into the ServerFailureInfo.
	if serviceErr, ok := errors.AsType[serviceerror.ServiceError](callErr); ok {
		retryable := common.IsRetryableRPCError(callErr)
		failure := &failurepb.Failure{
			Message: fmt.Sprintf("%s: %s", strings.Replace(fmt.Sprintf("%T", serviceErr), "*serviceerror.", "", 1), serviceErr.Error()),
			FailureInfo: &failurepb.Failure_ServerFailureInfo{
				ServerFailureInfo: &failurepb.ServerFailureInfo{
					NonRetryable: !retryable,
				},
			},
		}
		return failure, retryable, nil
	}

	// Nexus handler errors: prefer the handler's original failure payload when
	// present; otherwise synthesize one via the default failure converter.
	if handlerErr, ok := errors.AsType[*nexus.HandlerError](callErr); ok {
		var nf nexus.Failure
		if handlerErr.OriginalFailure != nil {
			nf = *handlerErr.OriginalFailure
		} else {
			var err error
			nf, err = nexusrpc.DefaultFailureConverter().ErrorToFailure(handlerErr)
			if err != nil {
				return nil, false, err
			}
		}
		failure, err := commonnexus.NexusFailureToTemporalFailure(nf)
		if err != nil {
			return nil, false, err
		}
		return failure, handlerErr.Retryable(), nil
	}

	if errors.Is(callErr, context.DeadlineExceeded) || errors.Is(callErr, context.Canceled) {
		// If timed out, don't leak internal info to the user.
		callErr = errRequestTimedOut
	}

	// Fallback to server failure. Only the oversized-body and invalid-token
	// sentinels are treated as terminal; everything else remains retryable.
	retryable := !errors.Is(callErr, ErrResponseBodyTooLarge) && !errors.Is(callErr, ErrInvalidOperationToken)
	failure := &failurepb.Failure{
		Message: callErr.Error(),
		FailureInfo: &failurepb.Failure_ServerFailureInfo{
			ServerFailureInfo: &failurepb.ServerFailureInfo{
				NonRetryable: !retryable,
			},
		},
	}
	return failure, retryable, nil
}

// invocationResult is a marker interface for the outcome of a Nexus start operation call.
type invocationResult interface {
	mustImplementInvocationResult()
}

// invocationResultOK: the start call succeeded (synchronously or pending).
type invocationResultOK struct {
	response *nexusrpc.ClientStartOperationResponse[*commonpb.Payload]
}

func (invocationResultOK) mustImplementInvocationResult() {}

// invocationResultFail: the operation failed terminally; do not retry.
type invocationResultFail struct {
	failure *failurepb.Failure
}

func (invocationResultFail) mustImplementInvocationResult() {}

// invocationResultRetry: a transient error; the invocation should be retried.
type invocationResultRetry struct {
	failure *failurepb.Failure
}

func (invocationResultRetry) mustImplementInvocationResult() {}

// invocationResultCancel: the handler reported the operation as canceled.
type invocationResultCancel struct {
	failure *failurepb.Failure
}

func (invocationResultCancel) mustImplementInvocationResult() {}

// invocationResultTimeout: the operation's own timeout elapsed before a call
// could complete.
type invocationResultTimeout struct {
	failure *failurepb.Failure
}

func (invocationResultTimeout) mustImplementInvocationResult() {}

// newInvocationResult classifies the outcome of a start call into one of the
// invocationResult variants. A non-nil error is returned only when a failure
// payload could not be converted (the caller has no failure to record then).
// NOTE(review): the classification chain largely duplicates callErrorToFailure
// in this file — consider consolidating; verify retryability semantics match first.
func newInvocationResult(
	response *nexusrpc.ClientStartOperationResponse[*commonpb.Payload],
	callErr error,
) (invocationResult, error) {
	if callErr == nil {
		return invocationResultOK{response: response}, nil
	}

	// RPC-layer service errors: retryability decides between retry and terminal
	// failure; the "*serviceerror." type prefix is stripped from the message.
	if serviceErr, ok := errors.AsType[serviceerror.ServiceError](callErr); ok {
		retryable := common.IsRetryableRPCError(callErr)
		failure := &failurepb.Failure{
			Message: fmt.Sprintf("%s: %s", strings.Replace(fmt.Sprintf("%T", serviceErr), "*serviceerror.", "", 1), serviceErr.Error()),
			FailureInfo: &failurepb.Failure_ServerFailureInfo{
				ServerFailureInfo: &failurepb.ServerFailureInfo{
					NonRetryable: !retryable,
				},
			},
		}
		if retryable {
			return invocationResultRetry{failure: failure}, nil
		}
		return invocationResultFail{failure: failure}, nil
	}

	// Nexus handler errors: prefer the handler's original failure payload when
	// present; otherwise synthesize one via the default failure converter.
	if handlerErr, ok := errors.AsType[*nexus.HandlerError](callErr); ok {
		var nf nexus.Failure
		if handlerErr.OriginalFailure != nil {
			nf = *handlerErr.OriginalFailure
		} else {
			var err error
			nf, err = nexusrpc.DefaultFailureConverter().ErrorToFailure(handlerErr)
			if err != nil {
				return nil, err
			}
		}
		failure, err := commonnexus.NexusFailureToTemporalFailure(nf)
		if err != nil {
			return nil, err
		}
		if handlerErr.Retryable() {
			return invocationResultRetry{failure: failure}, nil
		}
		return invocationResultFail{failure: failure}, nil
	}

	// Operation errors report a definitive terminal state: canceled maps to the
	// cancel variant, anything else to terminal failure.
	if opErr, ok := errors.AsType[*nexus.OperationError](callErr); ok {
		failure, err := operationErrorToFailure(opErr)
		if err != nil {
			return nil, err
		}
		if opErr.State == nexus.OperationStateCanceled {
			return invocationResultCancel{failure: failure}, nil
		}
		return invocationResultFail{failure: failure}, nil
	}

	// Not enough time remained before the operation's own timeout to attempt a
	// call; surface it as a timeout with the configured timeout type.
	if opTimeoutBelowMinErr, ok := errors.AsType[*operationTimeoutBelowMinError](callErr); ok {
		failure := &failurepb.Failure{
			Message: "operation timed out",
			FailureInfo: &failurepb.Failure_TimeoutFailureInfo{
				TimeoutFailureInfo: &failurepb.TimeoutFailureInfo{
					TimeoutType: opTimeoutBelowMinErr.timeoutType,
				},
			},
		}
		return invocationResultTimeout{failure: failure}, nil
	}

	// Replace context cancellation/deadline errors so internal details are not
	// leaked into the recorded failure message.
	if errors.Is(callErr, context.DeadlineExceeded) || errors.Is(callErr, context.Canceled) {
		callErr = errRequestTimedOut
	}

	// Fallback: a generic server failure. Only the oversized-body and
	// invalid-token sentinels are terminal; everything else is retried.
	failure := &failurepb.Failure{
		Message: callErr.Error(),
		FailureInfo: &failurepb.Failure_ServerFailureInfo{
			ServerFailureInfo: &failurepb.ServerFailureInfo{},
		},
	}
	if errors.Is(callErr, ErrResponseBodyTooLarge) || errors.Is(callErr, ErrInvalidOperationToken) {
		failure.GetServerFailureInfo().NonRetryable = true
		return invocationResultFail{failure: failure}, nil
	}
	return invocationResultRetry{failure: failure}, nil
}

// operationErrorToFailure converts a Nexus operation error into a Temporal
// failure, preferring the handler's original failure payload when available.
// When the failure's metadata carries unwrap-error=true and a cause is present,
// the cause is converted instead of the wrapper.
func operationErrorToFailure(opErr *nexus.OperationError) (*failurepb.Failure, error) {
	var nf nexus.Failure
	if opErr.OriginalFailure != nil {
		nf = *opErr.OriginalFailure
	} else {
		var err error
		nf, err = nexusrpc.DefaultFailureConverter().ErrorToFailure(opErr)
		if err != nil {
			return nil, err
		}
	}
	unwrapError := nf.Metadata["unwrap-error"] == "true"
	if unwrapError && nf.Cause != nil {
		return commonnexus.NexusFailureToTemporalFailure(*nf.Cause)
	}
	return commonnexus.NexusFailureToTemporalFailure(nf)
}

// buildCallbackURL resolves the callback URL for an operation's completion.
// A nil endpoint (system-internal operations) always yields the system
// callback URL; otherwise worker-target endpoints use the system URL and
// external targets the templated URL, unless useSystemCallback is disabled,
// which forces the template for every target.
// NOTE(review): duplicates nexusTaskHandlerBase.buildCallbackURL in
// task_handler_base.go (that version reads the same inputs from config) —
// consider keeping only one; verify all call sites first.
func buildCallbackURL(
	useSystemCallback bool,
	callbackTemplate *template.Template,
	ns *namespace.Namespace,
	endpoint *persistencespb.NexusEndpointEntry,
) (string, error) {
	if endpoint == nil {
		return commonnexus.SystemCallbackURL, nil
	}
	target := endpoint.GetEndpoint().GetSpec().GetTarget().GetVariant()
	if !useSystemCallback {
		return buildCallbackFromTemplate(callbackTemplate, ns)
	}
	switch target.(type) {
	case *persistencespb.NexusEndpointTarget_Worker_:
		return commonnexus.SystemCallbackURL, nil
	case *persistencespb.NexusEndpointTarget_External_:
		return buildCallbackFromTemplate(callbackTemplate, ns)
	default:
		return "", fmt.Errorf("unknown endpoint target type: %T", target)
	}
}

// lookupEndpoint gets an endpoint from the registry, preferring to look up by ID and falling back to name lookup.
// The fallback is needed because endpoints may be deleted and recreated with the same name but a different ID.
// In that case, the ID stored in the operation state becomes stale, but the name-based lookup still resolves correctly.
// NOTE(review): duplicates nexusTaskHandlerBase.lookupEndpoint in
// task_handler_base.go, but unlike the method it does NOT short-circuit for
// commonnexus.SystemEndpoint — confirm callers never pass the system endpoint
// name here, or align the two implementations.
func lookupEndpoint(ctx context.Context, registry commonnexus.EndpointRegistry, namespaceID namespace.ID, endpointID, endpointName string) (*persistencespb.NexusEndpointEntry, error) {
	entry, err := registry.GetByID(ctx, endpointID)
	if err != nil {
		if _, ok := errors.AsType[*serviceerror.NotFound](err); ok {
			// Stale or unknown ID; retry by name.
			return registry.GetByName(ctx, namespaceID, endpointName)
		}
		return nil, err
	}
	return entry, nil
}

// generateCallbackToken creates a callback token for the given operation reference.
+func (h *operationInvocationTaskHandler) generateCallbackToken( + serializedRef []byte, + requestID string, +) (string, error) { + token, err := h.callbackTokenGenerator.Tokenize(&tokenspb.NexusOperationCompletion{ + ComponentRef: serializedRef, + RequestId: requestID, + }) + if err != nil { + return "", fmt.Errorf("%w: %w", queueserrors.NewUnprocessableTaskError("failed to generate a callback token"), err) + } + return token, nil +} diff --git a/chasm/lib/nexusoperation/tasks_test_helpers_test.go b/chasm/lib/nexusoperation/tasks_test_helpers_test.go new file mode 100644 index 00000000000..9e551028b28 --- /dev/null +++ b/chasm/lib/nexusoperation/tasks_test_helpers_test.go @@ -0,0 +1,128 @@ +package nexusoperation + +import ( + "testing" + "time" + + "github.com/google/uuid" + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + failurepb "go.temporal.io/api/failure/v1" + "go.temporal.io/sdk/converter" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" +) + +var endpointEntry = &persistencespb.NexusEndpointEntry{ + Id: "endpoint-id", + Endpoint: &persistencespb.NexusEndpoint{ + Spec: &persistencespb.NexusEndpointSpec{ + Name: "endpoint", + Target: &persistencespb.NexusEndpointTarget{ + Variant: &persistencespb.NexusEndpointTarget_External_{ + External: &persistencespb.NexusEndpointTarget_External{ + Url: "http://" + uuid.NewString(), + }, + }, + }, + }, + }, +} + +func mustToPayload(t *testing.T, input any) *commonpb.Payload { + conv := converter.GetDefaultDataConverter() + payload, err := conv.ToPayload(input) + require.NoError(t, err) + return payload +} + +// mockStoreComponent is a mock parent component that implements OperationStore. +// It allows the Operation to load its start args and apply transitions. 
+// TODO(stephan): Remove this layer from tests once loading invocation data from the operation component is implemented. +type mockStoreComponent struct { + chasm.UnimplementedComponent + + // Data is required by CHASM for serialization - every component needs a proto.Message field. + Data *nexusoperationpb.OperationState + + invocationData InvocationData + Op chasm.Field[*Operation] + startLinks []*commonpb.Link + completionLinks []*commonpb.Link + startTime *time.Time +} + +func (m *mockStoreComponent) LifecycleState(_ chasm.Context) chasm.LifecycleState { + return chasm.LifecycleStateRunning +} + +func (m *mockStoreComponent) ContextMetadata(_ chasm.Context) map[string]string { + return nil +} + +func (m *mockStoreComponent) Terminate(_ chasm.MutableContext, _ chasm.TerminateComponentRequest) (chasm.TerminateComponentResponse, error) { + return chasm.TerminateComponentResponse{}, nil +} + +func (m *mockStoreComponent) NexusOperationInvocationData(_ chasm.Context, _ *Operation) (InvocationData, error) { + return m.invocationData, nil +} + +func (m *mockStoreComponent) OnNexusOperationStarted(ctx chasm.MutableContext, op *Operation, operationToken string, startTime *time.Time, links []*commonpb.Link) error { + m.startTime = startTime + m.startLinks = links + return TransitionStarted.Apply(op, ctx, EventStarted{ + OperationToken: operationToken, + StartTime: startTime, + }) +} + +func (m *mockStoreComponent) OnNexusOperationCompleted(ctx chasm.MutableContext, op *Operation, result *commonpb.Payload, links []*commonpb.Link) error { + m.completionLinks = links + return TransitionSucceeded.Apply(op, ctx, EventSucceeded{Result: result}) +} + +func (m *mockStoreComponent) OnNexusOperationFailed(ctx chasm.MutableContext, op *Operation, cause *failurepb.Failure) error { + return TransitionFailed.Apply(op, ctx, EventFailed{Failure: cause}) +} + +func (m *mockStoreComponent) OnNexusOperationCanceled(ctx chasm.MutableContext, op *Operation, cause *failurepb.Failure) error { 
+ return TransitionCanceled.Apply(op, ctx, EventCanceled{Failure: cause}) +} + +func (m *mockStoreComponent) OnNexusOperationTimedOut(ctx chasm.MutableContext, op *Operation, cause *failurepb.Failure, fromAttempt bool) error { + return TransitionTimedOut.Apply(op, ctx, EventTimedOut{ + Failure: cause, + FromAttempt: fromAttempt, + }) +} + +func (m *mockStoreComponent) OnNexusOperationCancellationCompleted(ctx chasm.MutableContext, op *Operation) error { + cancellation, _ := op.Cancellation.TryGet(ctx) + return TransitionCancellationSucceeded.Apply(cancellation, ctx, EventCancellationSucceeded{}) +} + +func (m *mockStoreComponent) OnNexusOperationCancellationFailed(ctx chasm.MutableContext, op *Operation, cause *failurepb.Failure) error { + cancellation, _ := op.Cancellation.TryGet(ctx) + return TransitionCancellationFailed.Apply(cancellation, ctx, EventCancellationFailed{Failure: cause}) +} + +func (m *mockStoreComponent) WorkflowTypeName() string { + return "" +} + +// mockStoreLibrary registers the mockStoreComponent so the CHASM tree can work with it. 
+type mockStoreLibrary struct { + chasm.UnimplementedLibrary +} + +func (l *mockStoreLibrary) Name() string { + return "mockStore" +} + +func (l *mockStoreLibrary) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*mockStoreComponent]("mockStore"), + } +} diff --git a/chasm/lib/nexusoperation/validator.go b/chasm/lib/nexusoperation/validator.go new file mode 100644 index 00000000000..de9028f5a7b --- /dev/null +++ b/chasm/lib/nexusoperation/validator.go @@ -0,0 +1,340 @@ +package nexusoperation + +import ( + "errors" + "fmt" + "slices" + "strings" + + "github.com/google/uuid" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" + "google.golang.org/protobuf/types/known/durationpb" +) + +// ValidateServiceName checks that the service name does not exceed the configured limit. +func ValidateServiceName(service string, limit int) error { + if len(service) > limit { + return fmt.Errorf("service exceeds length limit. Length=%d Limit=%d", len(service), limit) + } + return nil +} + +// ValidateOperationName checks that the operation name does not exceed the configured limit. +func ValidateOperationName(operation string, limit int) error { + if len(operation) > limit { + return fmt.Errorf("operation exceeds length limit. Length=%d Limit=%d", len(operation), limit) + } + return nil +} + +// ValidateAndLowercaseNexusHeaders validates headers and returns a new map with lower-cased keys. 
+func ValidateAndLowercaseNexusHeaders(headers map[string]string, disallowed []string, sizeLimit int) (map[string]string, error) { + headerLength := 0 + lowered := make(map[string]string, len(headers)) + for k, v := range headers { + lowerK := strings.ToLower(k) + headerLength += len(lowerK) + len(v) + if slices.Contains(disallowed, lowerK) { + return nil, fmt.Errorf("nexus_header contains a disallowed key: %q", k) + } + lowered[lowerK] = v + } + if headerLength > sizeLimit { + return nil, errors.New("nexus_header exceeds size limit") + } + return lowered, nil +} + +// ValidatePayloadSize checks that the payload does not exceed the size limit. +func ValidatePayloadSize(input *commonpb.Payload, limit int) error { + if input.Size() > limit { + return errors.New("input exceeds size limit") + } + return nil +} + +//revive:disable-next-line:cognitive-complexity,cyclomatic +func validateAndNormalizeStartRequest( + req *workflowservice.StartNexusOperationExecutionRequest, + config *Config, + logger log.Logger, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) error { + ns := req.GetNamespace() + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } else if len(req.GetRequestId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("request_id exceeds length limit. Length=%d Limit=%d", + len(req.GetRequestId()), config.MaxIDLengthLimit()) + } + if req.GetOperationId() == "" { + return serviceerror.NewInvalidArgument("operation_id is required") + } + if len(req.GetOperationId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("operation_id exceeds length limit. Length=%d Limit=%d", + len(req.GetOperationId()), config.MaxIDLengthLimit()) + } + if len(req.GetIdentity()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("identity exceeds length limit. 
Length=%d Limit=%d", + len(req.GetIdentity()), config.MaxIDLengthLimit()) + } + if req.GetEndpoint() == "" { + return serviceerror.NewInvalidArgument("endpoint is required") + } + if req.GetService() == "" { + return serviceerror.NewInvalidArgument("service is required") + } + if err := ValidateServiceName(req.GetService(), config.MaxServiceNameLength(ns)); err != nil { + return serviceerror.NewInvalidArgument(err.Error()) + } + if req.GetOperation() == "" { + return serviceerror.NewInvalidArgument("operation is required") + } + if err := ValidateOperationName(req.GetOperation(), config.MaxOperationNameLength(ns)); err != nil { + return serviceerror.NewInvalidArgument(err.Error()) + } + if err := timestamp.ValidateAndCapProtoDuration(req.GetScheduleToCloseTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("schedule_to_close_timeout is invalid: %v", err) + } + if err := timestamp.ValidateAndCapProtoDuration(req.GetScheduleToStartTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("schedule_to_start_timeout is invalid: %v", err) + } + if err := timestamp.ValidateAndCapProtoDuration(req.GetStartToCloseTimeout()); err != nil { + return serviceerror.NewInvalidArgumentf("start_to_close_timeout is invalid: %v", err) + } + + scheduleToCloseTimeout := req.GetScheduleToCloseTimeout().AsDuration() + maxTimeout := config.MaxOperationScheduleToCloseTimeout(ns) + if maxTimeout > 0 { + if scheduleToCloseTimeout == 0 || scheduleToCloseTimeout > maxTimeout { + // Apply the effective namespace limit to schedule_to_close_timeout before capping the other timeouts. + req.ScheduleToCloseTimeout = durationpb.New(maxTimeout) + scheduleToCloseTimeout = maxTimeout + } + } + + // Bound schedule_to_start_timeout and start_to_close_timeout to schedule_to_close_timeout. 
+ if scheduleToCloseTimeout > 0 { + if req.GetScheduleToStartTimeout().AsDuration() > scheduleToCloseTimeout { + req.ScheduleToStartTimeout = req.GetScheduleToCloseTimeout() + } + if req.GetStartToCloseTimeout().AsDuration() > scheduleToCloseTimeout { + req.StartToCloseTimeout = req.GetScheduleToCloseTimeout() + } + } + + inputSize := req.GetInput().Size() + if inputSize > config.PayloadSizeLimitWarn(ns) { + logger.Warn("Nexus Start Operation input size exceeds the warning limit.", + tag.WorkflowNamespace(ns), + tag.OperationID(req.GetOperationId()), + tag.BlobSize(int64(inputSize)), + tag.BlobSizeViolationOperation("StartNexusOperationExecution")) + } + if inputSize > config.PayloadSizeLimit(ns) { + return serviceerror.NewInvalidArgumentf("input exceeds size limit. Length=%d Limit=%d", + inputSize, config.PayloadSizeLimit(ns)) + } + + if summary := req.GetUserMetadata().GetSummary(); summary != nil && summary.Size() > config.MaxUserMetadataSummarySize(ns) { + return serviceerror.NewInvalidArgumentf( + "user_metadata.summary exceeds size limit. Length=%d Limit=%d", + summary.Size(), + config.MaxUserMetadataSummarySize(ns), + ) + } + if details := req.GetUserMetadata().GetDetails(); details != nil && details.Size() > config.MaxUserMetadataDetailsSize(ns) { + return serviceerror.NewInvalidArgumentf( + "user_metadata.details exceeds size limit. Length=%d Limit=%d", + details.Size(), + config.MaxUserMetadataDetailsSize(ns), + ) + } + + loweredHeaders, err := ValidateAndLowercaseNexusHeaders(req.GetNexusHeader(), config.DisallowedOperationHeaders(), config.MaxOperationHeaderSize(ns)) + if err != nil { + return serviceerror.NewInvalidArgument(err.Error()) + } + req.NexusHeader = loweredHeaders + if err := validateAndNormalizeSearchAttributes(req, saMapperProvider, saValidator); err != nil { + // SA validator already returns properly typed gRPC status errors; no need to re-wrap. 
+ return err + } + if req.GetIdReusePolicy() == enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_UNSPECIFIED { + req.IdReusePolicy = enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_ALLOW_DUPLICATE + } + if req.GetIdConflictPolicy() == enumspb.NEXUS_OPERATION_ID_CONFLICT_POLICY_UNSPECIFIED { + req.IdConflictPolicy = enumspb.NEXUS_OPERATION_ID_CONFLICT_POLICY_FAIL + } + return nil +} + +func validateAndNormalizeDeleteRequest(req *workflowservice.DeleteNexusOperationExecutionRequest, config *Config) error { + if req.GetOperationId() == "" { + return serviceerror.NewInvalidArgument("operation_id is required") + } + if len(req.GetOperationId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("operation_id exceeds length limit. Length=%d Limit=%d", + len(req.GetOperationId()), config.MaxIDLengthLimit()) + } + if req.GetRunId() != "" { + if err := uuid.Validate(req.GetRunId()); err != nil { + return serviceerror.NewInvalidArgument("invalid run id: must be a valid UUID") + } + } + return nil +} + +func validateAndNormalizeDescribeRequest( + req *workflowservice.DescribeNexusOperationExecutionRequest, + namespaceID string, + config *Config, +) error { + if req.GetOperationId() == "" { + return serviceerror.NewInvalidArgument("operation_id is required") + } + if len(req.GetOperationId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("operation_id exceeds length limit. 
Length=%d Limit=%d", + len(req.GetOperationId()), config.MaxIDLengthLimit()) + } + if len(req.GetLongPollToken()) > 0 && req.GetRunId() == "" { + return serviceerror.NewInvalidArgument("run_id is required when long_poll_token is provided") + } + if req.GetRunId() != "" { + if err := uuid.Validate(req.GetRunId()); err != nil { + return serviceerror.NewInvalidArgument("run_id is not a valid UUID") + } + } + if len(req.GetLongPollToken()) > 0 { + ref, err := chasm.DeserializeComponentRef(req.GetLongPollToken()) + if err != nil { + return serviceerror.NewInvalidArgument("invalid long poll token") + } + if ref.NamespaceID != namespaceID { + return serviceerror.NewInvalidArgument("long poll token does not match execution") + } + } + return nil +} + +func validateAndNormalizePollRequest(req *workflowservice.PollNexusOperationExecutionRequest, config *Config) error { + // Normalize wait stage: UNSPECIFIED defaults to CLOSED. + if req.GetWaitStage() == enumspb.NEXUS_OPERATION_WAIT_STAGE_UNSPECIFIED { + req.WaitStage = enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED + } else { + switch req.GetWaitStage() { + case enumspb.NEXUS_OPERATION_WAIT_STAGE_STARTED, + enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED: + default: + return serviceerror.NewInvalidArgumentf("unsupported wait_stage: %s", req.GetWaitStage()) + } + } + if req.GetOperationId() == "" { + return serviceerror.NewInvalidArgument("operation_id is required") + } + if len(req.GetOperationId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("operation_id exceeds length limit. 
Length=%d Limit=%d", + len(req.GetOperationId()), config.MaxIDLengthLimit()) + } + if runID := req.GetRunId(); runID != "" { + if err := uuid.Validate(runID); err != nil { + return serviceerror.NewInvalidArgument("run_id is not a valid UUID") + } + } + return nil +} + +func validateAndNormalizeCancelRequest(req *workflowservice.RequestCancelNexusOperationExecutionRequest, config *Config) error { + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } else if len(req.GetRequestId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("request_id exceeds length limit. Length=%d Limit=%d", + len(req.GetRequestId()), config.MaxIDLengthLimit()) + } + if req.GetOperationId() == "" { + return serviceerror.NewInvalidArgument("operation_id is required") + } + if len(req.GetOperationId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("operation_id exceeds length limit. Length=%d Limit=%d", + len(req.GetOperationId()), config.MaxIDLengthLimit()) + } + if runID := req.GetRunId(); runID != "" { + if err := uuid.Validate(runID); err != nil { + return serviceerror.NewInvalidArgument("run_id is not a valid UUID") + } + } + if len(req.GetIdentity()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("identity exceeds length limit. Length=%d Limit=%d", + len(req.GetIdentity()), config.MaxIDLengthLimit()) + } + if len(req.GetReason()) > config.MaxReasonLength(req.GetNamespace()) { + return serviceerror.NewInvalidArgumentf("reason exceeds length limit. Length=%d Limit=%d", + len(req.GetReason()), config.MaxReasonLength(req.GetNamespace())) + } + + return nil +} + +func validateAndNormalizeTerminateRequest(req *workflowservice.TerminateNexusOperationExecutionRequest, config *Config) error { + if req.GetRequestId() == "" { + req.RequestId = uuid.NewString() + } else if len(req.GetRequestId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("request_id exceeds length limit. 
Length=%d Limit=%d", + len(req.GetRequestId()), config.MaxIDLengthLimit()) + } + if req.GetOperationId() == "" { + return serviceerror.NewInvalidArgument("operation_id is required") + } + if len(req.GetOperationId()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("operation_id exceeds length limit. Length=%d Limit=%d", + len(req.GetOperationId()), config.MaxIDLengthLimit()) + } + if runID := req.GetRunId(); runID != "" { + if err := uuid.Validate(runID); err != nil { + return serviceerror.NewInvalidArgument("run_id is not a valid UUID") + } + } + if len(req.GetIdentity()) > config.MaxIDLengthLimit() { + return serviceerror.NewInvalidArgumentf("identity exceeds length limit. Length=%d Limit=%d", + len(req.GetIdentity()), config.MaxIDLengthLimit()) + } + if len(req.GetReason()) > config.MaxReasonLength(req.GetNamespace()) { + return serviceerror.NewInvalidArgumentf("reason exceeds length limit. Length=%d Limit=%d", + len(req.GetReason()), config.MaxReasonLength(req.GetNamespace())) + } + + return nil +} + +func validateAndNormalizeSearchAttributes( + req *workflowservice.StartNexusOperationExecutionRequest, + saMapperProvider searchattribute.MapperProvider, + saValidator *searchattribute.Validator, +) error { + namespaceName := req.GetNamespace() + + // Unalias search attributes for validation. 
+ saToValidate := req.SearchAttributes + if saMapperProvider != nil && saToValidate != nil { + var err error + saToValidate, err = searchattribute.UnaliasFields(saMapperProvider, saToValidate, namespaceName) + if err != nil { + return err + } + } + + if err := saValidator.Validate(saToValidate, namespaceName); err != nil { + return err + } + + return saValidator.ValidateSize(saToValidate, namespaceName) +} diff --git a/chasm/lib/nexusoperation/validator_test.go b/chasm/lib/nexusoperation/validator_test.go new file mode 100644 index 00000000000..ebb9d7f1d96 --- /dev/null +++ b/chasm/lib/nexusoperation/validator_test.go @@ -0,0 +1,779 @@ +package nexusoperation + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + sdkpb "go.temporal.io/api/sdk/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/persistence/visibility/manager" + "go.temporal.io/server/common/searchattribute" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" +) + +func TestValidateStartNexusOperationExecutionRequest(t *testing.T) { + ctrl := gomock.NewController(t) + mockVisibilityManager := manager.NewMockVisibilityManager(ctrl) + mockVisibilityManager.EXPECT().GetIndexName().Return("index-name").AnyTimes() + mockVisibilityManager.EXPECT().ValidateCustomSearchAttributes(gomock.Any()).Return(nil, nil).AnyTimes() + + saValidator := searchattribute.NewValidator( + searchattribute.NewTestEsProvider(), + searchattribute.NewTestMapperProvider(nil), + func(string) int { return 2 }, // max number of keys + func(string) int { return 20 }, // max size of value + func(string) int { return 100 }, // max total size + 
mockVisibilityManager, + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), + ) + + config := &Config{ + MaxIDLengthLimit: func() int { return 50 }, + MaxServiceNameLength: func(string) int { return 10 }, + MaxOperationNameLength: func(string) int { return 10 }, + PayloadSizeLimit: func(string) int { return 20 }, + PayloadSizeLimitWarn: func(string) int { return 10 }, + MaxUserMetadataSummarySize: func(string) int { return 10 }, + MaxUserMetadataDetailsSize: func(string) int { return 20 }, + MaxOperationHeaderSize: func(string) int { return 10 }, + DisallowedOperationHeaders: func() []string { return []string{"disallowed-header"} }, + MaxOperationScheduleToCloseTimeout: func(string) time.Duration { return time.Hour }, + } + + for _, tc := range []struct { + name string + mutate func(*workflowservice.StartNexusOperationExecutionRequest) + errMsg string + check func(*testing.T, *workflowservice.StartNexusOperationExecutionRequest) + }{ + { + name: "valid request", + }, + { + name: "operation_id - required", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.OperationId = "" + }, + errMsg: "operation_id is required", + }, + { + name: "operation_id - exceeds length limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.OperationId = strings.Repeat("x", 51) + }, + errMsg: "operation_id exceeds length limit", + }, + { + name: "request_id - defaults empty to UUID", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.RequestId = "" + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Len(t, r.RequestId, 36) // UUID length + }, + }, + { + name: "request_id - exceeds length limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.RequestId = strings.Repeat("x", 51) + }, + errMsg: "request_id exceeds length limit", + }, + { + name: "identity - exceeds 
length limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.Identity = strings.Repeat("x", 51) + }, + errMsg: "identity exceeds length limit", + }, + { + name: "endpoint - required", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { r.Endpoint = "" }, + errMsg: "endpoint is required", + }, + { + name: "service - required", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { r.Service = "" }, + errMsg: "service is required", + }, + { + name: "service - exceeds length limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.Service = "too-long-svc" + }, + errMsg: "service exceeds length limit", + }, + { + name: "operation - required", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { r.Operation = "" }, + errMsg: "operation is required", + }, + { + name: "operation - exceeds length limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.Operation = "too-long-op!" 
+ }, + errMsg: "operation exceeds length limit", + }, + { + name: "schedule_to_close_timeout - invalid", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = &durationpb.Duration{Seconds: -1} + }, + errMsg: "schedule_to_close_timeout is invalid", + }, + { + name: "schedule_to_close_timeout - caps exceeding max", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = durationpb.New(2 * time.Hour) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, time.Hour, r.ScheduleToCloseTimeout.AsDuration()) + }, + }, + { + name: "schedule_to_close_timeout - caps unset to max", + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, time.Hour, r.ScheduleToCloseTimeout.AsDuration()) + }, + }, + { + name: "schedule_to_close_timeout - preserves within max", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = durationpb.New(30 * time.Minute) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, 30*time.Minute, r.ScheduleToCloseTimeout.AsDuration()) + }, + }, + { + name: "schedule_to_start_timeout - invalid", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToStartTimeout = &durationpb.Duration{Seconds: -1} + }, + errMsg: "schedule_to_start_timeout is invalid", + }, + { + name: "schedule_to_start_timeout - caps to defaulted schedule_to_close_timeout", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToStartTimeout = durationpb.New(2 * time.Hour) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, time.Hour, r.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, time.Hour, r.ScheduleToStartTimeout.AsDuration()) + }, + }, + { + name: 
"schedule_to_start_timeout - caps to schedule_to_close_timeout", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = durationpb.New(30 * time.Minute) + r.ScheduleToStartTimeout = durationpb.New(time.Hour) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, 30*time.Minute, r.ScheduleToStartTimeout.AsDuration()) + }, + }, + { + name: "schedule_to_start_timeout - preserves value within schedule_to_close_timeout", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = durationpb.New(30 * time.Minute) + r.ScheduleToStartTimeout = durationpb.New(20 * time.Minute) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, 20*time.Minute, r.ScheduleToStartTimeout.AsDuration()) + }, + }, + { + name: "start_to_close_timeout - invalid", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.StartToCloseTimeout = &durationpb.Duration{Seconds: -1} + }, + errMsg: "start_to_close_timeout is invalid", + }, + { + name: "start_to_close_timeout - caps to defaulted schedule_to_close_timeout", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.StartToCloseTimeout = durationpb.New(2 * time.Hour) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, time.Hour, r.ScheduleToCloseTimeout.AsDuration()) + require.Equal(t, time.Hour, r.StartToCloseTimeout.AsDuration()) + }, + }, + { + name: "start_to_close_timeout - caps to schedule_to_close_timeout", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = durationpb.New(30 * time.Minute) + r.StartToCloseTimeout = durationpb.New(time.Hour) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, 30*time.Minute, 
r.StartToCloseTimeout.AsDuration()) + }, + }, + { + name: "start_to_close_timeout - preserves value within schedule_to_close_timeout", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.ScheduleToCloseTimeout = durationpb.New(30 * time.Minute) + r.StartToCloseTimeout = durationpb.New(10 * time.Minute) + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, 10*time.Minute, r.StartToCloseTimeout.AsDuration()) + }, + }, + { + name: "input - exceeds warning limit but within hard limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.Input = &commonpb.Payload{Data: []byte("exceed-warn-limit")} + }, + }, + { + name: "input - exceeds size limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.Input = &commonpb.Payload{Data: []byte("this-input-is-longer-than-twenty-characters")} + }, + errMsg: "input exceeds size limit", + }, + { + name: "user_metadata.summary - exceeds size limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.UserMetadata = &sdkpb.UserMetadata{ + Summary: &commonpb.Payload{Data: []byte("too-long-summary")}, + } + }, + errMsg: "user_metadata.summary exceeds size limit", + }, + { + name: "user_metadata.details - exceeds size limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.UserMetadata = &sdkpb.UserMetadata{ + Details: &commonpb.Payload{Data: []byte("this-details-payload-is-too-long")}, + } + }, + errMsg: "user_metadata.details exceeds size limit", + }, + { + name: "nexus_header - disallowed key", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.NexusHeader = map[string]string{"Disallowed-Header": "value"} + }, + errMsg: "nexus_header contains a disallowed key", + }, + { + name: "nexus_header - exceeds size limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.NexusHeader = 
map[string]string{"key": "too-long-val"} + }, + errMsg: "nexus_header exceeds size limit", + }, + { + name: "id_policies - defaults unspecified", + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_ALLOW_DUPLICATE, r.IdReusePolicy) + require.Equal(t, enumspb.NEXUS_OPERATION_ID_CONFLICT_POLICY_FAIL, r.IdConflictPolicy) + }, + }, + { + name: "id_policies - preserves explicit values", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.IdReusePolicy = enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_REJECT_DUPLICATE + r.IdConflictPolicy = enumspb.NEXUS_OPERATION_ID_CONFLICT_POLICY_USE_EXISTING + }, + check: func(t *testing.T, r *workflowservice.StartNexusOperationExecutionRequest) { + require.Equal(t, enumspb.NEXUS_OPERATION_ID_REUSE_POLICY_REJECT_DUPLICATE, r.IdReusePolicy) + require.Equal(t, enumspb.NEXUS_OPERATION_ID_CONFLICT_POLICY_USE_EXISTING, r.IdConflictPolicy) + }, + }, + { + name: "search_attributes - too many keys", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.SearchAttributes = &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "CustomKeywordField": payload.EncodeString("v1"), + "CustomTextField": payload.EncodeString("v2"), + "CustomIntField": payload.EncodeString("3"), + }, + } + }, + errMsg: "number of search attributes", + }, + { + name: "search_attributes - value exceeds size limit", + mutate: func(r *workflowservice.StartNexusOperationExecutionRequest) { + r.SearchAttributes = &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "CustomKeywordField": payload.EncodeString(strings.Repeat("x", 100)), + }, + } + }, + errMsg: "exceeds size limit", + }, + } { + t.Run(tc.name, func(t *testing.T) { + req := &workflowservice.StartNexusOperationExecutionRequest{ + Namespace: "default", + OperationId: "op-id", + RequestId: "request-id", + Endpoint: "endpoint", + Service: 
"service", + Operation: "operation", + SearchAttributes: &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "CustomKeywordField": payload.EncodeString("val"), + }, + }, + } + if tc.mutate != nil { + tc.mutate(req) + } + err := validateAndNormalizeStartRequest(req, config, log.NewNoopLogger(), nil, saValidator) + if tc.errMsg != "" { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.check != nil { + tc.check(t, req) + } + }) + } +} + +func TestValidateDescribeNexusOperationExecutionRequest(t *testing.T) { + config := &Config{ + MaxIDLengthLimit: func() int { return 20 }, + } + + validRunID := "11111111-2222-3333-4444-555555555555" + validToken, err := (&persistencespb.ChasmComponentRef{ + NamespaceId: "test-namespace-id", + BusinessId: "operation-id", + RunId: validRunID, + }).Marshal() + require.NoError(t, err) + + wrongNamespaceToken, err := (&persistencespb.ChasmComponentRef{ + NamespaceId: "other-namespace-id", + BusinessId: "operation-id", + RunId: validRunID, + }).Marshal() + require.NoError(t, err) + + for _, tc := range []struct { + name string + mutate func(*workflowservice.DescribeNexusOperationExecutionRequest) + errMsg string + }{ + { + name: "valid request", + }, + { + name: "operation_id - required", + mutate: func(r *workflowservice.DescribeNexusOperationExecutionRequest) { + r.OperationId = "" + }, + errMsg: "operation_id is required", + }, + { + name: "operation_id - exceeds length limit", + mutate: func(r *workflowservice.DescribeNexusOperationExecutionRequest) { + r.OperationId = "this-operation-id-is-too-long" + }, + errMsg: "operation_id exceeds length limit", + }, + { + name: "run_id - not a valid UUID", + mutate: func(r *workflowservice.DescribeNexusOperationExecutionRequest) { + r.RunId = "not-a-uuid" + }, + errMsg: "run_id is not a valid UUID", + }, + { + name: "long_poll_token - 
requires run_id", + mutate: func(r *workflowservice.DescribeNexusOperationExecutionRequest) { + r.LongPollToken = validToken + }, + errMsg: "run_id is required when long_poll_token is provided", + }, + { + name: "long_poll_token - rejects malformed token", + mutate: func(r *workflowservice.DescribeNexusOperationExecutionRequest) { + r.RunId = validRunID + r.LongPollToken = []byte("not-a-token") + }, + errMsg: "invalid long poll token", + }, + { + name: "long_poll_token - rejects wrong namespace", + mutate: func(r *workflowservice.DescribeNexusOperationExecutionRequest) { + r.RunId = validRunID + r.LongPollToken = wrongNamespaceToken + }, + errMsg: "long poll token does not match execution", + }, + } { + t.Run(tc.name, func(t *testing.T) { + validReq := &workflowservice.DescribeNexusOperationExecutionRequest{ + Namespace: "default", + OperationId: "operation-id", + } + if tc.mutate != nil { + tc.mutate(validReq) + } + err := validateAndNormalizeDescribeRequest(validReq, "test-namespace-id", config) + if tc.errMsg != "" { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateRequestCancelNexusOperationExecutionRequest(t *testing.T) { + config := &Config{ + MaxIDLengthLimit: func() int { return 20 }, + MaxReasonLength: func(string) int { return 20 }, + } + + for _, tc := range []struct { + name string + mutate func(*workflowservice.RequestCancelNexusOperationExecutionRequest) + errMsg string + check func(*testing.T, *workflowservice.RequestCancelNexusOperationExecutionRequest) + }{ + { + name: "valid request", + }, + { + name: "request_id - defaults empty to UUID", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.RequestId = "" + }, + check: func(t *testing.T, r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + require.Len(t, r.RequestId, 36) + }, + }, + { + 
name: "operation_id - required", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.OperationId = "" + }, + errMsg: "operation_id is required", + }, + { + name: "operation_id - exceeds length limit", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.OperationId = "this-operation-id-is-too-long" + }, + errMsg: "operation_id exceeds length limit", + }, + { + name: "request_id - exceeds length limit", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.RequestId = "this-request-id-is-too-long" + }, + errMsg: "request_id exceeds length limit", + }, + { + name: "run_id - not a valid UUID", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.RunId = "not-a-uuid" + }, + errMsg: "run_id is not a valid UUID", + }, + { + name: "identity - exceeds length limit", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.Identity = "this-identity-is-too-long!!" 
+ }, + errMsg: "identity exceeds length limit", + }, + { + name: "reason - exceeds length limit", + mutate: func(r *workflowservice.RequestCancelNexusOperationExecutionRequest) { + r.Reason = "this-reason-is-longer-than-twenty-characters" + }, + errMsg: "reason exceeds length limit", + }, + } { + t.Run(tc.name, func(t *testing.T) { + validReq := &workflowservice.RequestCancelNexusOperationExecutionRequest{ + Namespace: "default", + OperationId: "operation-id", + } + if tc.mutate != nil { + tc.mutate(validReq) + } + err := validateAndNormalizeCancelRequest(validReq, config) + if tc.errMsg != "" { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.check != nil { + tc.check(t, validReq) + } + }) + } +} + +func TestValidateDeleteNexusOperationExecutionRequest(t *testing.T) { + config := &Config{ + MaxIDLengthLimit: func() int { return 20 }, + } + + for _, tc := range []struct { + name string + mutate func(*workflowservice.DeleteNexusOperationExecutionRequest) + errMsg string + }{ + { + name: "valid request", + }, + { + name: "valid request - with run_id", + mutate: func(r *workflowservice.DeleteNexusOperationExecutionRequest) { + r.RunId = "550e8400-e29b-41d4-a716-446655440000" + }, + }, + { + name: "operation_id - required", + mutate: func(r *workflowservice.DeleteNexusOperationExecutionRequest) { + r.OperationId = "" + }, + errMsg: "operation_id is required", + }, + { + name: "operation_id - exceeds length limit", + mutate: func(r *workflowservice.DeleteNexusOperationExecutionRequest) { + r.OperationId = "this-operation-id-is-too-long" + }, + errMsg: "operation_id exceeds length limit", + }, + { + name: "run_id - invalid UUID", + mutate: func(r *workflowservice.DeleteNexusOperationExecutionRequest) { + r.RunId = "not-a-valid-uuid" + }, + errMsg: "invalid run id: must be a valid UUID", + }, + } { + t.Run(tc.name, func(t *testing.T) { 
+ validReq := &workflowservice.DeleteNexusOperationExecutionRequest{ + Namespace: "default", + OperationId: "operation-id", + } + if tc.mutate != nil { + tc.mutate(validReq) + } + err := validateAndNormalizeDeleteRequest(validReq, config) + if tc.errMsg != "" { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestValidateTerminateNexusOperationExecutionRequest(t *testing.T) { + config := &Config{ + MaxIDLengthLimit: func() int { return 20 }, + MaxReasonLength: func(string) int { return 20 }, + } + + for _, tc := range []struct { + name string + mutate func(*workflowservice.TerminateNexusOperationExecutionRequest) + errMsg string + check func(*testing.T, *workflowservice.TerminateNexusOperationExecutionRequest) + }{ + { + name: "valid request", + }, + { + name: "request_id - defaults empty to UUID", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.RequestId = "" + }, + check: func(t *testing.T, r *workflowservice.TerminateNexusOperationExecutionRequest) { + require.Len(t, r.RequestId, 36) + }, + }, + { + name: "operation_id - required", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.OperationId = "" + }, + errMsg: "operation_id is required", + }, + { + name: "operation_id - exceeds length limit", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.OperationId = "this-operation-id-is-too-long" + }, + errMsg: "operation_id exceeds length limit", + }, + { + name: "request_id - exceeds length limit", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.RequestId = "this-request-id-is-too-long" + }, + errMsg: "request_id exceeds length limit", + }, + { + name: "run_id - not a valid UUID", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.RunId = "not-a-uuid" + }, + errMsg: 
"run_id is not a valid UUID", + }, + { + name: "identity - exceeds length limit", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.Identity = "this-identity-is-too-long!!" + }, + errMsg: "identity exceeds length limit", + }, + { + name: "reason - exceeds length limit", + mutate: func(r *workflowservice.TerminateNexusOperationExecutionRequest) { + r.Reason = "this-reason-is-longer-than-twenty-characters" + }, + errMsg: "reason exceeds length limit", + }, + } { + t.Run(tc.name, func(t *testing.T) { + validReq := &workflowservice.TerminateNexusOperationExecutionRequest{ + Namespace: "default", + OperationId: "operation-id", + } + if tc.mutate != nil { + tc.mutate(validReq) + } + err := validateAndNormalizeTerminateRequest(validReq, config) + if tc.errMsg != "" { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.check != nil { + tc.check(t, validReq) + } + }) + } +} + +func TestValidatePollNexusOperationExecutionRequest(t *testing.T) { + config := &Config{ + MaxIDLengthLimit: func() int { return 20 }, + } + + for _, tc := range []struct { + name string + mutate func(*workflowservice.PollNexusOperationExecutionRequest) + errMsg string + check func(*testing.T, *workflowservice.PollNexusOperationExecutionRequest) + }{ + { + name: "valid request", + }, + { + name: "operation_id - required", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.OperationId = "" + }, + errMsg: "operation_id is required", + }, + { + name: "operation_id - exceeds length limit", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.OperationId = "this-operation-id-is-too-long" + }, + errMsg: "operation_id exceeds length limit", + }, + { + name: "run_id - not a valid UUID", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.RunId = "not-a-uuid" + }, + errMsg: 
"run_id is not a valid UUID", + }, + { + name: "wait_stage - normalizes UNSPECIFIED to CLOSED", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.WaitStage = enumspb.NEXUS_OPERATION_WAIT_STAGE_UNSPECIFIED + }, + check: func(t *testing.T, r *workflowservice.PollNexusOperationExecutionRequest) { + require.Equal(t, enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED, r.WaitStage) + }, + }, + { + name: "wait_stage - preserves STARTED", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.WaitStage = enumspb.NEXUS_OPERATION_WAIT_STAGE_STARTED + }, + check: func(t *testing.T, r *workflowservice.PollNexusOperationExecutionRequest) { + require.Equal(t, enumspb.NEXUS_OPERATION_WAIT_STAGE_STARTED, r.WaitStage) + }, + }, + { + name: "wait_stage - preserves CLOSED", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.WaitStage = enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED + }, + check: func(t *testing.T, r *workflowservice.PollNexusOperationExecutionRequest) { + require.Equal(t, enumspb.NEXUS_OPERATION_WAIT_STAGE_CLOSED, r.WaitStage) + }, + }, + { + name: "wait_stage - rejects unsupported value", + mutate: func(r *workflowservice.PollNexusOperationExecutionRequest) { + r.WaitStage = enumspb.NexusOperationWaitStage(99) + }, + errMsg: "unsupported wait_stage", + }, + } { + t.Run(tc.name, func(t *testing.T) { + validReq := &workflowservice.PollNexusOperationExecutionRequest{ + Namespace: "default", + OperationId: "operation-id", + } + if tc.mutate != nil { + tc.mutate(validReq) + } + err := validateAndNormalizePollRequest(validReq, config) + if tc.errMsg != "" { + var invalidArgErr *serviceerror.InvalidArgument + require.ErrorAs(t, err, &invalidArgErr) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + } + if tc.check != nil { + tc.check(t, validReq) + } + }) + } +} diff --git a/chasm/lib/scheduler/backfiller.go b/chasm/lib/scheduler/backfiller.go new file mode 100644 index 
00000000000..2b8a28a7a21 --- /dev/null +++ b/chasm/lib/scheduler/backfiller.go @@ -0,0 +1,86 @@ +package scheduler + +import ( + "time" + + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + schedulescommon "go.temporal.io/server/common/schedules" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// The Backfiller component is responsible for buffering manually +// requested actions. Each backfill request has its own Backfiller node. +type Backfiller struct { + chasm.UnimplementedComponent + + *schedulerpb.BackfillerState + + Scheduler chasm.ParentPtr[*Scheduler] +} + +type BackfillRequestType int + +const ( + RequestTypeTrigger BackfillRequestType = iota + RequestTypeBackfill +) + +// addBackfiller returns an initialized backfiller, adding it to the scheduler's +// Backfillers. +func addBackfiller( + ctx chasm.MutableContext, + scheduler *Scheduler, +) *Backfiller { + id := schedulescommon.GenerateBackfillerID() + backfiller := newBackfillerWithState(ctx, &schedulerpb.BackfillerState{ + BackfillId: id, + LastProcessedTime: timestamppb.New(ctx.Now(scheduler)), + }) + + if scheduler.Backfillers == nil { + scheduler.Backfillers = make(chasm.Map[string, *Backfiller]) + } + scheduler.Backfillers[id] = chasm.NewComponentField(ctx, backfiller) + + return backfiller +} + +func newBackfillerWithState(ctx chasm.MutableContext, state *schedulerpb.BackfillerState) *Backfiller { + backfiller := &Backfiller{ + BackfillerState: state, + } + backfiller.scheduleTask(ctx, chasm.TaskScheduledTimeImmediate) + return backfiller +} + +// scheduleTask schedules a BackfillerTask at the given time. 
+func (b *Backfiller) scheduleTask(ctx chasm.MutableContext, scheduledTime time.Time) { + ctx.AddTask(b, chasm.TaskAttributes{ + ScheduledTime: scheduledTime, + }, &schedulerpb.BackfillerTask{}) +} + +func (b *Backfiller) LifecycleState(ctx chasm.Context) chasm.LifecycleState { + return chasm.LifecycleStateRunning +} + +func (b *Backfiller) RequestType() BackfillRequestType { + if b.GetTriggerRequest() != nil { + return RequestTypeTrigger + } + + return RequestTypeBackfill +} + +type backfillProgressResult struct { + // BufferedStarts that should be enqueued to the Invoker. + BufferedStarts []*schedulespb.BufferedStart + + // High water mark for when state was last updated. + LastProcessedTime time.Time + + // When true, the backfill has completed and the node can be deleted. + Complete bool +} diff --git a/chasm/lib/scheduler/backfiller_tasks.go b/chasm/lib/scheduler/backfiller_tasks.go new file mode 100644 index 00000000000..fa5380b1fa3 --- /dev/null +++ b/chasm/lib/scheduler/backfiller_tasks.go @@ -0,0 +1,241 @@ +package scheduler + +import ( + "fmt" + "time" + + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + schedulescommon "go.temporal.io/server/common/schedules" + queueerrors "go.temporal.io/server/service/history/queues/errors" + "go.uber.org/fx" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type ( + BackfillerTaskHandlerOptions struct { + fx.In + + Config *Config + MetricsHandler metrics.Handler + BaseLogger log.Logger + SpecProcessor SpecProcessor + } + + BackfillerTaskHandler struct { + chasm.PureTaskHandlerBase + config *Config + metricsHandler metrics.Handler + baseLogger log.Logger + specProcessor SpecProcessor + } +) + +func NewBackfillerTaskHandler(opts BackfillerTaskHandlerOptions) *BackfillerTaskHandler { + return 
&BackfillerTaskHandler{ + config: opts.Config, + metricsHandler: opts.MetricsHandler, + baseLogger: opts.BaseLogger, + specProcessor: opts.SpecProcessor, + } +} + +func (b *BackfillerTaskHandler) Validate( + ctx chasm.Context, + backfiller *Backfiller, + attrs chasm.TaskAttributes, + _ *schedulerpb.BackfillerTask, +) (bool, error) { + return validateTaskHighWaterMark( + backfiller.GetLastProcessedTime(), + attrs.ScheduledTime, + ) +} + +func (b *BackfillerTaskHandler) Execute( + ctx chasm.MutableContext, + backfiller *Backfiller, + _ chasm.TaskAttributes, + _ *schedulerpb.BackfillerTask, +) error { + defer func() { backfiller.Attempt++ }() + + scheduler := backfiller.Scheduler.Get(ctx) + logger := newTaggedLogger(b.baseLogger, scheduler) + + invoker := scheduler.Invoker.Get(ctx) + + // If the buffer is already full, don't move the watermark at all, just back off + // and retry. + tweakables := b.config.Tweakables(scheduler.Namespace) + limit, err := b.allowedBufferedStarts(ctx, scheduler, invoker, tweakables) + if err != nil { + return err + } + if limit <= 0 { + // Buffer is full, back off and retry later. Unlike the generator, the + // backfiller doesn't drop actions - it will retry after backoff. + logger.Debug("Buffer full, backing off backfill", + tag.String("backfill-id", backfiller.GetBackfillId())) + b.rescheduleBackfill(ctx, backfiller) + return nil + } + + // Process backfills, returning BufferedStarts. 
+ var result backfillProgressResult + switch backfiller.RequestType() { + case RequestTypeBackfill: + result, err = b.processBackfill(ctx, scheduler, backfiller, limit) + case RequestTypeTrigger: + result, err = b.processTrigger(ctx, scheduler, backfiller) + default: + return queueerrors.NewUnprocessableTaskError(fmt.Sprintf("unknown backfill type: %v", backfiller.RequestType())) + } + if err != nil { + return queueerrors.NewUnprocessableTaskError(fmt.Sprintf("failed to process backfill: %s", err.Error())) + } + + // Enqueue new BufferedStarts on the Invoker, if we have any. + if len(result.BufferedStarts) > 0 { + invoker.EnqueueBufferedStarts(ctx, result.BufferedStarts) + } + + // If we're complete, we can delete this Backfiller component and return without + // any more tasks. + if result.Complete { + logger.Debug("backfill complete, deleting Backfiller", + tag.String("backfill-id", backfiller.GetBackfillId())) + delete(scheduler.Backfillers, backfiller.GetBackfillId()) + return nil + } + + // Otherwise, update watermark and reschedule. + backfiller.LastProcessedTime = timestamppb.New(result.LastProcessedTime) + b.rescheduleBackfill(ctx, backfiller) + + return nil +} + +func (b *BackfillerTaskHandler) rescheduleBackfill(ctx chasm.MutableContext, backfiller *Backfiller) { + backoffTime := ctx.Now(backfiller).Add(b.backoffDelay(backfiller)) + backfiller.scheduleTask(ctx, backoffTime) +} + +// processBackfill processes a Backfiller's BackfillRequest. +func (b *BackfillerTaskHandler) processBackfill( + _ chasm.MutableContext, + scheduler *Scheduler, + backfiller *Backfiller, + limit int, +) (result backfillProgressResult, err error) { + request := backfiller.GetBackfillRequest() + + // Restore high watermark if we've already started processing the backfill. 
+ var startTime time.Time + lastProcessed := backfiller.GetLastProcessedTime() + if backfiller.GetAttempt() > 0 { + startTime = lastProcessed.AsTime() + } else { + // On the first attempt, the start time is set slightly behind in order to make + // the backfill start time inclusive. + startTime = request.GetStartTime().AsTime().Add(-1 * time.Millisecond) + } + endTime := request.GetEndTime().AsTime() + specResult, err := b.specProcessor.ProcessTimeRange( + scheduler, + startTime, + endTime, + request.GetOverlapPolicy(), + scheduler.WorkflowID(), + backfiller.GetBackfillId(), + true, + &limit, + ) + if err != nil { + return + } + + next := specResult.NextWakeupTime + if next.IsZero() || next.After(endTime) { + result.Complete = true + } else { + // More to backfill, indicating the buffer is full. Set the high watermark, and + // apply a backoff time before attempting to continue filling. + result.LastProcessedTime = specResult.LastActionTime + } + result.BufferedStarts = specResult.BufferedStarts + + return +} + +// backoffDelay returns the amount of delay that should be added when retrying. +func (b *BackfillerTaskHandler) backoffDelay(backfiller *Backfiller) time.Duration { + // Pass GetAttempt()+1 here so that callers don't need to increment + // backfiller.Attempt themselves before computing the delay for the current attempt. + return b.config.RetryPolicy().ComputeNextDelay(0, int(backfiller.GetAttempt()+1), nil) +} + +// processTrigger processes a Backfiller's TriggerImmediatelyRequest. +func (b *BackfillerTaskHandler) processTrigger( + _ chasm.MutableContext, + scheduler *Scheduler, + backfiller *Backfiller, +) (result backfillProgressResult, err error) { + request := backfiller.GetTriggerRequest() + overlapPolicy := scheduler.resolveOverlapPolicy(request.GetOverlapPolicy()) + + // Add a single manual start and mark the Backfiller as complete. For batch + // backfill requests, a deterministic start time is trivial as they follow the + // schedule. 
For immediate trigger requests, the `LastProcessedTime` (set to + // "now" when the Backfiller is spawned to handle a request) is used for start + // time determinism. + nowpb := backfiller.GetLastProcessedTime() + now := nowpb.AsTime() + requestID := generateRequestID(scheduler, backfiller.GetBackfillId(), now, now) + workflowID := schedulescommon.GenerateWorkflowID(scheduler.WorkflowID(), now) + result.BufferedStarts = []*schedulespb.BufferedStart{ + { + NominalTime: nowpb, + ActualTime: nowpb, + DesiredTime: nowpb, + OverlapPolicy: overlapPolicy, + Manual: true, + RequestId: requestID, + WorkflowId: workflowID, + }, + } + result.Complete = true + + return +} + +// allowedBufferedStarts returns the number of BufferedStarts that the Backfiller should +// buffer, taking into account buffer limits and concurrent backfills. +func (b *BackfillerTaskHandler) allowedBufferedStarts( + ctx chasm.Context, + scheduler *Scheduler, + invoker *Invoker, + tweakables Tweakables, +) (int, error) { + // Count the number of Backfillers active. + backfillerCount := 0 + for _, field := range scheduler.Backfillers { + b := field.Get(ctx) + + // Don't count trigger-immediately requests, as they only fire a single start. + if b.RequestType() == RequestTypeBackfill { + backfillerCount++ + } + } + + // Prevents a division by 0. + backfillerCount = max(1, backfillerCount) + + // Give half the available buffer to backfillers, distributed evenly, minus + // Generator reserve space. 
+ return max(0, ((tweakables.MaxBufferSize/2)/backfillerCount)-len(invoker.GetBufferedStarts())-tweakables.GeneratorBufferReserveSize), nil +} diff --git a/chasm/lib/scheduler/backfiller_tasks_test.go b/chasm/lib/scheduler/backfiller_tasks_test.go new file mode 100644 index 00000000000..10963a6dc50 --- /dev/null +++ b/chasm/lib/scheduler/backfiller_tasks_test.go @@ -0,0 +1,266 @@ +package scheduler_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" + schedulepb "go.temporal.io/api/schedule/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/metrics" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type backfillTestCase struct { + InitialTriggerRequest *schedulepb.TriggerImmediatelyRequest + InitialBackfillRequest *schedulepb.BackfillRequest + ExpectedBufferedStarts int + ExpectedComplete bool // asserts the Backfiller is deleted + ExpectedLastProcessedTime time.Time + ExpectedAttempt int + + ValidateInvoker func(t *testing.T, invoker *scheduler.Invoker) + ValidateBackfiller func(t *testing.T, backfiller *scheduler.Backfiller) +} + +func runBackfillTestCase(t *testing.T, env *testEnv, c *backfillTestCase) { + ctx := env.MutableContext() + schedComponent, err := env.Node.Component(ctx, chasm.ComponentRef{}) + require.NoError(t, err) + sched := schedComponent.(*scheduler.Scheduler) + invoker := sched.Invoker.Get(ctx) + + // Exactly one type of request can be set per Backfiller. + require.False(t, c.InitialBackfillRequest != nil && c.InitialTriggerRequest != nil) + require.False(t, c.InitialBackfillRequest == nil && c.InitialTriggerRequest == nil) + + // Spawn backfiller. 
+ var backfiller *scheduler.Backfiller + if c.InitialTriggerRequest != nil { + backfiller = sched.NewImmediateBackfiller(ctx, c.InitialTriggerRequest) + } else { + backfiller = sched.NewRangeBackfiller(ctx, c.InitialBackfillRequest) + } + + // Either type of request will spawn a Backfiller and schedule an immediate pure task. + // The immediate task executes automatically during CloseTransaction(). + require.NoError(t, env.CloseTransaction()) + + // Validate completion or partial progress. + if c.ExpectedComplete { + // Backfiller should no longer be present in the backfiller map. + _, ok := sched.Backfillers[backfiller.BackfillId].TryGet(ctx) + require.False(t, ok) + } else { + // TODO - check that a pure task to continue driving backfill exists here. Because + // a pure task in the tree already has the physically-created status, closing the + // transaction won't call our backend mock for AddTasks twice. Fix this when CHASM + // offers unit testing hooks for task generation. + + require.Equal(t, int64(c.ExpectedAttempt), backfiller.GetAttempt()) + require.Equal(t, c.ExpectedLastProcessedTime.UTC(), backfiller.GetLastProcessedTime().AsTime()) + } + + // Validate BufferedStarts. More detailed validation must be done in the callbacks. + require.Len(t, invoker.GetBufferedStarts(), c.ExpectedBufferedStarts) + + // Validate RequestId -> WorkflowId mapping. + for _, start := range invoker.GetBufferedStarts() { + require.Equal(t, start.WorkflowId, invoker.RunningWorkflowID(start.RequestId)) + } + + // Callbacks. + if c.ValidateInvoker != nil { + c.ValidateInvoker(t, invoker) + } + if c.ValidateBackfiller != nil { + c.ValidateBackfiller(t, backfiller) + } +} + +// An immediately-triggered run should result in the machine being deleted after +// completion. 
+func TestBackfillTask_TriggerImmediate(t *testing.T) { + env := newTestEnv(t) + request := &schedulepb.TriggerImmediatelyRequest{ + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + } + runBackfillTestCase(t, env, &backfillTestCase{ + InitialTriggerRequest: request, + ExpectedBufferedStarts: 1, + ExpectedComplete: true, + ValidateInvoker: func(t *testing.T, invoker *scheduler.Invoker) { + start := invoker.GetBufferedStarts()[0] + require.Equal(t, request.OverlapPolicy, start.OverlapPolicy) + require.True(t, start.Manual) + }, + }) +} + +// An immediately-triggered run will back off and retry if the buffer is full. +func TestBackfillTask_TriggerImmediateFullBuffer(t *testing.T) { + env := newTestEnv(t) + + // Backfillers are limited to half of the max buffer size; fill the entire + // buffer so no capacity remains and the trigger must back off. + ctx := env.MutableContext() + invoker := env.Scheduler.Invoker.Get(ctx) + for range scheduler.DefaultTweakables.MaxBufferSize { + invoker.BufferedStarts = append(invoker.BufferedStarts, &schedulespb.BufferedStart{}) + } + + now := env.TimeSource.Now() + runBackfillTestCase(t, env, &backfillTestCase{ + InitialTriggerRequest: &schedulepb.TriggerImmediatelyRequest{}, + ExpectedBufferedStarts: 1000, + ExpectedComplete: false, + ExpectedLastProcessedTime: now, + ExpectedAttempt: 1, + }) +} + +// A backfill request that completes entirely should result in the machine being +// deleted after completion. 
+func TestBackfillTask_CompleteFill(t *testing.T) { + env := newTestEnv(t) + startTime := env.TimeSource.Now() + endTime := startTime.Add(5 * defaultInterval) + request := &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(startTime), + EndTime: timestamppb.New(endTime), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + } + runBackfillTestCase(t, env, &backfillTestCase{ + InitialBackfillRequest: request, + ExpectedBufferedStarts: 5, + ExpectedComplete: true, + ValidateInvoker: func(t *testing.T, invoker *scheduler.Invoker) { + for _, start := range invoker.GetBufferedStarts() { + require.Equal(t, request.OverlapPolicy, start.OverlapPolicy) + startAt := start.GetActualTime().AsTime() + require.True(t, startAt.After(startTime)) + require.True(t, startAt.Before(endTime)) + require.True(t, start.Manual) + } + }, + }) +} + +// Backfill start and end times are inclusive, so a backfill scheduled for an +// instant that exactly matches a time in the calendar spec's sequence should result +// in a start. +func TestBackfillTask_InclusiveStartEnd(t *testing.T) { + env := newTestEnv(t) + + // Set an identical start and end time, landing on the calendar spec's interval. + backfillTime := env.TimeSource.Now().Truncate(defaultInterval) + request := &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(backfillTime), + EndTime: timestamppb.New(backfillTime), + } + runBackfillTestCase(t, env, &backfillTestCase{ + InitialBackfillRequest: request, + ExpectedBufferedStarts: 1, + ExpectedComplete: true, + }) + + // Clear the Invoker's buffered starts. + ctx := env.MutableContext() + invoker := env.Scheduler.Invoker.Get(ctx) + invoker.BufferedStarts = nil + + // A hair off and the action won't fire. 
+ backfillTime = backfillTime.Add(1 * time.Millisecond) + request = &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(backfillTime), + EndTime: timestamppb.New(backfillTime), + } + runBackfillTestCase(t, env, &backfillTestCase{ + InitialBackfillRequest: request, + ExpectedBufferedStarts: 0, + ExpectedComplete: true, + }) +} + +// When the buffer's completely full, the high watermark shouldn't advance and no +// starts should be buffered. +func TestBackfillTask_BufferCompletelyFull(t *testing.T) { + env := newTestEnv(t) + + // Fill buffer past max. + ctx := env.MutableContext() + invoker := env.Scheduler.Invoker.Get(ctx) + for range scheduler.DefaultTweakables.MaxBufferSize { + invoker.BufferedStarts = append(invoker.BufferedStarts, &schedulespb.BufferedStart{}) + } + + startTime := env.TimeSource.Now() + endTime := startTime.Add(5 * defaultInterval) + request := &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(startTime), + EndTime: timestamppb.New(endTime), + } + runBackfillTestCase(t, env, &backfillTestCase{ + InitialBackfillRequest: request, + ExpectedBufferedStarts: 1000, + ExpectedComplete: false, + ExpectedAttempt: 1, + ExpectedLastProcessedTime: startTime, + }) +} + +// When the backfill range exceeds buffer capacity, partial filling should occur +// with the remainder left for a retry. +func TestBackfillTask_PartialFill(t *testing.T) { + env := newTestEnv(t) + + // Use a large backfill range (1000 intervals) that exceeds the backfiller's + // buffer limit (MaxBufferSize/2 = 500). 
+ startTime := env.TimeSource.Now() + endTime := startTime.Add(1000 * defaultInterval) + request := &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(startTime), + EndTime: timestamppb.New(endTime), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + } + + ctx := env.MutableContext() + schedComponent, err := env.Node.Component(ctx, chasm.ComponentRef{}) + require.NoError(t, err) + sched := schedComponent.(*scheduler.Scheduler) + backfiller := sched.NewRangeBackfiller(ctx, request) + require.NoError(t, env.CloseTransaction()) + + // Backfiller should have processed up to its limit (500), not the full 1000. + require.False(t, backfiller.GetLastProcessedTime().AsTime().IsZero()) + require.Equal(t, int64(1), backfiller.GetAttempt()) + + // Backfiller should still exist (not complete). + ctx = env.MutableContext() + schedComponent, err = env.Node.Component(ctx, chasm.ComponentRef{}) + require.NoError(t, err) + sched = schedComponent.(*scheduler.Scheduler) + _, ok := sched.Backfillers[backfiller.BackfillId].TryGet(ctx) + require.True(t, ok) + + // Manually execute the second iteration since the scheduled continuation + // task is in the future (after backoff delay). + invoker := sched.Invoker.Get(ctx) + invoker.BufferedStarts = nil // Clear to make room for next batch + handler := scheduler.NewBackfillerTaskHandler(scheduler.BackfillerTaskHandlerOptions{ + Config: defaultConfig(), + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: env.Logger, + SpecProcessor: env.SpecProcessor, + }) + err = handler.Execute(ctx, backfiller, chasm.TaskAttributes{}, &schedulerpb.BackfillerTask{}) + require.NoError(t, err) + require.NoError(t, env.CloseTransaction()) + + // After second iteration, should have processed another batch. 
+ require.Equal(t, int64(2), backfiller.GetAttempt()) +} diff --git a/chasm/lib/scheduler/config.go b/chasm/lib/scheduler/config.go new file mode 100644 index 00000000000..a687464739f --- /dev/null +++ b/chasm/lib/scheduler/config.go @@ -0,0 +1,83 @@ +package scheduler + +import ( + "time" + + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/dynamicconfig" +) + +type ( + Tweakables struct { + DefaultCatchupWindow time.Duration // Default for catchup window + MinCatchupWindow time.Duration // Minimum for catchup window + MaxBufferSize int // MaxBufferSize limits the number of buffered actions pending execution in total + GeneratorBufferReserveSize int // Minimum number of spaces in `BufferedStarts` reserved for automated actions. + CanceledTerminatedCountAsFailures bool // Whether cancelled+terminated count for pause-on-failure + MaxActionsPerExecution int // Limits the number of actions (startWorkflow, terminate/cancel) taken by ExecuteTask in a single iteration + IdleTime time.Duration // How long to keep schedules after they're done + } + + // Config is the CHASM Scheduler dynamic config, shared among all sub-components. 
+ Config struct { + Tweakables dynamicconfig.TypedPropertyFnWithNamespaceFilter[Tweakables] + ServiceCallTimeout dynamicconfig.DurationPropertyFn + RetryPolicy func() backoff.RetryPolicy + } +) + +var ( + CurrentTweakables = dynamicconfig.NewNamespaceTypedSetting( + "scheduler.tweakables", + DefaultTweakables, + "A set of tweakable parameters for the CHASM scheduler.") + + RetryPolicyInitialInterval = dynamicconfig.NewGlobalDurationSetting( + "scheduler.retryPolicy.initialInterval", + time.Second, + `The initial backoff interval when retrying a failed task.`, + ) + + RetryPolicyMaximumInterval = dynamicconfig.NewGlobalDurationSetting( + "scheduler.retryPolicy.maxInterval", + time.Minute, + `The maximum backoff interval when retrying a failed task.`, + ) + + ServiceCallTimeout = dynamicconfig.NewGlobalDurationSetting( + "scheduler.serviceCallTimeout", + 2*time.Second, + `The upper bound on how long a service call can take before being timed out.`, + ) + + // SentinelIdleTime is how long a CHASM sentinel reserves the schedule ID + // before auto-closing via the idle task mechanism. Matches the dummy + // workflow's duration. 
+ SentinelIdleTime = 15 * time.Minute + + DefaultTweakables = Tweakables{ + DefaultCatchupWindow: 365 * 24 * time.Hour, + MinCatchupWindow: 10 * time.Second, + MaxBufferSize: 1000, + GeneratorBufferReserveSize: 50, + CanceledTerminatedCountAsFailures: false, + MaxActionsPerExecution: 5, + IdleTime: 7 * 24 * time.Hour, + } +) + +func ConfigProvider(dc *dynamicconfig.Collection) *Config { + return &Config{ + Tweakables: CurrentTweakables.Get(dc), + ServiceCallTimeout: ServiceCallTimeout.Get(dc), + RetryPolicy: func() backoff.RetryPolicy { + return backoff.NewExponentialRetryPolicy( + RetryPolicyInitialInterval.Get(dc)(), + ).WithMaximumInterval( + RetryPolicyMaximumInterval.Get(dc)(), + ).WithExpirationInterval( + backoff.NoInterval, + ) + }, + } +} diff --git a/chasm/lib/scheduler/export_test.go b/chasm/lib/scheduler/export_test.go new file mode 100644 index 00000000000..0995ae1a056 --- /dev/null +++ b/chasm/lib/scheduler/export_test.go @@ -0,0 +1,39 @@ +package scheduler + +import ( + "context" + "time" + + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/log" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" +) + +// Export unexported methods for testing. 
+ +func NewTestHandler(logger log.Logger) *handler { + return newHandler(logger, legacyscheduler.NewSpecBuilder()) +} + +func (h *handler) TestCreateFromMigrationState(ctx context.Context, req *schedulerpb.CreateFromMigrationStateRequest) (*schedulerpb.CreateFromMigrationStateResponse, error) { + return h.CreateFromMigrationState(ctx, req) +} + +func (h *handler) TestMigrateToWorkflow(ctx context.Context, req *schedulerpb.MigrateToWorkflowRequest) (*schedulerpb.MigrateToWorkflowResponse, error) { + return h.MigrateToWorkflow(ctx, req) +} + +func (s *Scheduler) RecordCompletedAction( + ctx chasm.MutableContext, + completed *schedulespb.CompletedResult, + requestID string, +) time.Time { + invoker := s.Invoker.Get(ctx) + return invoker.recordCompletedAction(ctx, completed, requestID) +} + +func (i *Invoker) RunningWorkflowID(requestID string) string { + return i.runningWorkflowID(requestID) +} diff --git a/chasm/lib/scheduler/fx.go b/chasm/lib/scheduler/fx.go new file mode 100644 index 00000000000..15dfb763ed8 --- /dev/null +++ b/chasm/lib/scheduler/fx.go @@ -0,0 +1,32 @@ +package scheduler + +import ( + "go.temporal.io/server/chasm" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/fx" +) + +func Register( + registry *chasm.Registry, + library *Library, +) error { + return registry.Register(library) +} + +var Module = fx.Module( + "chasm.lib.scheduler", + fx.Provide(ConfigProvider), + fx.Provide(legacyscheduler.NewSpecBuilder), + fx.Provide(NewSpecProcessor), + fx.Provide(func(impl *SpecProcessorImpl) SpecProcessor { return impl }), + fx.Provide(newHandler), + fx.Provide(NewSchedulerIdleTaskHandler), + fx.Provide(NewSchedulerCallbacksTaskHandler), + fx.Provide(NewGeneratorTaskHandler), + fx.Provide(NewInvokerExecuteTaskHandler), + fx.Provide(NewInvokerProcessBufferTaskHandler), + fx.Provide(NewBackfillerTaskHandler), + fx.Provide(NewSchedulerMigrateToWorkflowTaskHandler), + fx.Provide(NewLibrary), + fx.Invoke(Register), +) diff --git 
a/chasm/lib/scheduler/gen/schedulerpb/v1/message.go-helpers.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/message.go-helpers.pb.go new file mode 100644 index 00000000000..202513f2988 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/message.go-helpers.pb.go @@ -0,0 +1,265 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package schedulerpb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type SchedulerState to the protobuf v3 wire format +func (val *SchedulerState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SchedulerState from the protobuf v3 wire format +func (val *SchedulerState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SchedulerState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SchedulerState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SchedulerState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SchedulerState + switch t := that.(type) { + case *SchedulerState: + that1 = t + case SchedulerState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type WorkflowMigrationState to the protobuf v3 wire format +func (val *WorkflowMigrationState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type WorkflowMigrationState from the protobuf v3 wire format +func (val *WorkflowMigrationState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *WorkflowMigrationState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two WorkflowMigrationState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *WorkflowMigrationState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *WorkflowMigrationState + switch t := that.(type) { + case *WorkflowMigrationState: + that1 = t + case WorkflowMigrationState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GeneratorState to the protobuf v3 wire format +func (val *GeneratorState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GeneratorState from the protobuf v3 wire format +func (val *GeneratorState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GeneratorState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GeneratorState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GeneratorState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GeneratorState + switch t := that.(type) { + case *GeneratorState: + that1 = t + case GeneratorState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvokerState to the protobuf v3 wire format +func (val *InvokerState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvokerState from the protobuf v3 wire format +func (val *InvokerState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvokerState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvokerState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvokerState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvokerState + switch t := that.(type) { + case *InvokerState: + that1 = t + case InvokerState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type BackfillerState to the protobuf v3 wire format +func (val *BackfillerState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BackfillerState from the protobuf v3 wire format +func (val *BackfillerState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BackfillerState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BackfillerState values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BackfillerState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BackfillerState + switch t := that.(type) { + case *BackfillerState: + that1 = t + case BackfillerState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type LastCompletionResult to the protobuf v3 wire format +func (val *LastCompletionResult) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type LastCompletionResult from the protobuf v3 wire format +func (val *LastCompletionResult) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *LastCompletionResult) Size() int { + return proto.Size(val) +} + +// Equal returns whether two LastCompletionResult values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *LastCompletionResult) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *LastCompletionResult + switch t := that.(type) { + case *LastCompletionResult: + that1 = t + case LastCompletionResult: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SchedulerMigrationState to the protobuf v3 wire format +func (val *SchedulerMigrationState) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SchedulerMigrationState from the protobuf v3 wire format +func (val *SchedulerMigrationState) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SchedulerMigrationState) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SchedulerMigrationState values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SchedulerMigrationState) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SchedulerMigrationState + switch t := that.(type) { + case *SchedulerMigrationState: + that1 = t + case SchedulerMigrationState: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/scheduler/gen/schedulerpb/v1/message.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/message.pb.go new file mode 100644 index 00000000000..16e69c85590 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/message.pb.go @@ -0,0 +1,749 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/scheduler/proto/v1/message.proto + +package schedulerpb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v12 "go.temporal.io/api/common/v1" + v13 "go.temporal.io/api/failure/v1" + v1 "go.temporal.io/api/schedule/v1" + v11 "go.temporal.io/server/api/schedule/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// CHASM scheduler top-level state. +type SchedulerState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Scheduler request parameters and metadata. + Schedule *v1.Schedule `protobuf:"bytes,2,opt,name=schedule,proto3" json:"schedule,omitempty"` + Info *v1.ScheduleInfo `protobuf:"bytes,3,opt,name=info,proto3" json:"info,omitempty"` + // State common to all generators is stored in the top-level machine. + Namespace string `protobuf:"bytes,5,opt,name=namespace,proto3" json:"namespace,omitempty"` + NamespaceId string `protobuf:"bytes,6,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + ScheduleId string `protobuf:"bytes,7,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` + // Implemented as a sequence number. Used for optimistic locking against + // update requests. + ConflictToken int64 `protobuf:"varint,8,opt,name=conflict_token,json=conflictToken,proto3" json:"conflict_token,omitempty"` + // The closed flag is set true after a schedule completes, and the idle timer + // expires. 
+ Closed bool `protobuf:"varint,9,opt,name=closed,proto3" json:"closed,omitempty"` + // When true, this scheduler is a sentinel that exists only to reserve the + // schedule ID. All API operations return NotFound. + Sentinel bool `protobuf:"varint,10,opt,name=sentinel,proto3" json:"sentinel,omitempty"` + // Set when a migration to workflow-backed scheduler (V1) is pending. + // Unpause operations are blocked while this is set. + WorkflowMigration *WorkflowMigrationState `protobuf:"bytes,11,opt,name=workflow_migration,json=workflowMigration,proto3" json:"workflow_migration,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchedulerState) Reset() { + *x = SchedulerState{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchedulerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulerState) ProtoMessage() {} + +func (x *SchedulerState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulerState.ProtoReflect.Descriptor instead. 
+func (*SchedulerState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{0} +} + +func (x *SchedulerState) GetSchedule() *v1.Schedule { + if x != nil { + return x.Schedule + } + return nil +} + +func (x *SchedulerState) GetInfo() *v1.ScheduleInfo { + if x != nil { + return x.Info + } + return nil +} + +func (x *SchedulerState) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *SchedulerState) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *SchedulerState) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +func (x *SchedulerState) GetConflictToken() int64 { + if x != nil { + return x.ConflictToken + } + return 0 +} + +func (x *SchedulerState) GetClosed() bool { + if x != nil { + return x.Closed + } + return false +} + +func (x *SchedulerState) GetSentinel() bool { + if x != nil { + return x.Sentinel + } + return false +} + +func (x *SchedulerState) GetWorkflowMigration() *WorkflowMigrationState { + if x != nil { + return x.WorkflowMigration + } + return nil +} + +// WorkflowMigrationState tracks the state of an in-progress V2-to-V1 migration. +type WorkflowMigrationState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The schedule's paused state before migration was initiated. Used to + // restore the correct paused state when passing state to the V1 workflow. + PreMigrationPaused bool `protobuf:"varint,1,opt,name=pre_migration_paused,json=preMigrationPaused,proto3" json:"pre_migration_paused,omitempty"` + // The schedule's notes before migration was initiated. 
+ PreMigrationNotes string `protobuf:"bytes,2,opt,name=pre_migration_notes,json=preMigrationNotes,proto3" json:"pre_migration_notes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WorkflowMigrationState) Reset() { + *x = WorkflowMigrationState{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WorkflowMigrationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowMigrationState) ProtoMessage() {} + +func (x *WorkflowMigrationState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowMigrationState.ProtoReflect.Descriptor instead. +func (*WorkflowMigrationState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *WorkflowMigrationState) GetPreMigrationPaused() bool { + if x != nil { + return x.PreMigrationPaused + } + return false +} + +func (x *WorkflowMigrationState) GetPreMigrationNotes() string { + if x != nil { + return x.PreMigrationNotes + } + return "" +} + +// CHASM scheduler's Generator internal state. +type GeneratorState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // High water mark. + LastProcessedTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_processed_time,json=lastProcessedTime,proto3" json:"last_processed_time,omitempty"` + // A list of upcoming times an action will be triggered. 
+ FutureActionTimes []*timestamppb.Timestamp `protobuf:"bytes,4,rep,name=future_action_times,json=futureActionTimes,proto3" json:"future_action_times,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GeneratorState) Reset() { + *x = GeneratorState{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GeneratorState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratorState) ProtoMessage() {} + +func (x *GeneratorState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratorState.ProtoReflect.Descriptor instead. +func (*GeneratorState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{2} +} + +func (x *GeneratorState) GetLastProcessedTime() *timestamppb.Timestamp { + if x != nil { + return x.LastProcessedTime + } + return nil +} + +func (x *GeneratorState) GetFutureActionTimes() []*timestamppb.Timestamp { + if x != nil { + return x.FutureActionTimes + } + return nil +} + +// CHASM scheduler's Invoker internal state. +type InvokerState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Buffered starts that will be started by the Invoker. + BufferedStarts []*v11.BufferedStart `protobuf:"bytes,2,rep,name=buffered_starts,json=bufferedStarts,proto3" json:"buffered_starts,omitempty"` + // Workflow executions that will be cancelled due to overlap policy. 
+ CancelWorkflows []*v12.WorkflowExecution `protobuf:"bytes,3,rep,name=cancel_workflows,json=cancelWorkflows,proto3" json:"cancel_workflows,omitempty"` + // Workflow executions that will be terminated due to overlap policy. + TerminateWorkflows []*v12.WorkflowExecution `protobuf:"bytes,4,rep,name=terminate_workflows,json=terminateWorkflows,proto3" json:"terminate_workflows,omitempty"` + // High water mark, used for evaluating when to fire tasks that are backing + // off from a retry. LastProcessedTime is stored as state so that task + // generation will be consistent, regardless of when generation occurs, such + // as after applying a replicated state (as opposed to evaluating based on + // present time). + LastProcessedTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=last_processed_time,json=lastProcessedTime,proto3" json:"last_processed_time,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokerState) Reset() { + *x = InvokerState{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokerState) ProtoMessage() {} + +func (x *InvokerState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokerState.ProtoReflect.Descriptor instead. 
+func (*InvokerState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{3} +} + +func (x *InvokerState) GetBufferedStarts() []*v11.BufferedStart { + if x != nil { + return x.BufferedStarts + } + return nil +} + +func (x *InvokerState) GetCancelWorkflows() []*v12.WorkflowExecution { + if x != nil { + return x.CancelWorkflows + } + return nil +} + +func (x *InvokerState) GetTerminateWorkflows() []*v12.WorkflowExecution { + if x != nil { + return x.TerminateWorkflows + } + return nil +} + +func (x *InvokerState) GetLastProcessedTime() *timestamppb.Timestamp { + if x != nil { + return x.LastProcessedTime + } + return nil +} + +// CHASM scheduler's Backfiller internal state. Backfill requests are 1:1 +// with Backfiller nodes. Backfiller nodes also handle immediate trigger requests. +type BackfillerState struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Request: + // + // *BackfillerState_BackfillRequest + // *BackfillerState_TriggerRequest + Request isBackfillerState_Request `protobuf_oneof:"request"` + // Every Backfiller should be assigned a unique ID upon creation, used + // for deduplication. + BackfillId string `protobuf:"bytes,6,opt,name=backfill_id,json=backfillId,proto3" json:"backfill_id,omitempty"` + // High water mark. + LastProcessedTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=last_processed_time,json=lastProcessedTime,proto3" json:"last_processed_time,omitempty"` + // Attempt count, incremented when the buffer is full and the Backfiller + // needs to back off before retrying to fill. 
+ Attempt int64 `protobuf:"varint,8,opt,name=attempt,proto3" json:"attempt,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackfillerState) Reset() { + *x = BackfillerState{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackfillerState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillerState) ProtoMessage() {} + +func (x *BackfillerState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillerState.ProtoReflect.Descriptor instead. +func (*BackfillerState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{4} +} + +func (x *BackfillerState) GetRequest() isBackfillerState_Request { + if x != nil { + return x.Request + } + return nil +} + +func (x *BackfillerState) GetBackfillRequest() *v1.BackfillRequest { + if x != nil { + if x, ok := x.Request.(*BackfillerState_BackfillRequest); ok { + return x.BackfillRequest + } + } + return nil +} + +func (x *BackfillerState) GetTriggerRequest() *v1.TriggerImmediatelyRequest { + if x != nil { + if x, ok := x.Request.(*BackfillerState_TriggerRequest); ok { + return x.TriggerRequest + } + } + return nil +} + +func (x *BackfillerState) GetBackfillId() string { + if x != nil { + return x.BackfillId + } + return "" +} + +func (x *BackfillerState) GetLastProcessedTime() *timestamppb.Timestamp { + if x != nil { + return x.LastProcessedTime + } + return nil +} + +func (x *BackfillerState) GetAttempt() int64 { + if x != nil { + return x.Attempt + } + return 0 +} + +type 
isBackfillerState_Request interface { + isBackfillerState_Request() +} + +type BackfillerState_BackfillRequest struct { + BackfillRequest *v1.BackfillRequest `protobuf:"bytes,1,opt,name=backfill_request,json=backfillRequest,proto3,oneof"` +} + +type BackfillerState_TriggerRequest struct { + // When set, immediately buffer a single manual action. + TriggerRequest *v1.TriggerImmediatelyRequest `protobuf:"bytes,2,opt,name=trigger_request,json=triggerRequest,proto3,oneof"` +} + +func (*BackfillerState_BackfillRequest) isBackfillerState_Request() {} + +func (*BackfillerState_TriggerRequest) isBackfillerState_Request() {} + +// CHASM scheduler retains the payload data for the last completed workflow. Both +// last success and failure are stored simultaneously. +type LastCompletionResult struct { + state protoimpl.MessageState `protogen:"open.v1"` + Success *v12.Payload `protobuf:"bytes,1,opt,name=success,proto3" json:"success,omitempty"` + Failure *v13.Failure `protobuf:"bytes,2,opt,name=failure,proto3" json:"failure,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *LastCompletionResult) Reset() { + *x = LastCompletionResult{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *LastCompletionResult) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LastCompletionResult) ProtoMessage() {} + +func (x *LastCompletionResult) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LastCompletionResult.ProtoReflect.Descriptor instead. 
+func (*LastCompletionResult) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{5} +} + +func (x *LastCompletionResult) GetSuccess() *v12.Payload { + if x != nil { + return x.Success + } + return nil +} + +func (x *LastCompletionResult) GetFailure() *v13.Failure { + if x != nil { + return x.Failure + } + return nil +} + +// SchedulerMigrationState is a stack-agnostic interchange format for migrating +// scheduler state between V1 (workflow-backed) and V2 (CHASM) implementations. +type SchedulerMigrationState struct { + state protoimpl.MessageState `protogen:"open.v1"` + SchedulerState *SchedulerState `protobuf:"bytes,1,opt,name=scheduler_state,json=schedulerState,proto3" json:"scheduler_state,omitempty"` + GeneratorState *GeneratorState `protobuf:"bytes,2,opt,name=generator_state,json=generatorState,proto3" json:"generator_state,omitempty"` + InvokerState *InvokerState `protobuf:"bytes,3,opt,name=invoker_state,json=invokerState,proto3" json:"invoker_state,omitempty"` + Backfillers map[string]*BackfillerState `protobuf:"bytes,4,rep,name=backfillers,proto3" json:"backfillers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + LastCompletionResult *LastCompletionResult `protobuf:"bytes,5,opt,name=last_completion_result,json=lastCompletionResult,proto3" json:"last_completion_result,omitempty"` + // Visibility data. 
+ SearchAttributes map[string]*v12.Payload `protobuf:"bytes,6,rep,name=search_attributes,json=searchAttributes,proto3" json:"search_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Memo map[string]*v12.Payload `protobuf:"bytes,7,rep,name=memo,proto3" json:"memo,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchedulerMigrationState) Reset() { + *x = SchedulerMigrationState{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchedulerMigrationState) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulerMigrationState) ProtoMessage() {} + +func (x *SchedulerMigrationState) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulerMigrationState.ProtoReflect.Descriptor instead. 
+func (*SchedulerMigrationState) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP(), []int{6} +} + +func (x *SchedulerMigrationState) GetSchedulerState() *SchedulerState { + if x != nil { + return x.SchedulerState + } + return nil +} + +func (x *SchedulerMigrationState) GetGeneratorState() *GeneratorState { + if x != nil { + return x.GeneratorState + } + return nil +} + +func (x *SchedulerMigrationState) GetInvokerState() *InvokerState { + if x != nil { + return x.InvokerState + } + return nil +} + +func (x *SchedulerMigrationState) GetBackfillers() map[string]*BackfillerState { + if x != nil { + return x.Backfillers + } + return nil +} + +func (x *SchedulerMigrationState) GetLastCompletionResult() *LastCompletionResult { + if x != nil { + return x.LastCompletionResult + } + return nil +} + +func (x *SchedulerMigrationState) GetSearchAttributes() map[string]*v12.Payload { + if x != nil { + return x.SearchAttributes + } + return nil +} + +func (x *SchedulerMigrationState) GetMemo() map[string]*v12.Payload { + if x != nil { + return x.Memo + } + return nil +} + +var File_temporal_server_chasm_lib_scheduler_proto_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDesc = "" + + "\n" + + ":temporal/server/chasm/lib/scheduler/proto/v1/message.proto\x12,temporal.server.chasm.lib.scheduler.proto.v1\x1a\x1fgoogle/protobuf/timestamp.proto\x1a$temporal/api/common/v1/message.proto\x1a%temporal/api/failure/v1/message.proto\x1a&temporal/api/schedule/v1/message.proto\x1a-temporal/server/api/schedule/v1/message.proto\"\xbe\x03\n" + + "\x0eSchedulerState\x12>\n" + + "\bschedule\x18\x02 \x01(\v2\".temporal.api.schedule.v1.ScheduleR\bschedule\x12:\n" + + "\x04info\x18\x03 \x01(\v2&.temporal.api.schedule.v1.ScheduleInfoR\x04info\x12\x1c\n" + + "\tnamespace\x18\x05 \x01(\tR\tnamespace\x12!\n" + + "\fnamespace_id\x18\x06 
\x01(\tR\vnamespaceId\x12\x1f\n" + + "\vschedule_id\x18\a \x01(\tR\n" + + "scheduleId\x12%\n" + + "\x0econflict_token\x18\b \x01(\x03R\rconflictToken\x12\x16\n" + + "\x06closed\x18\t \x01(\bR\x06closed\x12\x1a\n" + + "\bsentinel\x18\n" + + " \x01(\bR\bsentinel\x12s\n" + + "\x12workflow_migration\x18\v \x01(\v2D.temporal.server.chasm.lib.scheduler.proto.v1.WorkflowMigrationStateR\x11workflowMigration\"z\n" + + "\x16WorkflowMigrationState\x120\n" + + "\x14pre_migration_paused\x18\x01 \x01(\bR\x12preMigrationPaused\x12.\n" + + "\x13pre_migration_notes\x18\x02 \x01(\tR\x11preMigrationNotes\"\xa8\x01\n" + + "\x0eGeneratorState\x12J\n" + + "\x13last_processed_time\x18\x03 \x01(\v2\x1a.google.protobuf.TimestampR\x11lastProcessedTime\x12J\n" + + "\x13future_action_times\x18\x04 \x03(\v2\x1a.google.protobuf.TimestampR\x11futureActionTimes\"\xeb\x02\n" + + "\fInvokerState\x12W\n" + + "\x0fbuffered_starts\x18\x02 \x03(\v2..temporal.server.api.schedule.v1.BufferedStartR\x0ebufferedStarts\x12T\n" + + "\x10cancel_workflows\x18\x03 \x03(\v2).temporal.api.common.v1.WorkflowExecutionR\x0fcancelWorkflows\x12Z\n" + + "\x13terminate_workflows\x18\x04 \x03(\v2).temporal.api.common.v1.WorkflowExecutionR\x12terminateWorkflows\x12J\n" + + "\x13last_processed_time\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\x11lastProcessedTimeJ\x04\b\x06\x10\a\"\xdb\x02\n" + + "\x0fBackfillerState\x12V\n" + + "\x10backfill_request\x18\x01 \x01(\v2).temporal.api.schedule.v1.BackfillRequestH\x00R\x0fbackfillRequest\x12^\n" + + "\x0ftrigger_request\x18\x02 \x01(\v23.temporal.api.schedule.v1.TriggerImmediatelyRequestH\x00R\x0etriggerRequest\x12\x1f\n" + + "\vbackfill_id\x18\x06 \x01(\tR\n" + + "backfillId\x12J\n" + + "\x13last_processed_time\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\x11lastProcessedTime\x12\x18\n" + + "\aattempt\x18\b \x01(\x03R\aattemptB\t\n" + + "\arequest\"\x8d\x01\n" + + "\x14LastCompletionResult\x129\n" + + "\asuccess\x18\x01 
\x01(\v2\x1f.temporal.api.common.v1.PayloadR\asuccess\x12:\n" + + "\afailure\x18\x02 \x01(\v2 .temporal.api.failure.v1.FailureR\afailure\"\xeb\b\n" + + "\x17SchedulerMigrationState\x12e\n" + + "\x0fscheduler_state\x18\x01 \x01(\v2<.temporal.server.chasm.lib.scheduler.proto.v1.SchedulerStateR\x0eschedulerState\x12e\n" + + "\x0fgenerator_state\x18\x02 \x01(\v2<.temporal.server.chasm.lib.scheduler.proto.v1.GeneratorStateR\x0egeneratorState\x12_\n" + + "\rinvoker_state\x18\x03 \x01(\v2:.temporal.server.chasm.lib.scheduler.proto.v1.InvokerStateR\finvokerState\x12x\n" + + "\vbackfillers\x18\x04 \x03(\v2V.temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.BackfillersEntryR\vbackfillers\x12x\n" + + "\x16last_completion_result\x18\x05 \x01(\v2B.temporal.server.chasm.lib.scheduler.proto.v1.LastCompletionResultR\x14lastCompletionResult\x12\x88\x01\n" + + "\x11search_attributes\x18\x06 \x03(\v2[.temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.SearchAttributesEntryR\x10searchAttributes\x12c\n" + + "\x04memo\x18\a \x03(\v2O.temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.MemoEntryR\x04memo\x1a}\n" + + "\x10BackfillersEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12S\n" + + "\x05value\x18\x02 \x01(\v2=.temporal.server.chasm.lib.scheduler.proto.v1.BackfillerStateR\x05value:\x028\x01\x1ad\n" + + "\x15SearchAttributesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x125\n" + + "\x05value\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x05value:\x028\x01\x1aX\n" + + "\tMemoEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x125\n" + + "\x05value\x18\x02 \x01(\v2\x1f.temporal.api.common.v1.PayloadR\x05value:\x028\x01BGZEgo.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescData []byte +) + +func 
file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDescData +} + +var file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_goTypes = []any{ + (*SchedulerState)(nil), // 0: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerState + (*WorkflowMigrationState)(nil), // 1: temporal.server.chasm.lib.scheduler.proto.v1.WorkflowMigrationState + (*GeneratorState)(nil), // 2: temporal.server.chasm.lib.scheduler.proto.v1.GeneratorState + (*InvokerState)(nil), // 3: temporal.server.chasm.lib.scheduler.proto.v1.InvokerState + (*BackfillerState)(nil), // 4: temporal.server.chasm.lib.scheduler.proto.v1.BackfillerState + (*LastCompletionResult)(nil), // 5: temporal.server.chasm.lib.scheduler.proto.v1.LastCompletionResult + (*SchedulerMigrationState)(nil), // 6: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState + nil, // 7: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.BackfillersEntry + nil, // 8: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.SearchAttributesEntry + nil, // 9: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.MemoEntry + (*v1.Schedule)(nil), // 10: temporal.api.schedule.v1.Schedule + (*v1.ScheduleInfo)(nil), // 11: temporal.api.schedule.v1.ScheduleInfo + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp + (*v11.BufferedStart)(nil), // 13: 
temporal.server.api.schedule.v1.BufferedStart + (*v12.WorkflowExecution)(nil), // 14: temporal.api.common.v1.WorkflowExecution + (*v1.BackfillRequest)(nil), // 15: temporal.api.schedule.v1.BackfillRequest + (*v1.TriggerImmediatelyRequest)(nil), // 16: temporal.api.schedule.v1.TriggerImmediatelyRequest + (*v12.Payload)(nil), // 17: temporal.api.common.v1.Payload + (*v13.Failure)(nil), // 18: temporal.api.failure.v1.Failure +} +var file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_depIdxs = []int32{ + 10, // 0: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerState.schedule:type_name -> temporal.api.schedule.v1.Schedule + 11, // 1: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerState.info:type_name -> temporal.api.schedule.v1.ScheduleInfo + 1, // 2: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerState.workflow_migration:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.WorkflowMigrationState + 12, // 3: temporal.server.chasm.lib.scheduler.proto.v1.GeneratorState.last_processed_time:type_name -> google.protobuf.Timestamp + 12, // 4: temporal.server.chasm.lib.scheduler.proto.v1.GeneratorState.future_action_times:type_name -> google.protobuf.Timestamp + 13, // 5: temporal.server.chasm.lib.scheduler.proto.v1.InvokerState.buffered_starts:type_name -> temporal.server.api.schedule.v1.BufferedStart + 14, // 6: temporal.server.chasm.lib.scheduler.proto.v1.InvokerState.cancel_workflows:type_name -> temporal.api.common.v1.WorkflowExecution + 14, // 7: temporal.server.chasm.lib.scheduler.proto.v1.InvokerState.terminate_workflows:type_name -> temporal.api.common.v1.WorkflowExecution + 12, // 8: temporal.server.chasm.lib.scheduler.proto.v1.InvokerState.last_processed_time:type_name -> google.protobuf.Timestamp + 15, // 9: temporal.server.chasm.lib.scheduler.proto.v1.BackfillerState.backfill_request:type_name -> temporal.api.schedule.v1.BackfillRequest + 16, // 10: 
temporal.server.chasm.lib.scheduler.proto.v1.BackfillerState.trigger_request:type_name -> temporal.api.schedule.v1.TriggerImmediatelyRequest + 12, // 11: temporal.server.chasm.lib.scheduler.proto.v1.BackfillerState.last_processed_time:type_name -> google.protobuf.Timestamp + 17, // 12: temporal.server.chasm.lib.scheduler.proto.v1.LastCompletionResult.success:type_name -> temporal.api.common.v1.Payload + 18, // 13: temporal.server.chasm.lib.scheduler.proto.v1.LastCompletionResult.failure:type_name -> temporal.api.failure.v1.Failure + 0, // 14: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.scheduler_state:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.SchedulerState + 2, // 15: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.generator_state:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.GeneratorState + 3, // 16: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.invoker_state:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.InvokerState + 7, // 17: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.backfillers:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.BackfillersEntry + 5, // 18: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.last_completion_result:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.LastCompletionResult + 8, // 19: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.search_attributes:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.SearchAttributesEntry + 9, // 20: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.memo:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.MemoEntry + 4, // 21: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.BackfillersEntry.value:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.BackfillerState + 17, // 22: 
temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.SearchAttributesEntry.value:type_name -> temporal.api.common.v1.Payload + 17, // 23: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState.MemoEntry.value:type_name -> temporal.api.common.v1.Payload + 24, // [24:24] is the sub-list for method output_type + 24, // [24:24] is the sub-list for method input_type + 24, // [24:24] is the sub-list for extension type_name + 24, // [24:24] is the sub-list for extension extendee + 0, // [0:24] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_init() } +func file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_init() { + if File_temporal_server_chasm_lib_scheduler_proto_v1_message_proto != nil { + return + } + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes[4].OneofWrappers = []any{ + (*BackfillerState_BackfillRequest)(nil), + (*BackfillerState_TriggerRequest)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_rawDesc)), + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_scheduler_proto_v1_message_proto = out.File + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_goTypes = nil + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_depIdxs = nil +} diff --git 
a/chasm/lib/scheduler/gen/schedulerpb/v1/request_response.go-helpers.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..c0bf15355a8 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/request_response.go-helpers.pb.go @@ -0,0 +1,672 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package schedulerpb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type CreateScheduleRequest to the protobuf v3 wire format +func (val *CreateScheduleRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateScheduleRequest from the protobuf v3 wire format +func (val *CreateScheduleRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateScheduleRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateScheduleRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateScheduleRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateScheduleRequest + switch t := that.(type) { + case *CreateScheduleRequest: + that1 = t + case CreateScheduleRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateScheduleResponse to the protobuf v3 wire format +func (val *CreateScheduleResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateScheduleResponse from the protobuf v3 wire format +func (val *CreateScheduleResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateScheduleResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateScheduleResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateScheduleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateScheduleResponse + switch t := that.(type) { + case *CreateScheduleResponse: + that1 = t + case CreateScheduleResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateScheduleRequest to the protobuf v3 wire format +func (val *UpdateScheduleRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateScheduleRequest from the protobuf v3 wire format +func (val *UpdateScheduleRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateScheduleRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateScheduleRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateScheduleRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateScheduleRequest + switch t := that.(type) { + case *UpdateScheduleRequest: + that1 = t + case UpdateScheduleRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type UpdateScheduleResponse to the protobuf v3 wire format +func (val *UpdateScheduleResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type UpdateScheduleResponse from the protobuf v3 wire format +func (val *UpdateScheduleResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *UpdateScheduleResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two UpdateScheduleResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *UpdateScheduleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *UpdateScheduleResponse + switch t := that.(type) { + case *UpdateScheduleResponse: + that1 = t + case UpdateScheduleResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PatchScheduleRequest to the protobuf v3 wire format +func (val *PatchScheduleRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PatchScheduleRequest from the protobuf v3 wire format +func (val *PatchScheduleRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PatchScheduleRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PatchScheduleRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PatchScheduleRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PatchScheduleRequest + switch t := that.(type) { + case *PatchScheduleRequest: + that1 = t + case PatchScheduleRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type PatchScheduleResponse to the protobuf v3 wire format +func (val *PatchScheduleResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type PatchScheduleResponse from the protobuf v3 wire format +func (val *PatchScheduleResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *PatchScheduleResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two PatchScheduleResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *PatchScheduleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *PatchScheduleResponse + switch t := that.(type) { + case *PatchScheduleResponse: + that1 = t + case PatchScheduleResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteScheduleRequest to the protobuf v3 wire format +func (val *DeleteScheduleRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteScheduleRequest from the protobuf v3 wire format +func (val *DeleteScheduleRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteScheduleRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteScheduleRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteScheduleRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteScheduleRequest + switch t := that.(type) { + case *DeleteScheduleRequest: + that1 = t + case DeleteScheduleRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DeleteScheduleResponse to the protobuf v3 wire format +func (val *DeleteScheduleResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DeleteScheduleResponse from the protobuf v3 wire format +func (val *DeleteScheduleResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DeleteScheduleResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DeleteScheduleResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DeleteScheduleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DeleteScheduleResponse + switch t := that.(type) { + case *DeleteScheduleResponse: + that1 = t + case DeleteScheduleResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeScheduleRequest to the protobuf v3 wire format +func (val *DescribeScheduleRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeScheduleRequest from the protobuf v3 wire format +func (val *DescribeScheduleRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeScheduleRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeScheduleRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeScheduleRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeScheduleRequest + switch t := that.(type) { + case *DescribeScheduleRequest: + that1 = t + case DescribeScheduleRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type DescribeScheduleResponse to the protobuf v3 wire format +func (val *DescribeScheduleResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type DescribeScheduleResponse from the protobuf v3 wire format +func (val *DescribeScheduleResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *DescribeScheduleResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two DescribeScheduleResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *DescribeScheduleResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *DescribeScheduleResponse + switch t := that.(type) { + case *DescribeScheduleResponse: + that1 = t + case DescribeScheduleResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ListScheduleMatchingTimesRequest to the protobuf v3 wire format +func (val *ListScheduleMatchingTimesRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ListScheduleMatchingTimesRequest from the protobuf v3 wire format +func (val *ListScheduleMatchingTimesRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ListScheduleMatchingTimesRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ListScheduleMatchingTimesRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ListScheduleMatchingTimesRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ListScheduleMatchingTimesRequest + switch t := that.(type) { + case *ListScheduleMatchingTimesRequest: + that1 = t + case ListScheduleMatchingTimesRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type ListScheduleMatchingTimesResponse to the protobuf v3 wire format +func (val *ListScheduleMatchingTimesResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type ListScheduleMatchingTimesResponse from the protobuf v3 wire format +func (val *ListScheduleMatchingTimesResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *ListScheduleMatchingTimesResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two ListScheduleMatchingTimesResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *ListScheduleMatchingTimesResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *ListScheduleMatchingTimesResponse + switch t := that.(type) { + case *ListScheduleMatchingTimesResponse: + that1 = t + case ListScheduleMatchingTimesResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateFromMigrationStateRequest to the protobuf v3 wire format +func (val *CreateFromMigrationStateRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateFromMigrationStateRequest from the protobuf v3 wire format +func (val *CreateFromMigrationStateRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateFromMigrationStateRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateFromMigrationStateRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateFromMigrationStateRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateFromMigrationStateRequest + switch t := that.(type) { + case *CreateFromMigrationStateRequest: + that1 = t + case CreateFromMigrationStateRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateFromMigrationStateResponse to the protobuf v3 wire format +func (val *CreateFromMigrationStateResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateFromMigrationStateResponse from the protobuf v3 wire format +func (val *CreateFromMigrationStateResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateFromMigrationStateResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateFromMigrationStateResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateFromMigrationStateResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateFromMigrationStateResponse + switch t := that.(type) { + case *CreateFromMigrationStateResponse: + that1 = t + case CreateFromMigrationStateResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateSentinelRequest to the protobuf v3 wire format +func (val *CreateSentinelRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateSentinelRequest from the protobuf v3 wire format +func (val *CreateSentinelRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateSentinelRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateSentinelRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateSentinelRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateSentinelRequest + switch t := that.(type) { + case *CreateSentinelRequest: + that1 = t + case CreateSentinelRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type CreateSentinelResponse to the protobuf v3 wire format +func (val *CreateSentinelResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type CreateSentinelResponse from the protobuf v3 wire format +func (val *CreateSentinelResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *CreateSentinelResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two CreateSentinelResponse values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *CreateSentinelResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *CreateSentinelResponse + switch t := that.(type) { + case *CreateSentinelResponse: + that1 = t + case CreateSentinelResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MigrateToWorkflowRequest to the protobuf v3 wire format +func (val *MigrateToWorkflowRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MigrateToWorkflowRequest from the protobuf v3 wire format +func (val *MigrateToWorkflowRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MigrateToWorkflowRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MigrateToWorkflowRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MigrateToWorkflowRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MigrateToWorkflowRequest + switch t := that.(type) { + case *MigrateToWorkflowRequest: + that1 = t + case MigrateToWorkflowRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type MigrateToWorkflowResponse to the protobuf v3 wire format +func (val *MigrateToWorkflowResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type MigrateToWorkflowResponse from the protobuf v3 wire format +func (val *MigrateToWorkflowResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *MigrateToWorkflowResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two MigrateToWorkflowResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *MigrateToWorkflowResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *MigrateToWorkflowResponse + switch t := that.(type) { + case *MigrateToWorkflowResponse: + that1 = t + case MigrateToWorkflowResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/scheduler/gen/schedulerpb/v1/request_response.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/request_response.pb.go new file mode 100644 index 00000000000..1329a1557b3 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/request_response.pb.go @@ -0,0 +1,1046 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/scheduler/proto/v1/request_response.proto + +package schedulerpb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + v1 "go.temporal.io/api/workflowservice/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateScheduleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.CreateScheduleRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateScheduleRequest) Reset() { + *x = CreateScheduleRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateScheduleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateScheduleRequest) ProtoMessage() {} + +func (x *CreateScheduleRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateScheduleRequest.ProtoReflect.Descriptor instead. 
+func (*CreateScheduleRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateScheduleRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CreateScheduleRequest) GetFrontendRequest() *v1.CreateScheduleRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type CreateScheduleResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.CreateScheduleResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateScheduleResponse) Reset() { + *x = CreateScheduleResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateScheduleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateScheduleResponse) ProtoMessage() {} + +func (x *CreateScheduleResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateScheduleResponse.ProtoReflect.Descriptor instead. 
+func (*CreateScheduleResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateScheduleResponse) GetFrontendResponse() *v1.CreateScheduleResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type UpdateScheduleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.UpdateScheduleRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateScheduleRequest) Reset() { + *x = UpdateScheduleRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateScheduleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateScheduleRequest) ProtoMessage() {} + +func (x *UpdateScheduleRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateScheduleRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateScheduleRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{2} +} + +func (x *UpdateScheduleRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *UpdateScheduleRequest) GetFrontendRequest() *v1.UpdateScheduleRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type UpdateScheduleResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.UpdateScheduleResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateScheduleResponse) Reset() { + *x = UpdateScheduleResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateScheduleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateScheduleResponse) ProtoMessage() {} + +func (x *UpdateScheduleResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateScheduleResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateScheduleResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{3} +} + +func (x *UpdateScheduleResponse) GetFrontendResponse() *v1.UpdateScheduleResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type PatchScheduleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.PatchScheduleRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PatchScheduleRequest) Reset() { + *x = PatchScheduleRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PatchScheduleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PatchScheduleRequest) ProtoMessage() {} + +func (x *PatchScheduleRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PatchScheduleRequest.ProtoReflect.Descriptor instead. 
+func (*PatchScheduleRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{4} +} + +func (x *PatchScheduleRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *PatchScheduleRequest) GetFrontendRequest() *v1.PatchScheduleRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type PatchScheduleResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.PatchScheduleResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PatchScheduleResponse) Reset() { + *x = PatchScheduleResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PatchScheduleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PatchScheduleResponse) ProtoMessage() {} + +func (x *PatchScheduleResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PatchScheduleResponse.ProtoReflect.Descriptor instead. 
+func (*PatchScheduleResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{5} +} + +func (x *PatchScheduleResponse) GetFrontendResponse() *v1.PatchScheduleResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type DeleteScheduleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DeleteScheduleRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteScheduleRequest) Reset() { + *x = DeleteScheduleRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteScheduleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteScheduleRequest) ProtoMessage() {} + +func (x *DeleteScheduleRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteScheduleRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteScheduleRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{6} +} + +func (x *DeleteScheduleRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DeleteScheduleRequest) GetFrontendRequest() *v1.DeleteScheduleRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DeleteScheduleResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.DeleteScheduleResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteScheduleResponse) Reset() { + *x = DeleteScheduleResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteScheduleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteScheduleResponse) ProtoMessage() {} + +func (x *DeleteScheduleResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteScheduleResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteScheduleResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{7} +} + +func (x *DeleteScheduleResponse) GetFrontendResponse() *v1.DeleteScheduleResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type DescribeScheduleRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.DescribeScheduleRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeScheduleRequest) Reset() { + *x = DescribeScheduleRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeScheduleRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeScheduleRequest) ProtoMessage() {} + +func (x *DescribeScheduleRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeScheduleRequest.ProtoReflect.Descriptor instead. 
+func (*DescribeScheduleRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{8} +} + +func (x *DescribeScheduleRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *DescribeScheduleRequest) GetFrontendRequest() *v1.DescribeScheduleRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type DescribeScheduleResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.DescribeScheduleResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DescribeScheduleResponse) Reset() { + *x = DescribeScheduleResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DescribeScheduleResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DescribeScheduleResponse) ProtoMessage() {} + +func (x *DescribeScheduleResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DescribeScheduleResponse.ProtoReflect.Descriptor instead. 
+func (*DescribeScheduleResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{9} +} + +func (x *DescribeScheduleResponse) GetFrontendResponse() *v1.DescribeScheduleResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type ListScheduleMatchingTimesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + FrontendRequest *v1.ListScheduleMatchingTimesRequest `protobuf:"bytes,2,opt,name=frontend_request,json=frontendRequest,proto3" json:"frontend_request,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListScheduleMatchingTimesRequest) Reset() { + *x = ListScheduleMatchingTimesRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListScheduleMatchingTimesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListScheduleMatchingTimesRequest) ProtoMessage() {} + +func (x *ListScheduleMatchingTimesRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListScheduleMatchingTimesRequest.ProtoReflect.Descriptor instead. 
+func (*ListScheduleMatchingTimesRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{10} +} + +func (x *ListScheduleMatchingTimesRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *ListScheduleMatchingTimesRequest) GetFrontendRequest() *v1.ListScheduleMatchingTimesRequest { + if x != nil { + return x.FrontendRequest + } + return nil +} + +type ListScheduleMatchingTimesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + FrontendResponse *v1.ListScheduleMatchingTimesResponse `protobuf:"bytes,1,opt,name=frontend_response,json=frontendResponse,proto3" json:"frontend_response,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListScheduleMatchingTimesResponse) Reset() { + *x = ListScheduleMatchingTimesResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListScheduleMatchingTimesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListScheduleMatchingTimesResponse) ProtoMessage() {} + +func (x *ListScheduleMatchingTimesResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListScheduleMatchingTimesResponse.ProtoReflect.Descriptor instead. 
+func (*ListScheduleMatchingTimesResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{11} +} + +func (x *ListScheduleMatchingTimesResponse) GetFrontendResponse() *v1.ListScheduleMatchingTimesResponse { + if x != nil { + return x.FrontendResponse + } + return nil +} + +type CreateFromMigrationStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). + NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + State *SchedulerMigrationState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateFromMigrationStateRequest) Reset() { + *x = CreateFromMigrationStateRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateFromMigrationStateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateFromMigrationStateRequest) ProtoMessage() {} + +func (x *CreateFromMigrationStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateFromMigrationStateRequest.ProtoReflect.Descriptor instead. 
+func (*CreateFromMigrationStateRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{12} +} + +func (x *CreateFromMigrationStateRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CreateFromMigrationStateRequest) GetState() *SchedulerMigrationState { + if x != nil { + return x.State + } + return nil +} + +type CreateFromMigrationStateResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateFromMigrationStateResponse) Reset() { + *x = CreateFromMigrationStateResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateFromMigrationStateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateFromMigrationStateResponse) ProtoMessage() {} + +func (x *CreateFromMigrationStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateFromMigrationStateResponse.ProtoReflect.Descriptor instead. +func (*CreateFromMigrationStateResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{13} +} + +type CreateSentinelRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Internal namespace ID (UUID). 
+ NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + ScheduleId string `protobuf:"bytes,3,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSentinelRequest) Reset() { + *x = CreateSentinelRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSentinelRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSentinelRequest) ProtoMessage() {} + +func (x *CreateSentinelRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSentinelRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSentinelRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{14} +} + +func (x *CreateSentinelRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *CreateSentinelRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *CreateSentinelRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +type CreateSentinelResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSentinelResponse) Reset() { + *x = CreateSentinelResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSentinelResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSentinelResponse) ProtoMessage() {} + +func (x *CreateSentinelResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSentinelResponse.ProtoReflect.Descriptor instead. +func (*CreateSentinelResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{15} +} + +type MigrateToWorkflowRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The namespace ID of the schedule to migrate. 
+ NamespaceId string `protobuf:"bytes,1,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` + // The schedule ID to migrate from CHASM to workflow-backed. + ScheduleId string `protobuf:"bytes,2,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` + // The identity of the caller initiating the migration. + Identity string `protobuf:"bytes,3,opt,name=identity,proto3" json:"identity,omitempty"` + // A unique request ID for idempotency. + RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrateToWorkflowRequest) Reset() { + *x = MigrateToWorkflowRequest{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrateToWorkflowRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateToWorkflowRequest) ProtoMessage() {} + +func (x *MigrateToWorkflowRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateToWorkflowRequest.ProtoReflect.Descriptor instead. 
+func (*MigrateToWorkflowRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{16} +} + +func (x *MigrateToWorkflowRequest) GetNamespaceId() string { + if x != nil { + return x.NamespaceId + } + return "" +} + +func (x *MigrateToWorkflowRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +func (x *MigrateToWorkflowRequest) GetIdentity() string { + if x != nil { + return x.Identity + } + return "" +} + +func (x *MigrateToWorkflowRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type MigrateToWorkflowResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *MigrateToWorkflowResponse) Reset() { + *x = MigrateToWorkflowResponse{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *MigrateToWorkflowResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MigrateToWorkflowResponse) ProtoMessage() {} + +func (x *MigrateToWorkflowResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MigrateToWorkflowResponse.ProtoReflect.Descriptor instead. 
+func (*MigrateToWorkflowResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP(), []int{17} +} + +var File_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDesc = "" + + "\n" + + "Ctemporal/server/chasm/lib/scheduler/proto/v1/request_response.proto\x12,temporal.server.chasm.lib.scheduler.proto.v1\x1a:temporal/server/chasm/lib/scheduler/proto/v1/message.proto\x1a6temporal/api/workflowservice/v1/request_response.proto\"\x9d\x01\n" + + "\x15CreateScheduleRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12a\n" + + "\x10frontend_request\x18\x02 \x01(\v26.temporal.api.workflowservice.v1.CreateScheduleRequestR\x0ffrontendRequest\"~\n" + + "\x16CreateScheduleResponse\x12d\n" + + "\x11frontend_response\x18\x01 \x01(\v27.temporal.api.workflowservice.v1.CreateScheduleResponseR\x10frontendResponse\"\x9d\x01\n" + + "\x15UpdateScheduleRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12a\n" + + "\x10frontend_request\x18\x02 \x01(\v26.temporal.api.workflowservice.v1.UpdateScheduleRequestR\x0ffrontendRequest\"~\n" + + "\x16UpdateScheduleResponse\x12d\n" + + "\x11frontend_response\x18\x01 \x01(\v27.temporal.api.workflowservice.v1.UpdateScheduleResponseR\x10frontendResponse\"\x9b\x01\n" + + "\x14PatchScheduleRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12`\n" + + "\x10frontend_request\x18\x02 \x01(\v25.temporal.api.workflowservice.v1.PatchScheduleRequestR\x0ffrontendRequest\"|\n" + + "\x15PatchScheduleResponse\x12c\n" + + "\x11frontend_response\x18\x01 \x01(\v26.temporal.api.workflowservice.v1.PatchScheduleResponseR\x10frontendResponse\"\x9d\x01\n" + + "\x15DeleteScheduleRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12a\n" + + "\x10frontend_request\x18\x02 
\x01(\v26.temporal.api.workflowservice.v1.DeleteScheduleRequestR\x0ffrontendRequest\"~\n" + + "\x16DeleteScheduleResponse\x12d\n" + + "\x11frontend_response\x18\x01 \x01(\v27.temporal.api.workflowservice.v1.DeleteScheduleResponseR\x10frontendResponse\"\xa1\x01\n" + + "\x17DescribeScheduleRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12c\n" + + "\x10frontend_request\x18\x02 \x01(\v28.temporal.api.workflowservice.v1.DescribeScheduleRequestR\x0ffrontendRequest\"\x82\x01\n" + + "\x18DescribeScheduleResponse\x12f\n" + + "\x11frontend_response\x18\x01 \x01(\v29.temporal.api.workflowservice.v1.DescribeScheduleResponseR\x10frontendResponse\"\xb3\x01\n" + + " ListScheduleMatchingTimesRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12l\n" + + "\x10frontend_request\x18\x02 \x01(\v2A.temporal.api.workflowservice.v1.ListScheduleMatchingTimesRequestR\x0ffrontendRequest\"\x94\x01\n" + + "!ListScheduleMatchingTimesResponse\x12o\n" + + "\x11frontend_response\x18\x01 \x01(\v2B.temporal.api.workflowservice.v1.ListScheduleMatchingTimesResponseR\x10frontendResponse\"\xa1\x01\n" + + "\x1fCreateFromMigrationStateRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12[\n" + + "\x05state\x18\x02 \x01(\v2E.temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationStateR\x05state\"\"\n" + + " CreateFromMigrationStateResponse\"y\n" + + "\x15CreateSentinelRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\tR\tnamespace\x12\x1f\n" + + "\vschedule_id\x18\x03 \x01(\tR\n" + + "scheduleId\"\x18\n" + + "\x16CreateSentinelResponse\"\x99\x01\n" + + "\x18MigrateToWorkflowRequest\x12!\n" + + "\fnamespace_id\x18\x01 \x01(\tR\vnamespaceId\x12\x1f\n" + + "\vschedule_id\x18\x02 \x01(\tR\n" + + "scheduleId\x12\x1a\n" + + "\bidentity\x18\x03 \x01(\tR\bidentity\x12\x1d\n" + + "\n" + + "request_id\x18\x04 \x01(\tR\trequestId\"\x1b\n" + + 
"\x19MigrateToWorkflowResponseBGZEgo.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDescData +} + +var file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 18) +var file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_goTypes = []any{ + (*CreateScheduleRequest)(nil), // 0: temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleRequest + (*CreateScheduleResponse)(nil), // 1: temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleResponse + (*UpdateScheduleRequest)(nil), // 2: temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleRequest + (*UpdateScheduleResponse)(nil), // 3: temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleResponse + (*PatchScheduleRequest)(nil), // 4: temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleRequest + (*PatchScheduleResponse)(nil), // 5: temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleResponse + (*DeleteScheduleRequest)(nil), // 6: temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleRequest + (*DeleteScheduleResponse)(nil), // 7: 
temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleResponse + (*DescribeScheduleRequest)(nil), // 8: temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleRequest + (*DescribeScheduleResponse)(nil), // 9: temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleResponse + (*ListScheduleMatchingTimesRequest)(nil), // 10: temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesRequest + (*ListScheduleMatchingTimesResponse)(nil), // 11: temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesResponse + (*CreateFromMigrationStateRequest)(nil), // 12: temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateRequest + (*CreateFromMigrationStateResponse)(nil), // 13: temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateResponse + (*CreateSentinelRequest)(nil), // 14: temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelRequest + (*CreateSentinelResponse)(nil), // 15: temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelResponse + (*MigrateToWorkflowRequest)(nil), // 16: temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowRequest + (*MigrateToWorkflowResponse)(nil), // 17: temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowResponse + (*v1.CreateScheduleRequest)(nil), // 18: temporal.api.workflowservice.v1.CreateScheduleRequest + (*v1.CreateScheduleResponse)(nil), // 19: temporal.api.workflowservice.v1.CreateScheduleResponse + (*v1.UpdateScheduleRequest)(nil), // 20: temporal.api.workflowservice.v1.UpdateScheduleRequest + (*v1.UpdateScheduleResponse)(nil), // 21: temporal.api.workflowservice.v1.UpdateScheduleResponse + (*v1.PatchScheduleRequest)(nil), // 22: temporal.api.workflowservice.v1.PatchScheduleRequest + (*v1.PatchScheduleResponse)(nil), // 23: temporal.api.workflowservice.v1.PatchScheduleResponse + (*v1.DeleteScheduleRequest)(nil), // 24: temporal.api.workflowservice.v1.DeleteScheduleRequest + (*v1.DeleteScheduleResponse)(nil), // 25: 
temporal.api.workflowservice.v1.DeleteScheduleResponse + (*v1.DescribeScheduleRequest)(nil), // 26: temporal.api.workflowservice.v1.DescribeScheduleRequest + (*v1.DescribeScheduleResponse)(nil), // 27: temporal.api.workflowservice.v1.DescribeScheduleResponse + (*v1.ListScheduleMatchingTimesRequest)(nil), // 28: temporal.api.workflowservice.v1.ListScheduleMatchingTimesRequest + (*v1.ListScheduleMatchingTimesResponse)(nil), // 29: temporal.api.workflowservice.v1.ListScheduleMatchingTimesResponse + (*SchedulerMigrationState)(nil), // 30: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState +} +var file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_depIdxs = []int32{ + 18, // 0: temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.CreateScheduleRequest + 19, // 1: temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.CreateScheduleResponse + 20, // 2: temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.UpdateScheduleRequest + 21, // 3: temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.UpdateScheduleResponse + 22, // 4: temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.PatchScheduleRequest + 23, // 5: temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.PatchScheduleResponse + 24, // 6: temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DeleteScheduleRequest + 25, // 7: temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleResponse.frontend_response:type_name -> 
temporal.api.workflowservice.v1.DeleteScheduleResponse + 26, // 8: temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.DescribeScheduleRequest + 27, // 9: temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.DescribeScheduleResponse + 28, // 10: temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesRequest.frontend_request:type_name -> temporal.api.workflowservice.v1.ListScheduleMatchingTimesRequest + 29, // 11: temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesResponse.frontend_response:type_name -> temporal.api.workflowservice.v1.ListScheduleMatchingTimesResponse + 30, // 12: temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateRequest.state:type_name -> temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrationState + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_init() } +func file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_init() { + if File_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto != nil { + return + } + file_temporal_server_chasm_lib_scheduler_proto_v1_message_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 18, + NumExtensions: 0, + NumServices: 
0, + }, + GoTypes: file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto = out.File + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_goTypes = nil + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_depIdxs = nil +} diff --git a/chasm/lib/scheduler/gen/schedulerpb/v1/service.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/service.pb.go new file mode 100644 index 00000000000..be6244cb83d --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/service.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/scheduler/proto/v1/service.proto + +package schedulerpb + +import ( + reflect "reflect" + unsafe "unsafe" + + _ "go.temporal.io/server/api/common/v1" + _ "go.temporal.io/server/api/routing/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_chasm_lib_scheduler_proto_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_rawDesc = "" + + "\n" + + ":temporal/server/chasm/lib/scheduler/proto/v1/service.proto\x12,temporal.server.chasm.lib.scheduler.proto.v1\x1aCtemporal/server/chasm/lib/scheduler/proto/v1/request_response.proto\x1a0temporal/server/api/common/v1/api_category.proto\x1a.temporal/server/api/routing/v1/extension.proto2\xb6\x0e\n" + + "\x10SchedulerService\x12\xc5\x01\n" + + "\x0eCreateSchedule\x12C.temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleRequest\x1aD.temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.schedule_id\x12\xc5\x01\n" + + "\x0eUpdateSchedule\x12C.temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleRequest\x1aD.temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.schedule_id\x12\xc2\x01\n" + + "\rPatchSchedule\x12B.temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleRequest\x1aC.temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.schedule_id\x12\xc5\x01\n" + + "\x0eDeleteSchedule\x12C.temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleRequest\x1aD.temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.schedule_id\x12\xcb\x01\n" + + "\x10DescribeSchedule\x12E.temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleRequest\x1aF.temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.schedule_id\x12\xe6\x01\n" + + 
"\x19ListScheduleMatchingTimes\x12N.temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesRequest\x1aO.temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesResponse\"(\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x1e\x1a\x1cfrontend_request.schedule_id\x12\xe2\x01\n" + + "\x18CreateFromMigrationState\x12M.temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateRequest\x1aN.temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateResponse\"'\xd2\xc3\x18#\x1a!state.scheduler_state.schedule_id\x12\xae\x01\n" + + "\x0eCreateSentinel\x12C.temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelRequest\x1aD.temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelResponse\"\x11\xd2\xc3\x18\r\x1a\vschedule_id\x12\xb7\x01\n" + + "\x11MigrateToWorkflow\x12F.temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowRequest\x1aG.temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowResponse\"\x11\xd2\xc3\x18\r\x1a\vschedule_idBGZEgo.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpbb\x06proto3" + +var file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_goTypes = []any{ + (*CreateScheduleRequest)(nil), // 0: temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleRequest + (*UpdateScheduleRequest)(nil), // 1: temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleRequest + (*PatchScheduleRequest)(nil), // 2: temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleRequest + (*DeleteScheduleRequest)(nil), // 3: temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleRequest + (*DescribeScheduleRequest)(nil), // 4: temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleRequest + (*ListScheduleMatchingTimesRequest)(nil), // 5: temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesRequest + (*CreateFromMigrationStateRequest)(nil), // 6: temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateRequest + (*CreateSentinelRequest)(nil), // 7: 
temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelRequest + (*MigrateToWorkflowRequest)(nil), // 8: temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowRequest + (*CreateScheduleResponse)(nil), // 9: temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleResponse + (*UpdateScheduleResponse)(nil), // 10: temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleResponse + (*PatchScheduleResponse)(nil), // 11: temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleResponse + (*DeleteScheduleResponse)(nil), // 12: temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleResponse + (*DescribeScheduleResponse)(nil), // 13: temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleResponse + (*ListScheduleMatchingTimesResponse)(nil), // 14: temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesResponse + (*CreateFromMigrationStateResponse)(nil), // 15: temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateResponse + (*CreateSentinelResponse)(nil), // 16: temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelResponse + (*MigrateToWorkflowResponse)(nil), // 17: temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowResponse +} +var file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.CreateSchedule:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleRequest + 1, // 1: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.UpdateSchedule:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleRequest + 2, // 2: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.PatchSchedule:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleRequest + 3, // 3: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.DeleteSchedule:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleRequest + 4, // 4: 
temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.DescribeSchedule:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleRequest + 5, // 5: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.ListScheduleMatchingTimes:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesRequest + 6, // 6: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.CreateFromMigrationState:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateRequest + 7, // 7: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.CreateSentinel:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelRequest + 8, // 8: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.MigrateToWorkflow:input_type -> temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowRequest + 9, // 9: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.CreateSchedule:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.CreateScheduleResponse + 10, // 10: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.UpdateSchedule:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.UpdateScheduleResponse + 11, // 11: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.PatchSchedule:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.PatchScheduleResponse + 12, // 12: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.DeleteSchedule:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.DeleteScheduleResponse + 13, // 13: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.DescribeSchedule:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.DescribeScheduleResponse + 14, // 14: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.ListScheduleMatchingTimes:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.ListScheduleMatchingTimesResponse + 15, // 15: 
temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.CreateFromMigrationState:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.CreateFromMigrationStateResponse + 16, // 16: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.CreateSentinel:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.CreateSentinelResponse + 17, // 17: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService.MigrateToWorkflow:output_type -> temporal.server.chasm.lib.scheduler.proto.v1.MigrateToWorkflowResponse + 9, // [9:18] is the sub-list for method output_type + 0, // [0:9] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_init() } +func file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_init() { + if File_temporal_server_chasm_lib_scheduler_proto_v1_service_proto != nil { + return + } + file_temporal_server_chasm_lib_scheduler_proto_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_chasm_lib_scheduler_proto_v1_service_proto = out.File + file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_goTypes = nil + file_temporal_server_chasm_lib_scheduler_proto_v1_service_proto_depIdxs = nil +} diff --git 
a/chasm/lib/scheduler/gen/schedulerpb/v1/service_client.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/service_client.pb.go new file mode 100644 index 00000000000..571b68416a0 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/service_client.pb.go @@ -0,0 +1,447 @@ +// Code generated by protoc-gen-go-chasm. DO NOT EDIT. +package schedulerpb + +import ( + "context" + "time" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "google.golang.org/grpc" +) + +// SchedulerServiceLayeredClient is a client for SchedulerService. +type SchedulerServiceLayeredClient struct { + metricsHandler metrics.Handler + numShards int32 + redirector history.Redirector[SchedulerServiceClient] + retryPolicy backoff.RetryPolicy +} + +// NewSchedulerServiceLayeredClient initializes a new SchedulerServiceLayeredClient. 
+func NewSchedulerServiceLayeredClient( + dc *dynamicconfig.Collection, + rpcFactory common.RPCFactory, + monitor membership.Monitor, + config *config.Persistence, + logger log.Logger, + metricsHandler metrics.Handler, +) (SchedulerServiceClient, error) { + resolver, err := monitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + connections := history.NewConnectionPool(resolver, rpcFactory, NewSchedulerServiceClient) + var redirector history.Redirector[SchedulerServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { + redirector = history.NewCachingRedirector( + connections, + resolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) + } else { + redirector = history.NewBasicRedirector(connections, resolver) + } + return &SchedulerServiceLayeredClient{ + metricsHandler: metricsHandler, + redirector: redirector, + numShards: config.NumHistoryShards, + retryPolicy: common.CreateHistoryClientRetryPolicy(), + }, nil +} +func (c *SchedulerServiceLayeredClient) callCreateScheduleNoRetry( + ctx context.Context, + request *CreateScheduleRequest, + opts ...grpc.CallOption, +) (*CreateScheduleResponse, error) { + var response *CreateScheduleResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.CreateSchedule"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.CreateSchedule(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) CreateSchedule( + ctx context.Context, + request *CreateScheduleRequest, + opts ...grpc.CallOption, +) (*CreateScheduleResponse, error) { + call := func(ctx context.Context) (*CreateScheduleResponse, error) { + return c.callCreateScheduleNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callUpdateScheduleNoRetry( + ctx context.Context, + request *UpdateScheduleRequest, + opts ...grpc.CallOption, +) (*UpdateScheduleResponse, error) { + var response *UpdateScheduleResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.UpdateSchedule"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.UpdateSchedule(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) UpdateSchedule( + ctx context.Context, + request *UpdateScheduleRequest, + opts ...grpc.CallOption, +) (*UpdateScheduleResponse, error) { + call := func(ctx context.Context) (*UpdateScheduleResponse, error) { + return c.callUpdateScheduleNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callPatchScheduleNoRetry( + ctx context.Context, + request *PatchScheduleRequest, + opts ...grpc.CallOption, +) (*PatchScheduleResponse, error) { + var response *PatchScheduleResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.PatchSchedule"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.PatchSchedule(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) PatchSchedule( + ctx context.Context, + request *PatchScheduleRequest, + opts ...grpc.CallOption, +) (*PatchScheduleResponse, error) { + call := func(ctx context.Context) (*PatchScheduleResponse, error) { + return c.callPatchScheduleNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callDeleteScheduleNoRetry( + ctx context.Context, + request *DeleteScheduleRequest, + opts ...grpc.CallOption, +) (*DeleteScheduleResponse, error) { + var response *DeleteScheduleResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.DeleteSchedule"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DeleteSchedule(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) DeleteSchedule( + ctx context.Context, + request *DeleteScheduleRequest, + opts ...grpc.CallOption, +) (*DeleteScheduleResponse, error) { + call := func(ctx context.Context) (*DeleteScheduleResponse, error) { + return c.callDeleteScheduleNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callDescribeScheduleNoRetry( + ctx context.Context, + request *DescribeScheduleRequest, + opts ...grpc.CallOption, +) (*DescribeScheduleResponse, error) { + var response *DescribeScheduleResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.DescribeSchedule"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.DescribeSchedule(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) DescribeSchedule( + ctx context.Context, + request *DescribeScheduleRequest, + opts ...grpc.CallOption, +) (*DescribeScheduleResponse, error) { + call := func(ctx context.Context) (*DescribeScheduleResponse, error) { + return c.callDescribeScheduleNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callListScheduleMatchingTimesNoRetry( + ctx context.Context, + request *ListScheduleMatchingTimesRequest, + opts ...grpc.CallOption, +) (*ListScheduleMatchingTimesResponse, error) { + var response *ListScheduleMatchingTimesResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.ListScheduleMatchingTimes"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetFrontendRequest().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.ListScheduleMatchingTimes(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) ListScheduleMatchingTimes( + ctx context.Context, + request *ListScheduleMatchingTimesRequest, + opts ...grpc.CallOption, +) (*ListScheduleMatchingTimesResponse, error) { + call := func(ctx context.Context) (*ListScheduleMatchingTimesResponse, error) { + return c.callListScheduleMatchingTimesNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callCreateFromMigrationStateNoRetry( + ctx context.Context, + request *CreateFromMigrationStateRequest, + opts ...grpc.CallOption, +) (*CreateFromMigrationStateResponse, error) { + var response *CreateFromMigrationStateResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.CreateFromMigrationState"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetState().GetSchedulerState().GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.CreateFromMigrationState(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) CreateFromMigrationState( + ctx context.Context, + request *CreateFromMigrationStateRequest, + opts ...grpc.CallOption, +) (*CreateFromMigrationStateResponse, error) { + call := func(ctx context.Context) (*CreateFromMigrationStateResponse, error) { + return c.callCreateFromMigrationStateNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callCreateSentinelNoRetry( + ctx context.Context, + request *CreateSentinelRequest, + opts ...grpc.CallOption, +) (*CreateSentinelResponse, error) { + var response *CreateSentinelResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.CreateSentinel"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.CreateSentinel(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) CreateSentinel( + ctx context.Context, + request *CreateSentinelRequest, + opts ...grpc.CallOption, +) (*CreateSentinelResponse, error) { + call := func(ctx context.Context) (*CreateSentinelResponse, error) { + return c.callCreateSentinelNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} +func (c *SchedulerServiceLayeredClient) callMigrateToWorkflowNoRetry( + ctx context.Context, + request *MigrateToWorkflowRequest, + opts ...grpc.CallOption, +) (*MigrateToWorkflowResponse, error) { + var response *MigrateToWorkflowResponse + var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. 
+ caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("SchedulerService.MigrateToWorkflow"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := common.WorkflowIDToHistoryShard(request.GetNamespaceId(), request.GetScheduleId(), c.numShards) + op := func(ctx context.Context, client SchedulerServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.MigrateToWorkflow(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *SchedulerServiceLayeredClient) MigrateToWorkflow( + ctx context.Context, + request *MigrateToWorkflowRequest, + opts ...grpc.CallOption, +) (*MigrateToWorkflowResponse, error) { + call := func(ctx context.Context) (*MigrateToWorkflowResponse, error) { + return c.callMigrateToWorkflowNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} diff --git a/chasm/lib/scheduler/gen/schedulerpb/v1/service_grpc.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/service_grpc.pb.go new file mode 100644 index 00000000000..a4b5845bbff --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/service_grpc.pb.go @@ -0,0 +1,406 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/chasm/lib/scheduler/proto/v1/service.proto + +package schedulerpb + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + SchedulerService_CreateSchedule_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/CreateSchedule" + SchedulerService_UpdateSchedule_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/UpdateSchedule" + SchedulerService_PatchSchedule_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/PatchSchedule" + SchedulerService_DeleteSchedule_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/DeleteSchedule" + SchedulerService_DescribeSchedule_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/DescribeSchedule" + SchedulerService_ListScheduleMatchingTimes_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/ListScheduleMatchingTimes" + SchedulerService_CreateFromMigrationState_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/CreateFromMigrationState" + SchedulerService_CreateSentinel_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/CreateSentinel" + SchedulerService_MigrateToWorkflow_FullMethodName = "/temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService/MigrateToWorkflow" +) + +// SchedulerServiceClient is the client API for SchedulerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type SchedulerServiceClient interface { + CreateSchedule(ctx context.Context, in *CreateScheduleRequest, opts ...grpc.CallOption) (*CreateScheduleResponse, error) + UpdateSchedule(ctx context.Context, in *UpdateScheduleRequest, opts ...grpc.CallOption) (*UpdateScheduleResponse, error) + PatchSchedule(ctx context.Context, in *PatchScheduleRequest, opts ...grpc.CallOption) (*PatchScheduleResponse, error) + DeleteSchedule(ctx context.Context, in *DeleteScheduleRequest, opts ...grpc.CallOption) (*DeleteScheduleResponse, error) + DescribeSchedule(ctx context.Context, in *DescribeScheduleRequest, opts ...grpc.CallOption) (*DescribeScheduleResponse, error) + ListScheduleMatchingTimes(ctx context.Context, in *ListScheduleMatchingTimesRequest, opts ...grpc.CallOption) (*ListScheduleMatchingTimesResponse, error) + CreateFromMigrationState(ctx context.Context, in *CreateFromMigrationStateRequest, opts ...grpc.CallOption) (*CreateFromMigrationStateResponse, error) + CreateSentinel(ctx context.Context, in *CreateSentinelRequest, opts ...grpc.CallOption) (*CreateSentinelResponse, error) + MigrateToWorkflow(ctx context.Context, in *MigrateToWorkflowRequest, opts ...grpc.CallOption) (*MigrateToWorkflowResponse, error) +} + +type schedulerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewSchedulerServiceClient(cc grpc.ClientConnInterface) SchedulerServiceClient { + return &schedulerServiceClient{cc} +} + +func (c *schedulerServiceClient) CreateSchedule(ctx context.Context, in *CreateScheduleRequest, opts ...grpc.CallOption) (*CreateScheduleResponse, error) { + out := new(CreateScheduleResponse) + err := c.cc.Invoke(ctx, SchedulerService_CreateSchedule_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) UpdateSchedule(ctx context.Context, in *UpdateScheduleRequest, opts ...grpc.CallOption) (*UpdateScheduleResponse, error) { + out := new(UpdateScheduleResponse) + err := c.cc.Invoke(ctx, SchedulerService_UpdateSchedule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) PatchSchedule(ctx context.Context, in *PatchScheduleRequest, opts ...grpc.CallOption) (*PatchScheduleResponse, error) { + out := new(PatchScheduleResponse) + err := c.cc.Invoke(ctx, SchedulerService_PatchSchedule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) DeleteSchedule(ctx context.Context, in *DeleteScheduleRequest, opts ...grpc.CallOption) (*DeleteScheduleResponse, error) { + out := new(DeleteScheduleResponse) + err := c.cc.Invoke(ctx, SchedulerService_DeleteSchedule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) DescribeSchedule(ctx context.Context, in *DescribeScheduleRequest, opts ...grpc.CallOption) (*DescribeScheduleResponse, error) { + out := new(DescribeScheduleResponse) + err := c.cc.Invoke(ctx, SchedulerService_DescribeSchedule_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) ListScheduleMatchingTimes(ctx context.Context, in *ListScheduleMatchingTimesRequest, opts ...grpc.CallOption) (*ListScheduleMatchingTimesResponse, error) { + out := new(ListScheduleMatchingTimesResponse) + err := c.cc.Invoke(ctx, SchedulerService_ListScheduleMatchingTimes_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) CreateFromMigrationState(ctx context.Context, in *CreateFromMigrationStateRequest, opts ...grpc.CallOption) (*CreateFromMigrationStateResponse, error) { + out := new(CreateFromMigrationStateResponse) + err := c.cc.Invoke(ctx, SchedulerService_CreateFromMigrationState_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) CreateSentinel(ctx context.Context, in *CreateSentinelRequest, opts ...grpc.CallOption) (*CreateSentinelResponse, error) { + out := new(CreateSentinelResponse) + err := c.cc.Invoke(ctx, SchedulerService_CreateSentinel_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *schedulerServiceClient) MigrateToWorkflow(ctx context.Context, in *MigrateToWorkflowRequest, opts ...grpc.CallOption) (*MigrateToWorkflowResponse, error) { + out := new(MigrateToWorkflowResponse) + err := c.cc.Invoke(ctx, SchedulerService_MigrateToWorkflow_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SchedulerServiceServer is the server API for SchedulerService service. 
+// All implementations must embed UnimplementedSchedulerServiceServer +// for forward compatibility +type SchedulerServiceServer interface { + CreateSchedule(context.Context, *CreateScheduleRequest) (*CreateScheduleResponse, error) + UpdateSchedule(context.Context, *UpdateScheduleRequest) (*UpdateScheduleResponse, error) + PatchSchedule(context.Context, *PatchScheduleRequest) (*PatchScheduleResponse, error) + DeleteSchedule(context.Context, *DeleteScheduleRequest) (*DeleteScheduleResponse, error) + DescribeSchedule(context.Context, *DescribeScheduleRequest) (*DescribeScheduleResponse, error) + ListScheduleMatchingTimes(context.Context, *ListScheduleMatchingTimesRequest) (*ListScheduleMatchingTimesResponse, error) + CreateFromMigrationState(context.Context, *CreateFromMigrationStateRequest) (*CreateFromMigrationStateResponse, error) + CreateSentinel(context.Context, *CreateSentinelRequest) (*CreateSentinelResponse, error) + MigrateToWorkflow(context.Context, *MigrateToWorkflowRequest) (*MigrateToWorkflowResponse, error) + mustEmbedUnimplementedSchedulerServiceServer() +} + +// UnimplementedSchedulerServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedSchedulerServiceServer struct { +} + +func (UnimplementedSchedulerServiceServer) CreateSchedule(context.Context, *CreateScheduleRequest) (*CreateScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSchedule not implemented") +} +func (UnimplementedSchedulerServiceServer) UpdateSchedule(context.Context, *UpdateScheduleRequest) (*UpdateScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateSchedule not implemented") +} +func (UnimplementedSchedulerServiceServer) PatchSchedule(context.Context, *PatchScheduleRequest) (*PatchScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PatchSchedule not implemented") +} +func (UnimplementedSchedulerServiceServer) DeleteSchedule(context.Context, *DeleteScheduleRequest) (*DeleteScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteSchedule not implemented") +} +func (UnimplementedSchedulerServiceServer) DescribeSchedule(context.Context, *DescribeScheduleRequest) (*DescribeScheduleResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DescribeSchedule not implemented") +} +func (UnimplementedSchedulerServiceServer) ListScheduleMatchingTimes(context.Context, *ListScheduleMatchingTimesRequest) (*ListScheduleMatchingTimesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListScheduleMatchingTimes not implemented") +} +func (UnimplementedSchedulerServiceServer) CreateFromMigrationState(context.Context, *CreateFromMigrationStateRequest) (*CreateFromMigrationStateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateFromMigrationState not implemented") +} +func (UnimplementedSchedulerServiceServer) CreateSentinel(context.Context, *CreateSentinelRequest) (*CreateSentinelResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSentinel not implemented") +} +func 
(UnimplementedSchedulerServiceServer) MigrateToWorkflow(context.Context, *MigrateToWorkflowRequest) (*MigrateToWorkflowResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method MigrateToWorkflow not implemented") +} +func (UnimplementedSchedulerServiceServer) mustEmbedUnimplementedSchedulerServiceServer() {} + +// UnsafeSchedulerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SchedulerServiceServer will +// result in compilation errors. +type UnsafeSchedulerServiceServer interface { + mustEmbedUnimplementedSchedulerServiceServer() +} + +func RegisterSchedulerServiceServer(s grpc.ServiceRegistrar, srv SchedulerServiceServer) { + s.RegisterService(&SchedulerService_ServiceDesc, srv) +} + +func _SchedulerService_CreateSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateScheduleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).CreateSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_CreateSchedule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).CreateSchedule(ctx, req.(*CreateScheduleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_UpdateSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateScheduleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).UpdateSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_UpdateSchedule_FullMethodName, + } + handler := 
func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).UpdateSchedule(ctx, req.(*UpdateScheduleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_PatchSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PatchScheduleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).PatchSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_PatchSchedule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).PatchSchedule(ctx, req.(*PatchScheduleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_DeleteSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteScheduleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).DeleteSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_DeleteSchedule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).DeleteSchedule(ctx, req.(*DeleteScheduleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_DescribeSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DescribeScheduleRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).DescribeSchedule(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: SchedulerService_DescribeSchedule_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).DescribeSchedule(ctx, req.(*DescribeScheduleRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_ListScheduleMatchingTimes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListScheduleMatchingTimesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).ListScheduleMatchingTimes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_ListScheduleMatchingTimes_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).ListScheduleMatchingTimes(ctx, req.(*ListScheduleMatchingTimesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_CreateFromMigrationState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateFromMigrationStateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).CreateFromMigrationState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_CreateFromMigrationState_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).CreateFromMigrationState(ctx, req.(*CreateFromMigrationStateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_CreateSentinel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) 
{ + in := new(CreateSentinelRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).CreateSentinel(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_CreateSentinel_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).CreateSentinel(ctx, req.(*CreateSentinelRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SchedulerService_MigrateToWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MigrateToWorkflowRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SchedulerServiceServer).MigrateToWorkflow(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: SchedulerService_MigrateToWorkflow_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SchedulerServiceServer).MigrateToWorkflow(ctx, req.(*MigrateToWorkflowRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// SchedulerService_ServiceDesc is the grpc.ServiceDesc for SchedulerService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var SchedulerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.chasm.lib.scheduler.proto.v1.SchedulerService", + HandlerType: (*SchedulerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSchedule", + Handler: _SchedulerService_CreateSchedule_Handler, + }, + { + MethodName: "UpdateSchedule", + Handler: _SchedulerService_UpdateSchedule_Handler, + }, + { + MethodName: "PatchSchedule", + Handler: _SchedulerService_PatchSchedule_Handler, + }, + { + MethodName: "DeleteSchedule", + Handler: _SchedulerService_DeleteSchedule_Handler, + }, + { + MethodName: "DescribeSchedule", + Handler: _SchedulerService_DescribeSchedule_Handler, + }, + { + MethodName: "ListScheduleMatchingTimes", + Handler: _SchedulerService_ListScheduleMatchingTimes_Handler, + }, + { + MethodName: "CreateFromMigrationState", + Handler: _SchedulerService_CreateFromMigrationState_Handler, + }, + { + MethodName: "CreateSentinel", + Handler: _SchedulerService_CreateSentinel_Handler, + }, + { + MethodName: "MigrateToWorkflow", + Handler: _SchedulerService_MigrateToWorkflow_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/chasm/lib/scheduler/proto/v1/service.proto", +} diff --git a/chasm/lib/scheduler/gen/schedulerpb/v1/tasks.go-helpers.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/tasks.go-helpers.pb.go new file mode 100644 index 00000000000..16df8371800 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/tasks.go-helpers.pb.go @@ -0,0 +1,265 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. 
+package schedulerpb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type SchedulerIdleTask to the protobuf v3 wire format +func (val *SchedulerIdleTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SchedulerIdleTask from the protobuf v3 wire format +func (val *SchedulerIdleTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SchedulerIdleTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SchedulerIdleTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SchedulerIdleTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SchedulerIdleTask + switch t := that.(type) { + case *SchedulerIdleTask: + that1 = t + case SchedulerIdleTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SchedulerCallbacksTask to the protobuf v3 wire format +func (val *SchedulerCallbacksTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SchedulerCallbacksTask from the protobuf v3 wire format +func (val *SchedulerCallbacksTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SchedulerCallbacksTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SchedulerCallbacksTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SchedulerCallbacksTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SchedulerCallbacksTask + switch t := that.(type) { + case *SchedulerCallbacksTask: + that1 = t + case SchedulerCallbacksTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type GeneratorTask to the protobuf v3 wire format +func (val *GeneratorTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type GeneratorTask from the protobuf v3 wire format +func (val *GeneratorTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *GeneratorTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two GeneratorTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *GeneratorTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *GeneratorTask + switch t := that.(type) { + case *GeneratorTask: + that1 = t + case GeneratorTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvokerProcessBufferTask to the protobuf v3 wire format +func (val *InvokerProcessBufferTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvokerProcessBufferTask from the protobuf v3 wire format +func (val *InvokerProcessBufferTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvokerProcessBufferTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvokerProcessBufferTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvokerProcessBufferTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvokerProcessBufferTask + switch t := that.(type) { + case *InvokerProcessBufferTask: + that1 = t + case InvokerProcessBufferTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type InvokerExecuteTask to the protobuf v3 wire format +func (val *InvokerExecuteTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type InvokerExecuteTask from the protobuf v3 wire format +func (val *InvokerExecuteTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *InvokerExecuteTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two InvokerExecuteTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *InvokerExecuteTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *InvokerExecuteTask + switch t := that.(type) { + case *InvokerExecuteTask: + that1 = t + case InvokerExecuteTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type BackfillerTask to the protobuf v3 wire format +func (val *BackfillerTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type BackfillerTask from the protobuf v3 wire format +func (val *BackfillerTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *BackfillerTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two BackfillerTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *BackfillerTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *BackfillerTask + switch t := that.(type) { + case *BackfillerTask: + that1 = t + case BackfillerTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type SchedulerMigrateToWorkflowTask to the protobuf v3 wire format +func (val *SchedulerMigrateToWorkflowTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type SchedulerMigrateToWorkflowTask from the protobuf v3 wire format +func (val *SchedulerMigrateToWorkflowTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *SchedulerMigrateToWorkflowTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two SchedulerMigrateToWorkflowTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *SchedulerMigrateToWorkflowTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *SchedulerMigrateToWorkflowTask + switch t := that.(type) { + case *SchedulerMigrateToWorkflowTask: + that1 = t + case SchedulerMigrateToWorkflowTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/scheduler/gen/schedulerpb/v1/tasks.pb.go b/chasm/lib/scheduler/gen/schedulerpb/v1/tasks.pb.go new file mode 100644 index 00000000000..3564a6aac35 --- /dev/null +++ b/chasm/lib/scheduler/gen/schedulerpb/v1/tasks.pb.go @@ -0,0 +1,367 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/scheduler/proto/v1/tasks.proto + +package schedulerpb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Fires when the scheduler's idle period has lapsed, and the scheduler should +// be closed. +type SchedulerIdleTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Idle time total is set at time of task creation, so that if the dynamic config key + // controlling idle time changes, task validation will be aware. + IdleTimeTotal *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_time_total,json=idleTimeTotal,proto3" json:"idle_time_total,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchedulerIdleTask) Reset() { + *x = SchedulerIdleTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchedulerIdleTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulerIdleTask) ProtoMessage() {} + +func (x *SchedulerIdleTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulerIdleTask.ProtoReflect.Descriptor instead. 
+func (*SchedulerIdleTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *SchedulerIdleTask) GetIdleTimeTotal() *durationpb.Duration { + if x != nil { + return x.IdleTimeTotal + } + return nil +} + +// Ensures that callbacks for all running buffered starts are attached. Used only +// during migration from V1, as workflows started by CHASM scheduler are started +// with callbacks attached. +type SchedulerCallbacksTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchedulerCallbacksTask) Reset() { + *x = SchedulerCallbacksTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchedulerCallbacksTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulerCallbacksTask) ProtoMessage() {} + +func (x *SchedulerCallbacksTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulerCallbacksTask.ProtoReflect.Descriptor instead. +func (*SchedulerCallbacksTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{1} +} + +// Buffers actions based on the schedule's specification. 
+type GeneratorTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GeneratorTask) Reset() { + *x = GeneratorTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GeneratorTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneratorTask) ProtoMessage() {} + +func (x *GeneratorTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneratorTask.ProtoReflect.Descriptor instead. +func (*GeneratorTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{2} +} + +// Processes buffered actions, deciding whether to execute, delay, or discard. 
+type InvokerProcessBufferTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokerProcessBufferTask) Reset() { + *x = InvokerProcessBufferTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokerProcessBufferTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokerProcessBufferTask) ProtoMessage() {} + +func (x *InvokerProcessBufferTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokerProcessBufferTask.ProtoReflect.Descriptor instead. +func (*InvokerProcessBufferTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{3} +} + +// Drives execution of pending buffered actions to completion by starting, +// canceling, or terminating workflows. 
+type InvokerExecuteTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *InvokerExecuteTask) Reset() { + *x = InvokerExecuteTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *InvokerExecuteTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InvokerExecuteTask) ProtoMessage() {} + +func (x *InvokerExecuteTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InvokerExecuteTask.ProtoReflect.Descriptor instead. +func (*InvokerExecuteTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{4} +} + +// Buffers actions based on a manually-requested backfill. 
+type BackfillerTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackfillerTask) Reset() { + *x = BackfillerTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackfillerTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackfillerTask) ProtoMessage() {} + +func (x *BackfillerTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackfillerTask.ProtoReflect.Descriptor instead. +func (*BackfillerTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{5} +} + +// Triggers migration from CHASM (V2) to workflow-backed (V1) scheduler. 
+type SchedulerMigrateToWorkflowTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SchedulerMigrateToWorkflowTask) Reset() { + *x = SchedulerMigrateToWorkflowTask{} + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SchedulerMigrateToWorkflowTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchedulerMigrateToWorkflowTask) ProtoMessage() {} + +func (x *SchedulerMigrateToWorkflowTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchedulerMigrateToWorkflowTask.ProtoReflect.Descriptor instead. 
+func (*SchedulerMigrateToWorkflowTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP(), []int{6} +} + +var File_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDesc = "" + + "\n" + + "8temporal/server/chasm/lib/scheduler/proto/v1/tasks.proto\x12,temporal.server.chasm.lib.scheduler.proto.v1\x1a\x1egoogle/protobuf/duration.proto\"V\n" + + "\x11SchedulerIdleTask\x12A\n" + + "\x0fidle_time_total\x18\x01 \x01(\v2\x19.google.protobuf.DurationR\ridleTimeTotal\"\x18\n" + + "\x16SchedulerCallbacksTask\"\x0f\n" + + "\rGeneratorTask\"\x1a\n" + + "\x18InvokerProcessBufferTask\"\x14\n" + + "\x12InvokerExecuteTask\"\x10\n" + + "\x0eBackfillerTask\" \n" + + "\x1eSchedulerMigrateToWorkflowTaskBGZEgo.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDescData +} + +var file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_goTypes = []any{ + (*SchedulerIdleTask)(nil), // 0: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerIdleTask + 
(*SchedulerCallbacksTask)(nil), // 1: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerCallbacksTask + (*GeneratorTask)(nil), // 2: temporal.server.chasm.lib.scheduler.proto.v1.GeneratorTask + (*InvokerProcessBufferTask)(nil), // 3: temporal.server.chasm.lib.scheduler.proto.v1.InvokerProcessBufferTask + (*InvokerExecuteTask)(nil), // 4: temporal.server.chasm.lib.scheduler.proto.v1.InvokerExecuteTask + (*BackfillerTask)(nil), // 5: temporal.server.chasm.lib.scheduler.proto.v1.BackfillerTask + (*SchedulerMigrateToWorkflowTask)(nil), // 6: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerMigrateToWorkflowTask + (*durationpb.Duration)(nil), // 7: google.protobuf.Duration +} +var file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_depIdxs = []int32{ + 7, // 0: temporal.server.chasm.lib.scheduler.proto.v1.SchedulerIdleTask.idle_time_total:type_name -> google.protobuf.Duration + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_init() } +func file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_init() { + if File_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDesc), len(file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_rawDesc)), + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_depIdxs, + 
MessageInfos: file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto = out.File + file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_goTypes = nil + file_temporal_server_chasm_lib_scheduler_proto_v1_tasks_proto_depIdxs = nil +} diff --git a/chasm/lib/scheduler/generator.go b/chasm/lib/scheduler/generator.go new file mode 100644 index 00000000000..028792f884a --- /dev/null +++ b/chasm/lib/scheduler/generator.go @@ -0,0 +1,89 @@ +package scheduler + +import ( + "time" + + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/service/worker/scheduler" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// The Generator component is responsible for buffering actions according +// to the schedule's specification. Manually requested actions (from an immediate +// request or backfill) are separately handled in the Backfiller component. +type Generator struct { + chasm.UnimplementedComponent + + *schedulerpb.GeneratorState + + Scheduler chasm.ParentPtr[*Scheduler] +} + +// NewGenerator returns an initialized Generator component, which should +// be parented under a Scheduler root node. +func NewGenerator(ctx chasm.MutableContext) *Generator { + generator := newGeneratorWithState(ctx, &schedulerpb.GeneratorState{ + LastProcessedTime: nil, + }) + // Kick off initial generator run as an immediate task. + generator.Generate(ctx) + return generator +} + +func newGeneratorWithState(ctx chasm.MutableContext, state *schedulerpb.GeneratorState) *Generator { + generator := &Generator{ + GeneratorState: state, + } + return generator +} + +// Generate immediately kicks off a new GeneratorTask. Used after updating the +// schedule specification. 
+func (g *Generator) Generate(ctx chasm.MutableContext) { + g.scheduleTask(ctx, chasm.TaskScheduledTimeImmediate) +} + +// scheduleTask schedules a GeneratorTask at the given time. +func (g *Generator) scheduleTask(ctx chasm.MutableContext, scheduledTime time.Time) { + ctx.AddTask(g, chasm.TaskAttributes{ + ScheduledTime: scheduledTime, + }, &schedulerpb.GeneratorTask{}) +} + +func (g *Generator) LifecycleState(ctx chasm.Context) chasm.LifecycleState { + return chasm.LifecycleStateRunning +} + +// UpdateFutureActionTimes computes and stores the next scheduled action times. +func (g *Generator) UpdateFutureActionTimes( + ctx chasm.Context, + specBuilder *scheduler.SpecBuilder, +) { + sched := g.Scheduler.Get(ctx) + spec, err := sched.getCompiledSpec(specBuilder) + if err != nil { + return + } + + count := recentActionCount + if sched.Schedule.State.LimitedActions { + count = min(int(sched.Schedule.State.RemainingActions), recentActionCount) + } + + futureTimes := make([]*timestamppb.Timestamp, 0, count) + // Start from max(now, updateTime) to ensure we skip times before the last update. 
+ t := ctx.Now(g) + if updateTime := sched.Info.GetUpdateTime().AsTime(); updateTime.After(t) { + t = updateTime + } + for len(futureTimes) < count { + t = spec.GetNextTime(sched.jitterSeed(), t).Next + if t.IsZero() { + break + } + futureTimes = append(futureTimes, timestamppb.New(t)) + } + + g.FutureActionTimes = futureTimes +} diff --git a/chasm/lib/scheduler/generator_tasks.go b/chasm/lib/scheduler/generator_tasks.go new file mode 100644 index 00000000000..b88e96952eb --- /dev/null +++ b/chasm/lib/scheduler/generator_tasks.go @@ -0,0 +1,163 @@ +package scheduler + +import ( + "fmt" + + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + queueerrors "go.temporal.io/server/service/history/queues/errors" + "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/fx" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type ( + GeneratorTaskHandlerOptions struct { + fx.In + + Config *Config + MetricsHandler metrics.Handler + BaseLogger log.Logger + SpecProcessor SpecProcessor + SpecBuilder *scheduler.SpecBuilder + } + + GeneratorTaskHandler struct { + chasm.PureTaskHandlerBase + config *Config + metricsHandler metrics.Handler + baseLogger log.Logger + SpecProcessor SpecProcessor + specBuilder *scheduler.SpecBuilder + } +) + +func NewGeneratorTaskHandler(opts GeneratorTaskHandlerOptions) *GeneratorTaskHandler { + return &GeneratorTaskHandler{ + config: opts.Config, + metricsHandler: opts.MetricsHandler, + baseLogger: opts.BaseLogger, + SpecProcessor: opts.SpecProcessor, + specBuilder: opts.SpecBuilder, + } +} + +func (g *GeneratorTaskHandler) Execute( + ctx chasm.MutableContext, + generator *Generator, + _ chasm.TaskAttributes, + _ *schedulerpb.GeneratorTask, +) error { + scheduler := generator.Scheduler.Get(ctx) + logger := 
newTaggedLogger(g.baseLogger, scheduler) + metricsHandler := newTaggedMetricsHandler(g.metricsHandler, scheduler) + invoker := scheduler.Invoker.Get(ctx) + + now := ctx.Now(generator) + + // If we have no last processed time, this is a new schedule. + if generator.LastProcessedTime == nil { + createdAt := timestamppb.New(now) + generator.LastProcessedTime = createdAt + scheduler.Info.CreateTime = createdAt + + g.logSchedule(logger, "starting schedule", scheduler) + } + + // If the high water mark is earlier than when a schedule was updated, we must skip any actions that hadn't + // yet been processed. + if scheduler.Info.GetUpdateTime().AsTime().After(generator.LastProcessedTime.AsTime()) { + generator.LastProcessedTime = scheduler.Info.GetUpdateTime() + } + + // Process time range between last high water mark and system time. + t1 := generator.LastProcessedTime.AsTime() + t2 := now.UTC() + if t2.Before(t1) { + logger.Error("time went backwards", + tag.Stringer("time", t1), + tag.Stringer("time", t2)) + t2 = t1 + } + + result, err := g.SpecProcessor.ProcessTimeRange( + scheduler, + t1, t2, + scheduler.overlapPolicy(), + scheduler.WorkflowID(), + "", + false, + nil, + ) + if err != nil { + // An error here should be impossible, send to the DLQ. + return queueerrors.NewUnprocessableTaskError( + fmt.Sprintf("failed to process a time range: %s", err.Error())) + } + + // Emit metrics and update state for any dropped actions. + if result.DroppedCount > 0 { + logger.Warn("Buffer overrun, dropping actions", + tag.Int64("dropped-count", result.DroppedCount)) + metricsHandler.Counter(metrics.ScheduleBufferOverruns.Name()).Record(result.DroppedCount) + scheduler.Info.BufferDropped += result.DroppedCount + } + + // Enqueue newly-generated buffered starts. + if len(result.BufferedStarts) > 0 { + invoker.EnqueueBufferedStarts(ctx, result.BufferedStarts) + } + + // Write the new high water mark and future action times. 
+ generator.LastProcessedTime = timestamppb.New(result.LastActionTime) + generator.UpdateFutureActionTimes(ctx, g.specBuilder) + + // Check if the schedule has gone idle. + idleTimeTotal := g.config.Tweakables(scheduler.Namespace).IdleTime + idleExpiration, isIdle := scheduler.getIdleExpiration(ctx, idleTimeTotal, result.NextWakeupTime) + if isIdle { + // Schedule is complete, no need for another buffer task. We keep the schedule's + // backing mutable state explicitly open for the idle period, during which the + // customer can describe/modify/restart the schedule. + // + // Once the idle timer expires, we close the component. + ctx.AddTask(scheduler, chasm.TaskAttributes{ + ScheduledTime: idleExpiration, + }, &schedulerpb.SchedulerIdleTask{ + IdleTimeTotal: durationpb.New(idleTimeTotal), + }) + return nil + } + + // No more tasks if we're paused. + if scheduler.Schedule.State.Paused { + return nil + } + + // Another buffering task is added if we aren't completely out of actions or paused. 
+ generator.scheduleTask(ctx, result.NextWakeupTime) + + return nil +} + +func (g *GeneratorTaskHandler) logSchedule(logger log.Logger, msg string, sched *Scheduler) { + logger.Info(msg, + tag.Stringer("spec", jsonStringer{sched.Schedule.Spec}), + tag.Stringer("policies", jsonStringer{sched.Schedule.Policies})) +} + +func (g *GeneratorTaskHandler) Validate( + ctx chasm.Context, + generator *Generator, + attrs chasm.TaskAttributes, + _ *schedulerpb.GeneratorTask, +) (bool, error) { + return validateTaskHighWaterMark( + generator.GetLastProcessedTime(), + attrs.ScheduledTime, + ) +} diff --git a/chasm/lib/scheduler/generator_tasks_test.go b/chasm/lib/scheduler/generator_tasks_test.go new file mode 100644 index 00000000000..21f18a527cc --- /dev/null +++ b/chasm/lib/scheduler/generator_tasks_test.go @@ -0,0 +1,185 @@ +package scheduler_test + +import ( + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" + schedulepb "go.temporal.io/api/schedule/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/metrics" + queueerrors "go.temporal.io/server/service/history/queues/errors" + "go.temporal.io/server/service/history/tasks" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func newGeneratorHandler(env *testEnv) *scheduler.GeneratorTaskHandler { + return scheduler.NewGeneratorTaskHandler(scheduler.GeneratorTaskHandlerOptions{ + Config: defaultConfig(), + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: env.Logger, + SpecProcessor: env.SpecProcessor, + SpecBuilder: legacyscheduler.NewSpecBuilder(), + }) +} + +func TestGeneratorTask_Execute_ProcessTimeRangeFails(t *testing.T) { + // Create a custom mock spec processor that fails on ProcessTimeRange. 
+ ctrl := gomock.NewController(t) + specProcessor := scheduler.NewMockSpecProcessor(ctrl) + now := time.Now() + + // First call during newTestEnv's CloseTransaction should succeed. + specProcessor.EXPECT().ProcessTimeRange( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(&scheduler.ProcessedTimeRange{ + NextWakeupTime: now.Add(defaultInterval), + LastActionTime: now, + }, nil).Times(1) + + // Second call during test should fail. + specProcessor.EXPECT().ProcessTimeRange( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil, errors.New("processTimeRange bug")) + + specProcessor.EXPECT().NextTime(gomock.Any(), gomock.Any()).Return(legacyscheduler.GetNextTimeResult{ + Next: now.Add(defaultInterval), + Nominal: now.Add(defaultInterval), + }, nil).AnyTimes() + + env := newTestEnv(t, withSpecProcessor(specProcessor)) + handler := newGeneratorHandler(env) + + ctx := env.MutableContext() + generator := env.Scheduler.Generator.Get(ctx) + + // If ProcessTimeRange fails, we should fail the task as an internal error. + err := handler.Execute(ctx, generator, chasm.TaskAttributes{}, &schedulerpb.GeneratorTask{}) + var target *queueerrors.UnprocessableTaskError + require.ErrorAs(t, err, &target) + require.Equal(t, "failed to process a time range: processTimeRange bug", target.Message) +} + +func TestGeneratorTask_ExecuteBufferTask_Basic(t *testing.T) { + env := newTestEnv(t) + handler := newGeneratorHandler(env) + + ctx := env.MutableContext() + sched := env.Scheduler + generator := sched.Generator.Get(ctx) + + // Move high water mark back in time (Generator always compares high water mark + // against system time) to generate buffered actions. + highWatermark := ctx.Now(generator).UTC().Add(-defaultInterval * 5) + generator.LastProcessedTime = timestamppb.New(highWatermark) + + // Execute the generate task. 
+ err := handler.Execute(ctx, generator, chasm.TaskAttributes{}, &schedulerpb.GeneratorTask{}) + require.NoError(t, err) + + // We expect 5 buffered starts. + invoker := sched.Invoker.Get(ctx) + require.Len(t, invoker.BufferedStarts, 5) + + // Validate RequestId -> WorkflowId mapping. + for _, start := range invoker.BufferedStarts { + require.Equal(t, start.WorkflowId, invoker.RunningWorkflowID(start.RequestId)) + } + + // Generator's high water mark should have advanced. + newHighWatermark := generator.LastProcessedTime.AsTime() + require.True(t, newHighWatermark.After(highWatermark)) + + // Ensure we scheduled a physical side-effect task on the tree at immediate time. + // The InvokerExecuteTask is a side-effect task that starts workflows. + // The InvokerProcessBufferTask (pure) executes inline during CloseTransaction. + require.NoError(t, env.CloseTransaction()) + require.True(t, env.HasTask(&tasks.ChasmTask{}, chasm.TaskScheduledTimeImmediate)) +} + +func TestGeneratorTask_UpdateFutureActionTimes_UnlimitedActions(t *testing.T) { + env := newTestEnv(t) + handler := newGeneratorHandler(env) + + ctx := env.MutableContext() + generator := env.Scheduler.Generator.Get(ctx) + + err := handler.Execute(ctx, generator, chasm.TaskAttributes{}, &schedulerpb.GeneratorTask{}) + require.NoError(t, err) + + require.NotEmpty(t, generator.FutureActionTimes) + require.Len(t, generator.FutureActionTimes, 10) +} + +func TestGeneratorTask_UpdateFutureActionTimes_LimitedActions(t *testing.T) { + env := newTestEnv(t) + handler := newGeneratorHandler(env) + + ctx := env.MutableContext() + sched := env.Scheduler + generator := sched.Generator.Get(ctx) + + sched.Schedule.State.LimitedActions = true + sched.Schedule.State.RemainingActions = 2 + + err := handler.Execute(ctx, generator, chasm.TaskAttributes{}, &schedulerpb.GeneratorTask{}) + require.NoError(t, err) + + require.Len(t, generator.FutureActionTimes, 2) +} + +func 
TestGeneratorTask_UpdateFutureActionTimes_SkipsBeforeUpdateTime(t *testing.T) { + env := newTestEnv(t) + handler := newGeneratorHandler(env) + + ctx := env.MutableContext() + sched := env.Scheduler + generator := sched.Generator.Get(ctx) + + // UpdateTime acts as a floor - action times at or before it are skipped. + baseTime := ctx.Now(generator).UTC() + updateTime := baseTime.Add(defaultInterval / 2) + sched.Info.UpdateTime = timestamppb.New(updateTime) + + err := handler.Execute(ctx, generator, chasm.TaskAttributes{}, &schedulerpb.GeneratorTask{}) + require.NoError(t, err) + + require.NotEmpty(t, generator.FutureActionTimes) + for _, futureTime := range generator.FutureActionTimes { + require.True(t, futureTime.AsTime().After(updateTime)) + } +} + +func TestUnpause_ResumesProcessing(t *testing.T) { + env := newTestEnv(t) + + // Pause the schedule. + env.Scheduler.Schedule.State.Paused = true + require.NoError(t, env.CloseTransaction()) + + // Clear tasks from setup, then unpause. UpdateTime is captured at T0. + env.NodeBackend.TasksByCategory = nil + ctx := env.MutableContext() + _, err := env.Scheduler.Patch(ctx, &schedulerpb.PatchScheduleRequest{ + FrontendRequest: &workflowservice.PatchScheduleRequest{ + Patch: &schedulepb.SchedulePatch{Unpause: "resuming"}, + }, + }) + require.NoError(t, err) + + // Advance time before closing so the generator has actions to process. + env.TimeSource.Update(env.TimeSource.Now().Add(defaultInterval * 3)) + require.NoError(t, env.CloseTransaction()) + + // With the fix, Patch kicks an immediate generator task. During CloseTransaction + // it processes the elapsed interval, buffers starts, and the invoker schedules + // side-effect tasks to start workflows. Without the fix, nothing runs. 
+ require.True(t, env.HasTask(&tasks.ChasmTask{}, chasm.TaskScheduledTimeImmediate), + "schedule should resume processing after unpause") +} diff --git a/chasm/lib/scheduler/handler.go b/chasm/lib/scheduler/handler.go new file mode 100644 index 00000000000..710cee1743a --- /dev/null +++ b/chasm/lib/scheduler/handler.go @@ -0,0 +1,282 @@ +package scheduler + +import ( + "context" + "errors" + "fmt" + + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" +) + +type handler struct { + schedulerpb.UnimplementedSchedulerServiceServer + + logger log.Logger + specBuilder *legacyscheduler.SpecBuilder +} + +func newHandler(logger log.Logger, specBuilder *legacyscheduler.SpecBuilder) *handler { + return &handler{ + logger: logger, + specBuilder: specBuilder, + } +} + +func (h *handler) CreateSchedule(ctx context.Context, req *schedulerpb.CreateScheduleRequest) (resp *schedulerpb.CreateScheduleResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + _, err = chasm.StartExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + CreateScheduler, + req, + chasm.WithRequestID(req.FrontendRequest.RequestId), + ) + + var alreadyStartedErr *chasm.ExecutionAlreadyStartedError + if errors.As(err, &alreadyStartedErr) { + // Check if the existing schedule is a sentinel. + // + // TODO lina@ - this can be removed (as well as all other sentinel business) + // after fully migrated to CHASM schedulers. 
+ _, readErr := chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + ), + func(s *Scheduler, ctx chasm.Context, _ *struct{}) (*struct{}, error) { + if s.IsSentinel() { + return nil, ErrSentinel + } + return nil, nil + }, + (*struct{})(nil), + ) + if readErr != nil { + return nil, readErr // Returns ErrSentinel (404) if sentinel + } + return nil, serviceerror.NewAlreadyExistsf("schedule %q is already registered", req.FrontendRequest.ScheduleId) + } + + return &schedulerpb.CreateScheduleResponse{ + FrontendResponse: &workflowservice.CreateScheduleResponse{ + ConflictToken: initialSerializedConflictToken, + }, + }, err +} + +// CreateFromMigrationState creates a CHASM schedule from migrated V1 state. +// Used during migration from workflow-backed schedules to CHASM schedules. +func (h *handler) CreateFromMigrationState(ctx context.Context, req *schedulerpb.CreateFromMigrationStateRequest) (resp *schedulerpb.CreateFromMigrationStateResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + scheduleID := req.GetState().GetSchedulerState().GetScheduleId() + _, err = chasm.StartExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: scheduleID, + }, + CreateSchedulerFromMigration, + req, + ) + + var alreadyStartedErr *chasm.ExecutionAlreadyStartedError + if errors.As(err, &alreadyStartedErr) { + // Check if the existing schedule is a sentinel. Sentinels are + // auto-deleted SentinelIdleTime after schedule creation; the + // V1 schedule will keep retrying migration until it expires. 
+ _, readErr := chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: scheduleID, + }, + ), + func(s *Scheduler, ctx chasm.Context, _ *struct{}) (*struct{}, error) { + if s.IsSentinel() { + return nil, ErrSentinel + } + return nil, nil + }, + (*struct{})(nil), + ) + if readErr != nil { + if errors.Is(readErr, ErrSentinel) { + h.logger.Warn( + fmt.Sprintf("Migration blocked by sentinel schedule; sentinel will auto-delete %v after schedule creation", SentinelIdleTime), + tag.NewStringTag("schedule-id", scheduleID), + ) + return nil, ErrSentinelBlocked + } + return nil, readErr + } + return nil, serviceerror.NewAlreadyExistsf("schedule %q is already registered", scheduleID) + } + + return &schedulerpb.CreateFromMigrationStateResponse{}, err +} + +func (h *handler) CreateSentinel(ctx context.Context, req *schedulerpb.CreateSentinelRequest) (resp *schedulerpb.CreateSentinelResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + _, err = chasm.StartExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.ScheduleId, + }, + CreateSentinelFn, + req, + ) + + var alreadyStartedErr *chasm.ExecutionAlreadyStartedError + if errors.As(err, &alreadyStartedErr) { + // If a sentinel already exists, succeed idempotently. + // If a real scheduler exists, fail. 
+ _, readErr := chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.ScheduleId, + }, + ), + func(s *Scheduler, ctx chasm.Context, _ *struct{}) (*struct{}, error) { + if s.IsSentinel() { + return nil, nil + } + return nil, serviceerror.NewAlreadyExistsf("schedule %q is already registered", req.ScheduleId) + }, + (*struct{})(nil), + ) + return &schedulerpb.CreateSentinelResponse{}, readErr + } + if err != nil { + return nil, err + } + + return &schedulerpb.CreateSentinelResponse{}, nil +} + +func (h *handler) UpdateSchedule(ctx context.Context, req *schedulerpb.UpdateScheduleRequest) (resp *schedulerpb.UpdateScheduleResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + resp, _, err = chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + ), + (*Scheduler).Update, + req, + ) + return resp, err +} + +func (h *handler) PatchSchedule(ctx context.Context, req *schedulerpb.PatchScheduleRequest) (resp *schedulerpb.PatchScheduleResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + resp, _, err = chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + ), + (*Scheduler).Patch, + req, + ) + return resp, err +} + +func (h *handler) DeleteSchedule(ctx context.Context, req *schedulerpb.DeleteScheduleRequest) (resp *schedulerpb.DeleteScheduleResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + resp, _, err = chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + ), + (*Scheduler).Delete, + req, + ) + return resp, err +} + +func (h *handler) MigrateToWorkflow(ctx context.Context, req 
*schedulerpb.MigrateToWorkflowRequest) (resp *schedulerpb.MigrateToWorkflowResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + resp, _, err = chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.ScheduleId, + }, + ), + (*Scheduler).MigrateToWorkflow, + req, + ) + if errors.Is(err, ErrSentinel) { + return nil, ErrSentinelBlocked + } + return resp, err +} + +func (h *handler) DescribeSchedule(ctx context.Context, req *schedulerpb.DescribeScheduleRequest) (resp *schedulerpb.DescribeScheduleResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + resp, err = chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + ), + func(s *Scheduler, ctx chasm.Context, req *schedulerpb.DescribeScheduleRequest) (*schedulerpb.DescribeScheduleResponse, error) { + return s.Describe(ctx, req, h.specBuilder) + }, + req, + ) + return resp, err +} + +func (h *handler) ListScheduleMatchingTimes(ctx context.Context, req *schedulerpb.ListScheduleMatchingTimesRequest) (resp *schedulerpb.ListScheduleMatchingTimesResponse, err error) { + defer log.CapturePanic(h.logger, &err) + + resp, err = chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*Scheduler]( + chasm.ExecutionKey{ + NamespaceID: req.NamespaceId, + BusinessID: req.FrontendRequest.ScheduleId, + }, + ), + func(s *Scheduler, ctx chasm.Context, req *schedulerpb.ListScheduleMatchingTimesRequest) (*schedulerpb.ListScheduleMatchingTimesResponse, error) { + return s.ListMatchingTimes(ctx, req, h.specBuilder) + }, + req, + ) + return resp, err +} diff --git a/chasm/lib/scheduler/handler_test.go b/chasm/lib/scheduler/handler_test.go new file mode 100644 index 00000000000..2594ed4efec --- /dev/null +++ b/chasm/lib/scheduler/handler_test.go @@ -0,0 +1,183 @@ +package scheduler_test + +import ( + "context" + "testing" + + 
"github.com/stretchr/testify/require" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/chasmtest" + "go.temporal.io/server/chasm/lib/scheduler" + schedulerpb "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/log" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// runSentinelHandlerTestCase asserts that the given operation returns +// NotFound when invoked on a sentinel scheduler. +func runSentinelHandlerTestCase( + t *testing.T, + callFn func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, specBuilder *legacyscheduler.SpecBuilder) error, +) { + sentinel, ctx, _ := setupSentinelForTest(t) + specBuilder := legacyscheduler.NewSpecBuilder() + + err := callFn(sentinel, ctx, specBuilder) + + require.Error(t, err) + var notFoundErr *serviceerror.NotFound + require.ErrorAs(t, err, ¬FoundErr, "expected NotFound error for sentinel") +} + +func TestSentinelHandler_DescribeSchedule(t *testing.T) { + runSentinelHandlerTestCase(t, func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, specBuilder *legacyscheduler.SpecBuilder) error { + _, err := sentinel.Describe(ctx, &schedulerpb.DescribeScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.DescribeScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + }, + }, specBuilder) + return err + }) +} + +func TestSentinelHandler_ListScheduleMatchingTimes(t *testing.T) { + runSentinelHandlerTestCase(t, func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, specBuilder *legacyscheduler.SpecBuilder) error { + _, err := sentinel.ListMatchingTimes(ctx, &schedulerpb.ListScheduleMatchingTimesRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.ListScheduleMatchingTimesRequest{ + Namespace: namespace, + ScheduleId: 
scheduleID, + StartTime: timestamppb.Now(), + EndTime: timestamppb.Now(), + }, + }, specBuilder) + return err + }) +} + +func TestSentinelHandler_UpdateSchedule(t *testing.T) { + runSentinelHandlerTestCase(t, func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, _ *legacyscheduler.SpecBuilder) error { + _, err := sentinel.Update(ctx, &schedulerpb.UpdateScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.UpdateScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + }, + }) + return err + }) +} + +func TestSentinelHandler_PatchSchedule(t *testing.T) { + runSentinelHandlerTestCase(t, func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, _ *legacyscheduler.SpecBuilder) error { + _, err := sentinel.Patch(ctx, &schedulerpb.PatchScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.PatchScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + }, + }) + return err + }) +} + +func TestSentinelHandler_DeleteSchedule(t *testing.T) { + runSentinelHandlerTestCase(t, func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, _ *legacyscheduler.SpecBuilder) error { + _, err := sentinel.Delete(ctx, &schedulerpb.DeleteScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.DeleteScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + }, + }) + return err + }) +} + +func TestSentinelHandler_MigrateToWorkflow(t *testing.T) { + runSentinelHandlerTestCase(t, func(sentinel *scheduler.Scheduler, ctx chasm.MutableContext, _ *legacyscheduler.SpecBuilder) error { + _, err := sentinel.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + return err + }) +} + +func TestHandler_CreateFromMigrationState_Sentinel(t *testing.T) { + ctrl := gomock.NewController(t) + logger := log.NewTestLogger() + registry := chasm.NewRegistry(logger) + require.NoError(t, 
registry.Register(&chasm.CoreLibrary{})) + require.NoError(t, registry.Register(newTestLibrary(logger, newRealSpecProcessor(ctrl, logger)))) + + h := scheduler.NewTestHandler(logger) + testEngine := chasmtest.NewEngine(t, registry) + engineCtx := chasm.NewEngineContext(context.Background(), testEngine) + _, err := chasm.StartExecution( + engineCtx, + chasm.ExecutionKey{ + NamespaceID: namespaceID, + BusinessID: scheduleID, + }, + func(ctx chasm.MutableContext, _ struct{}) (*scheduler.Scheduler, error) { + return scheduler.NewSentinel(ctx, namespace, namespaceID, scheduleID), nil + }, + struct{}{}, + ) + require.NoError(t, err) + + _, err = h.TestCreateFromMigrationState(engineCtx, &schedulerpb.CreateFromMigrationStateRequest{ + NamespaceId: namespaceID, + State: &schedulerpb.SchedulerMigrationState{ + SchedulerState: &schedulerpb.SchedulerState{ + ScheduleId: scheduleID, + }, + }, + }) + + require.Error(t, err) + require.ErrorIs(t, err, scheduler.ErrSentinelBlocked) + var unavailableErr *serviceerror.Unavailable + require.ErrorAs(t, err, &unavailableErr) +} + +func TestHandler_MigrateToWorkflow_Sentinel(t *testing.T) { + ctrl := gomock.NewController(t) + logger := log.NewTestLogger() + registry := chasm.NewRegistry(logger) + require.NoError(t, registry.Register(&chasm.CoreLibrary{})) + require.NoError(t, registry.Register(newTestLibrary(logger, newRealSpecProcessor(ctrl, logger)))) + + h := scheduler.NewTestHandler(logger) + testEngine := chasmtest.NewEngine(t, registry) + engineCtx := chasm.NewEngineContext(context.Background(), testEngine) + _, err := chasm.StartExecution( + engineCtx, + chasm.ExecutionKey{ + NamespaceID: namespaceID, + BusinessID: scheduleID, + }, + func(ctx chasm.MutableContext, _ struct{}) (*scheduler.Scheduler, error) { + return scheduler.NewSentinel(ctx, namespace, namespaceID, scheduleID), nil + }, + struct{}{}, + ) + require.NoError(t, err) + + _, err = h.TestMigrateToWorkflow(engineCtx, &schedulerpb.MigrateToWorkflowRequest{ + 
NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + + require.Error(t, err) + require.ErrorIs(t, err, scheduler.ErrSentinelBlocked) + var unavailableErr *serviceerror.Unavailable + require.ErrorAs(t, err, &unavailableErr) +} diff --git a/chasm/lib/scheduler/helper_test.go b/chasm/lib/scheduler/helper_test.go new file mode 100644 index 00000000000..f28ff945cec --- /dev/null +++ b/chasm/lib/scheduler/helper_test.go @@ -0,0 +1,415 @@ +package scheduler_test + +import ( + "context" + "reflect" + "testing" + "time" + + commonpb "go.temporal.io/api/common/v1" + schedulepb "go.temporal.io/api/schedule/v1" + workflowpb "go.temporal.io/api/workflow/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/testing/testlogger" + "go.temporal.io/server/common/testing/testvars" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + namespace = "ns" + namespaceID = "ns-id" + scheduleID = "sched-id" + + defaultInterval = 1 * time.Minute + defaultCatchupWindow = 5 * time.Minute +) + +// defaultSchedule returns a protobuf definition for a schedule matching this +// package's other testing defaults. 
+func defaultSchedule() *schedulepb.Schedule { + return &schedulepb.Schedule{ + Spec: &schedulepb.ScheduleSpec{ + Interval: []*schedulepb.IntervalSpec{ + { + Interval: durationpb.New(defaultInterval), + Phase: durationpb.New(0), + }, + }, + }, + Action: &schedulepb.ScheduleAction{ + Action: &schedulepb.ScheduleAction_StartWorkflow{ + StartWorkflow: &workflowpb.NewWorkflowExecutionInfo{ + WorkflowId: "scheduled-wf", + WorkflowType: &commonpb.WorkflowType{Name: "scheduled-wf-type"}, + }, + }, + }, + Policies: &schedulepb.SchedulePolicies{ + CatchupWindow: durationpb.New(defaultCatchupWindow), + }, + State: &schedulepb.ScheduleState{ + Paused: false, + LimitedActions: false, + RemainingActions: 0, + }, + } +} + +func defaultConfig() *scheduler.Config { + return &scheduler.Config{ + Tweakables: func(_ string) scheduler.Tweakables { + return scheduler.DefaultTweakables + }, + ServiceCallTimeout: func() time.Duration { + return 5 * time.Second + }, + RetryPolicy: func() backoff.RetryPolicy { + return backoff.NewExponentialRetryPolicy(1 * time.Second) + }, + } +} + +func newTestLibrary(logger log.Logger, specProcessor scheduler.SpecProcessor) *scheduler.Library { + config := defaultConfig() + specBuilder := legacyscheduler.NewSpecBuilder() + invokerOpts := scheduler.InvokerTaskHandlerOptions{ + Config: config, + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: logger, + SpecProcessor: specProcessor, + } + return scheduler.NewLibrary( + nil, + scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: config, + }), + scheduler.NewSchedulerCallbacksTaskHandler(scheduler.SchedulerCallbacksTaskHandlerOptions{ + Config: config, + }), + scheduler.NewGeneratorTaskHandler(scheduler.GeneratorTaskHandlerOptions{ + Config: config, + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: logger, + SpecProcessor: specProcessor, + SpecBuilder: specBuilder, + }), + scheduler.NewInvokerExecuteTaskHandler(invokerOpts), + 
scheduler.NewInvokerProcessBufferTaskHandler(invokerOpts), + scheduler.NewBackfillerTaskHandler(scheduler.BackfillerTaskHandlerOptions{ + Config: config, + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: logger, + SpecProcessor: specProcessor, + }), + scheduler.NewSchedulerMigrateToWorkflowTaskHandler(scheduler.SchedulerMigrateToWorkflowTaskHandlerOptions{ + Config: config, + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: logger, + }), + ) +} + +// testEnv holds all components needed for scheduler tests. +type testEnv struct { + t *testing.T // only used within these setup helpers + Ctrl *gomock.Controller + Registry *chasm.Registry + Node *chasm.Node + NodeBackend *chasm.MockNodeBackend + TimeSource *clock.EventTimeSource + Scheduler *scheduler.Scheduler + SpecProcessor scheduler.SpecProcessor + MockEngine *chasm.MockEngine + Logger log.Logger +} + +// testEnvConfig holds configuration options for testEnv. +type testEnvConfig struct { + specProcessor scheduler.SpecProcessor + withMockEngine bool +} + +// testEnvOption is a functional option for configuring testEnv. +type testEnvOption func(*testEnvConfig) + +// withSpecProcessor configures testEnv with a custom SpecProcessor. +// By default, testEnv uses a real SpecProcessor. Use this option only +// when you need to mock specific SpecProcessor behavior (e.g., simulating failures). +func withSpecProcessor(sp scheduler.SpecProcessor) testEnvOption { + return func(c *testEnvConfig) { + c.specProcessor = sp + } +} + +// withMockEngine configures testEnv to include a mock CHASM engine for side-effect tasks. +func withMockEngine() testEnvOption { + return func(c *testEnvConfig) { + c.withMockEngine = true + } +} + +// newRealSpecProcessor creates a real SpecProcessor for tests. 
+func newRealSpecProcessor(ctrl *gomock.Controller, logger log.Logger) scheduler.SpecProcessor { + mockMetrics := metrics.NewMockHandler(ctrl) + mockMetrics.EXPECT().Counter(gomock.Any()).Return(metrics.NoopCounterMetricFunc).AnyTimes() + mockMetrics.EXPECT().WithTags(gomock.Any()).Return(mockMetrics).AnyTimes() + mockMetrics.EXPECT().Timer(gomock.Any()).Return(metrics.NoopTimerMetricFunc).AnyTimes() + + return scheduler.NewSpecProcessor( + defaultConfig(), + mockMetrics, + logger, + legacyscheduler.NewSpecBuilder(), + ) +} + +// newTestEnv creates a new test environment with the given options. +func newTestEnv(t *testing.T, opts ...testEnvOption) *testEnv { + config := &testEnvConfig{} + for _, opt := range opts { + opt(config) + } + + ctrl := gomock.NewController(t) + logger := testlogger.NewTestLogger(t, testlogger.FailOnExpectedErrorOnly) + nodePathEncoder := chasm.DefaultPathEncoder + + // Configure spec processor: use custom if provided, otherwise use real. + var specProcessor scheduler.SpecProcessor + if config.specProcessor != nil { + specProcessor = config.specProcessor + } else { + specProcessor = newRealSpecProcessor(ctrl, logger) + } + + registry := chasm.NewRegistry(logger) + if err := registry.Register(&chasm.CoreLibrary{}); err != nil { + t.Fatalf("failed to register core library: %v", err) + } + if err := registry.Register(newTestLibrary(logger, specProcessor)); err != nil { + t.Fatalf("failed to register scheduler library: %v", err) + } + + timeSource := clock.NewEventTimeSource() + now := time.Now() + timeSource.Update(now) + + tv := testvars.New(t) + nodeBackend := &chasm.MockNodeBackend{ + HandleNextTransitionCount: func() int64 { return 2 }, + HandleGetCurrentVersion: func() int64 { return 1 }, + HandleGetWorkflowKey: tv.Any().WorkflowKey, + HandleIsWorkflow: func() bool { return false }, + HandleCurrentVersionedTransition: func() *persistencespb.VersionedTransition { + return &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, 
+ TransitionCount: 1, + } + }, + } + + node := chasm.NewEmptyTree(registry, timeSource, nodeBackend, nodePathEncoder, logger, metrics.NoopMetricsHandler) + ctx := chasm.NewMutableContext(context.Background(), node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + if err != nil { + t.Fatalf("failed to create scheduler: %v", err) + } + if err = node.SetRootComponent(sched); err != nil { + t.Fatalf("failed to set root component: %v", err) + } + + // Advance Generator's high water mark to 'now'. + generator := sched.Generator.Get(ctx) + generator.LastProcessedTime = timestamppb.New(now) + + _, err = node.CloseTransaction() + if err != nil { + t.Fatalf("failed to close initial transaction: %v", err) + } + + env := &testEnv{ + t: t, + Ctrl: ctrl, + Registry: registry, + Node: node, + NodeBackend: nodeBackend, + TimeSource: timeSource, + Scheduler: sched, + SpecProcessor: specProcessor, + Logger: logger, + } + + if config.withMockEngine { + env.MockEngine = chasm.NewMockEngine(ctrl) + } + + return env +} + +// MutableContext returns a new mutable CHASM context. +func (e *testEnv) MutableContext() chasm.MutableContext { + return chasm.NewMutableContext(context.Background(), e.Node) +} + +// ReadContext returns a new read-only CHASM context. +func (e *testEnv) ReadContext() chasm.Context { + return chasm.NewContext(context.Background(), e.Node) +} + +// CloseTransaction closes the current CHASM transaction. +func (e *testEnv) CloseTransaction() error { + _, err := e.Node.CloseTransaction() + return err +} + +// HasTask returns true if the given task type was added with the given visibilityTime. 
+func (e *testEnv) HasTask(task any, visibilityTime time.Time) bool { + taskType := reflect.TypeOf(task) + for _, tasks := range e.NodeBackend.TasksByCategory { + for _, t := range tasks { + if reflect.TypeOf(t) == taskType && + t.GetVisibilityTime().Equal(visibilityTime) { + return true + } + } + } + return false +} + +// EngineContext returns a context with a mock engine. Requires withMockEngine(). +func (e *testEnv) EngineContext() context.Context { + if e.MockEngine == nil { + e.t.Fatal("EngineContext requires withMockEngine() option") + } + return chasm.NewEngineContext(context.Background(), e.MockEngine) +} + +// ExpectReadComponent sets up mock expectations for reading a component. +func (e *testEnv) ExpectReadComponent(ctx chasm.Context, returnedComponent chasm.Component) { + if e.MockEngine == nil { + e.t.Fatal("ExpectReadComponent requires withMockEngine() option") + } + e.MockEngine.EXPECT().ReadComponent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, _ chasm.ComponentRef, readFn func(chasm.Context, chasm.Component) error, _ ...chasm.TransitionOption) error { + return readFn(ctx, returnedComponent) + }).Times(1) +} + +// ExpectUpdateComponent sets up mock expectations for updating a component. +func (e *testEnv) ExpectUpdateComponent(ctx chasm.MutableContext, componentToUpdate chasm.Component) { + if e.MockEngine == nil { + e.t.Fatal("ExpectUpdateComponent requires withMockEngine() option") + } + e.MockEngine.EXPECT().UpdateComponent(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(_ context.Context, _ chasm.ComponentRef, updateFn func(chasm.MutableContext, chasm.Component) error, _ ...chasm.TransitionOption) ([]byte, error) { + err := updateFn(ctx, componentToUpdate) + return nil, err + }).Times(1) +} + +type testInfra struct { + node *chasm.Node + nodeBackend *chasm.MockNodeBackend + logger log.Logger +} + +// setupTestInfra creates the common test infrastructure for scheduler tests. +func setupTestInfra(t *testing.T, specProcessor scheduler.SpecProcessor) *testInfra { + nodeBackend := &chasm.MockNodeBackend{} + logger := testlogger.NewTestLogger(t, testlogger.FailOnExpectedErrorOnly) + nodePathEncoder := chasm.DefaultPathEncoder + + registry := chasm.NewRegistry(logger) + err := registry.Register(&chasm.CoreLibrary{}) + if err != nil { + t.Fatalf("failed to register core library: %v", err) + } + err = registry.Register(newTestLibrary(logger, specProcessor)) + if err != nil { + t.Fatalf("failed to register scheduler library: %v", err) + } + + timeSource := clock.NewEventTimeSource() + timeSource.Update(time.Now()) + + tv := testvars.New(t) + nodeBackend.HandleNextTransitionCount = func() int64 { return 2 } + nodeBackend.HandleGetCurrentVersion = func() int64 { return 1 } + nodeBackend.HandleGetWorkflowKey = tv.Any().WorkflowKey + nodeBackend.HandleIsWorkflow = func() bool { return false } + nodeBackend.HandleCurrentVersionedTransition = func() *persistencespb.VersionedTransition { + return &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + } + } + + node := chasm.NewEmptyTree(registry, timeSource, nodeBackend, nodePathEncoder, logger, metrics.NoopMetricsHandler) + return &testInfra{ + node: node, + nodeBackend: nodeBackend, + logger: logger, + } +} + +func setupSchedulerForTest(t *testing.T) (*scheduler.Scheduler, chasm.MutableContext, *chasm.Node) { + ctrl := gomock.NewController(t) + specProcessor := scheduler.NewMockSpecProcessor(ctrl) + specProcessor.EXPECT().ProcessTimeRange( 
+ gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(&scheduler.ProcessedTimeRange{ + NextWakeupTime: time.Now().Add(time.Hour), + LastActionTime: time.Now(), + }, nil).AnyTimes() + specProcessor.EXPECT().NextTime(gomock.Any(), gomock.Any()).Return(legacyscheduler.GetNextTimeResult{ + Next: time.Now().Add(time.Hour), + Nominal: time.Now().Add(time.Hour), + }, nil).AnyTimes() + + infra := setupTestInfra(t, specProcessor) + ctx := chasm.NewMutableContext(context.Background(), infra.node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + if err != nil { + t.Fatalf("failed to create scheduler: %v", err) + } + err = infra.node.SetRootComponent(sched) + if err != nil { + t.Fatalf("failed to set root component: %v", err) + } + _, err = infra.node.CloseTransaction() + if err != nil { + t.Fatalf("failed to close initial transaction: %v", err) + } + + ctx = chasm.NewMutableContext(context.Background(), infra.node) + return sched, ctx, infra.node +} + +func setupSentinelForTest(t *testing.T) (*scheduler.Scheduler, chasm.MutableContext, *chasm.Node) { + ctrl := gomock.NewController(t) + specProcessor := scheduler.NewMockSpecProcessor(ctrl) + + infra := setupTestInfra(t, specProcessor) + ctx := chasm.NewMutableContext(context.Background(), infra.node) + sentinel := scheduler.NewSentinel(ctx, namespace, namespaceID, scheduleID) + err := infra.node.SetRootComponent(sentinel) + if err != nil { + t.Fatalf("failed to set root component: %v", err) + } + _, err = infra.node.CloseTransaction() + if err != nil { + t.Fatalf("failed to close initial transaction: %v", err) + } + + ctx = chasm.NewMutableContext(context.Background(), infra.node) + return sentinel, ctx, infra.node +} diff --git a/chasm/lib/scheduler/invoker.go b/chasm/lib/scheduler/invoker.go new file mode 100644 index 00000000000..424554e5b6e --- /dev/null +++ b/chasm/lib/scheduler/invoker.go @@ 
-0,0 +1,381 @@ +package scheduler + +import ( + "slices" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + schedulepb "go.temporal.io/api/schedule/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/util" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// The Invoker component is responsible for executing buffered actions. +type Invoker struct { + chasm.UnimplementedComponent + + *schedulerpb.InvokerState + + Scheduler chasm.ParentPtr[*Scheduler] +} + +func (i *Invoker) LifecycleState(ctx chasm.Context) chasm.LifecycleState { + return chasm.LifecycleStateRunning +} + +// NewInvoker returns an initialized Invoker component, which should +// be parented under a Scheduler root component. +func NewInvoker(ctx chasm.MutableContext) *Invoker { + return newInvokerWithState(ctx, &schedulerpb.InvokerState{ + BufferedStarts: []*schedulespb.BufferedStart{}, + }) +} + +func newInvokerWithState(ctx chasm.MutableContext, state *schedulerpb.InvokerState) *Invoker { + i := &Invoker{ + InvokerState: state, + } + return i +} + +// EnqueueBufferedStarts adds new BufferedStarts to the invocation queue, +// immediately kicking off a processing task. +func (i *Invoker) EnqueueBufferedStarts(ctx chasm.MutableContext, starts []*schedulespb.BufferedStart) { + i.BufferedStarts = append(i.BufferedStarts, starts...) + i.addTasks(ctx) +} + +type processBufferResult struct { + startWorkflows []*schedulespb.BufferedStart + cancelWorkflows []*commonpb.WorkflowExecution + terminateWorkflows []*commonpb.WorkflowExecution + + // discardStarts will be dropped from the Invoker's BufferedStarts without execution. + discardStarts []*schedulespb.BufferedStart + + // Number of buffered starts dropped due to overlap policy during processing. 
+ overlapSkipped int64 + + // Nunmber of buffered starts dropped from missing the catchup window. + missedCatchupWindow int64 +} + +// recordProcessBufferResult updates the Invoker's internal state based on result, as well as the +// LastProcessedTime watermark. Tasks to continue execution are added, if needed. +func (i *Invoker) recordProcessBufferResult(ctx chasm.MutableContext, result *processBufferResult) { + discards := make(map[string]bool) // request ID -> is present + ready := make(map[string]bool) + for _, start := range result.discardStarts { + discards[start.RequestId] = true + } + for _, start := range result.startWorkflows { + ready[start.RequestId] = true + } + + // Drop discarded starts, and update requested starts for execution. + var starts []*schedulespb.BufferedStart + for _, start := range i.GetBufferedStarts() { + if discards[start.RequestId] { + continue + } + + // Starts ready for execution are set to their first attempt. + if ready[start.RequestId] && start.Attempt < 1 { + start.Attempt = 1 + } else if start.Attempt == 0 { + // Start was processed but deferred (e.g., BUFFER_ONE policy with running workflow). + // Mark as deferred (-1) to distinguish from newly-enqueued starts. This prevents + // processingDeadline() from scheduling an immediate task for deferred starts. + start.Attempt = -1 + } + + starts = append(starts, start) + } + + // Update internal state. + i.BufferedStarts = starts + i.CancelWorkflows = append(i.GetCancelWorkflows(), result.cancelWorkflows...) + i.TerminateWorkflows = append(i.GetTerminateWorkflows(), result.terminateWorkflows...) + i.LastProcessedTime = timestamppb.New(ctx.Now(i)) + + // Only schedule new tasks if this processBuffer call actually did something. + // This prevents duplicate task scheduling when multiple ProcessBuffer tasks + // run in the same transaction (e.g., from multiple backfillers). 
+ if len(result.startWorkflows) > 0 || + len(result.discardStarts) > 0 || + len(result.cancelWorkflows) > 0 || + len(result.terminateWorkflows) > 0 { + i.addTasks(ctx) + } +} + +type executeResult struct { + // Starts that executed successfully. Their RunId and StartTime should be + // copied to the corresponding BufferedStart in the buffer. + CompletedStarts []*schedulespb.BufferedStart + + // Starts that failed with a retryable error should be updated and kept in the buffer. + RetryableStarts []*schedulespb.BufferedStart + + // Starts that failed with a non-retryable error can be removed from the buffer. + FailedStarts []*schedulespb.BufferedStart + + CompletedCancels []*commonpb.WorkflowExecution + CompletedTerminates []*commonpb.WorkflowExecution +} + +// Append combines two executeResults (no deduplication is done). +func (e *executeResult) Append(o executeResult) executeResult { + return executeResult{ + CompletedStarts: append(e.CompletedStarts, o.CompletedStarts...), + RetryableStarts: append(e.RetryableStarts, o.RetryableStarts...), + FailedStarts: append(e.FailedStarts, o.FailedStarts...), + CompletedCancels: append(e.CompletedCancels, o.CompletedCancels...), + CompletedTerminates: append(e.CompletedTerminates, o.CompletedTerminates...), + } +} + +// recordExecuteResult updates the Invoker's internal state with the results of a +// completed InvokerExecuteTask. Tasks to continue execution are added, if needed. 
+func (i *Invoker) recordExecuteResult(ctx chasm.MutableContext, result *executeResult) { + completed := make(map[string]*schedulespb.BufferedStart) // request ID -> BufferedStart with RunId/StartTime + failed := make(map[string]bool) // request ID -> is present + retryable := make(map[string]*schedulespb.BufferedStart) // request ID -> *BufferedStart + canceled := make(map[string]bool) // run ID -> is present + terminated := make(map[string]bool) // run ID -> is present + + for _, start := range result.CompletedStarts { + completed[start.RequestId] = start + } + for _, start := range result.FailedStarts { + failed[start.RequestId] = true + } + for _, start := range result.RetryableStarts { + retryable[start.RequestId] = start + } + for _, wf := range result.CompletedCancels { + canceled[wf.RunId] = true + } + for _, wf := range result.CompletedTerminates { + terminated[wf.RunId] = true + } + + // Remove failed (non-retryable) starts from the buffer. + i.BufferedStarts = slices.DeleteFunc(i.GetBufferedStarts(), func(start *schedulespb.BufferedStart) bool { + return failed[start.RequestId] + }) + i.CancelWorkflows = slices.DeleteFunc(i.GetCancelWorkflows(), func(we *commonpb.WorkflowExecution) bool { + return canceled[we.RunId] + }) + i.TerminateWorkflows = slices.DeleteFunc(i.GetTerminateWorkflows(), func(we *commonpb.WorkflowExecution) bool { + return terminated[we.RunId] + }) + + // Update BufferedStarts with results. + for _, start := range i.GetBufferedStarts() { + if completedStart, ok := completed[start.RequestId]; ok { + start.RunId = completedStart.GetRunId() + start.StartTime = completedStart.GetStartTime() + } + if retry, ok := retryable[start.RequestId]; ok { + start.Attempt++ + start.BackoffTime = retry.GetBackoffTime() + } + } + + // Add tasks if other actions are backing off or still pending execution. + i.addTasks(ctx) +} + +// runningWorkflowID returns the workflow ID associated with the given +// outstanding request. 
+func (i *Invoker) runningWorkflowID(requestID string) string { + for _, start := range i.GetBufferedStarts() { + if start.GetRequestId() == requestID && start.GetCompleted() == nil { + return start.GetWorkflowId() + } + } + return "" +} + +// recordCompletedAction updates Invoker metadata and kicks off tasks after +// an action completes. It marks the BufferedStart as completed by setting +// the Completed field. +// +// Returns the schedule time of the completed action for metrics. +func (i *Invoker) recordCompletedAction( + ctx chasm.MutableContext, + completed *schedulespb.CompletedResult, + requestID string, +) (scheduleTime time.Time) { + // Find the BufferedStart and mark it as completed. + for _, start := range i.BufferedStarts { + if start.GetRequestId() == requestID { + scheduleTime = start.DesiredTime.AsTime() + start.Completed = completed + break + } + } + + // Re-enable deferred starts (Attempt == -1) so they can be re-processed by + // ProcessBuffer now that a workflow has completed. This allows the overlap + // policy to be re-evaluated. + for _, start := range i.BufferedStarts { + if start.Attempt == -1 { + start.Attempt = 0 + } + } + + // Update DesiredTime on the first pending start for metrics. DesiredTime is used + // to drive action latency between buffered starts (the time it takes between + // completing one start and kicking off the next). We set that on the first start + // pending execution. + idx := slices.IndexFunc(i.BufferedStarts, func(start *schedulespb.BufferedStart) bool { + return start.Attempt == 0 + }) + if idx >= 0 { + i.BufferedStarts[idx].DesiredTime = timestamppb.New(completed.GetCloseTime().AsTime()) + } + + // Apply retention to keep only the last N completed actions. + i.applyCompletedRetention() + + // addTasks will add an immediate ProcessBufferTask if we have any starts pending + // kick-off. + i.addTasks(ctx) + + return +} + +// addTasks adds both ProcessBuffer and Execute tasks as needed. 
It should be +// called when completing processing/executing tasks, to drive backoff/retry. +func (i *Invoker) addTasks(ctx chasm.MutableContext) { + totalStarts := len(i.GetBufferedStarts()) + eligibleStarts := len(i.getEligibleBufferedStarts()) + + // Add a ProcessBuffer pure task whenever there are BufferedStarts that are + // backing off, or are still pending initial processing. + if (totalStarts - eligibleStarts) > 0 { + ctx.AddTask(i, chasm.TaskAttributes{ + ScheduledTime: i.processingDeadline(), + }, &schedulerpb.InvokerProcessBufferTask{}) + } + + // Add an Execute side effect task whenever there are any eligible actions + // pending execution. + if len(i.GetCancelWorkflows()) > 0 || len(i.GetTerminateWorkflows()) > 0 || eligibleStarts > 0 { + ctx.AddTask(i, chasm.TaskAttributes{}, &schedulerpb.InvokerExecuteTask{}) + } +} + +// processingDeadline returns the earliest possible time that the BufferedStarts +// queue should be processed, taking into account starts that have not yet been +// attempted, as well as those that are pending backoff to retry. If the buffer +// is empty, the return value will be Time's zero value. +func (i *Invoker) processingDeadline() time.Time { + var deadline time.Time + for _, start := range i.GetBufferedStarts() { + if start.GetAttempt() == 0 { + // Return zero time to schedule an immediate task for unprocessed starts. + return chasm.TaskScheduledTimeImmediate + } + backoff := start.GetBackoffTime().AsTime() + if deadline.IsZero() || backoff.Before(deadline) { + deadline = backoff + } + } + return deadline +} + +// getEligibleBufferedStarts returns all BufferedStarts that are marked for +// execution (Attempt > 0), haven't been started yet (no RunId), and aren't +// presently backing off, based on last processed time. 
+func (i *Invoker) getEligibleBufferedStarts() []*schedulespb.BufferedStart { + return util.FilterSlice(i.GetBufferedStarts(), func(start *schedulespb.BufferedStart) bool { + return start.Attempt > 0 && + start.GetRunId() == "" && + start.BackoffTime.AsTime().Before(i.GetLastProcessedTime().AsTime()) + }) +} + +// isWorkflowStarted returns true if a workflow with the given ID has already +// been started (has a RunId set). +func (i *Invoker) isWorkflowStarted(workflowID string) bool { + for _, start := range i.GetBufferedStarts() { + if start.GetWorkflowId() == workflowID && start.GetRunId() != "" { + return true + } + } + return false +} + +// runningWorkflowExecutions returns the list of workflow executions that +// have been started but not yet completed. +func (i *Invoker) runningWorkflowExecutions() []*commonpb.WorkflowExecution { + var running []*commonpb.WorkflowExecution + for _, start := range i.GetBufferedStarts() { + if start.GetRunId() != "" && start.GetCompleted() == nil { + running = append(running, &commonpb.WorkflowExecution{ + WorkflowId: start.GetWorkflowId(), + RunId: start.GetRunId(), + }) + } + } + return running +} + +// recentActions returns started/completed actions as ScheduleActionResults. +// This includes both running workflows (with status RUNNING) and completed +// workflows (with their final status). +func (i *Invoker) recentActions() []*schedulepb.ScheduleActionResult { + var results []*schedulepb.ScheduleActionResult + for _, start := range i.GetBufferedStarts() { + // Only include workflows that have been started (have a RunId). 
+ if start.GetRunId() == "" { + continue + } + status := enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING + if start.GetCompleted() != nil { + status = start.GetCompleted().GetStatus() + } + results = append(results, &schedulepb.ScheduleActionResult{ + ScheduleTime: start.GetActualTime(), + ActualTime: start.GetStartTime(), + StartWorkflowResult: &commonpb.WorkflowExecution{ + WorkflowId: start.GetWorkflowId(), + RunId: start.GetRunId(), + }, + StartWorkflowStatus: status, + }) + } + return results +} + +// applyCompletedRetention removes the oldest completed BufferedStarts beyond +// the retention limit. +func (i *Invoker) applyCompletedRetention() { + var completed []*schedulespb.BufferedStart + var nonCompleted []*schedulespb.BufferedStart + + for _, start := range i.BufferedStarts { + if start.GetCompleted() != nil { + completed = append(completed, start) + } else { + nonCompleted = append(nonCompleted, start) + } + } + + // Sort by oldest first. + slices.SortFunc(completed, func(a, b *schedulespb.BufferedStart) int { + return a.GetCompleted().GetCloseTime().AsTime().Compare(b.GetCompleted().GetCloseTime().AsTime()) + }) + + keepFrom := max(0, len(completed)-recentActionCount) + completed = completed[keepFrom:] + + i.BufferedStarts = append(nonCompleted, completed...) 
+} diff --git a/chasm/lib/scheduler/invoker_execute_task_test.go b/chasm/lib/scheduler/invoker_execute_task_test.go new file mode 100644 index 00000000000..34732f6b5a3 --- /dev/null +++ b/chasm/lib/scheduler/invoker_execute_task_test.go @@ -0,0 +1,423 @@ +package scheduler_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservicemock/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/testing/mockapi/workflowservicemock/v1" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// invokerExecuteTestEnv extends testEnv with mock clients for invoker execute tests. 
+type invokerExecuteTestEnv struct { + *testEnv + handler *scheduler.InvokerExecuteTaskHandler + mockFrontendClient *workflowservicemock.MockWorkflowServiceClient + mockHistoryClient *historyservicemock.MockHistoryServiceClient +} + +func newInvokerExecuteTestEnv(t *testing.T) *invokerExecuteTestEnv { + env := newTestEnv(t, withMockEngine()) + + mockFrontendClient := workflowservicemock.NewMockWorkflowServiceClient(env.Ctrl) + mockHistoryClient := historyservicemock.NewMockHistoryServiceClient(env.Ctrl) + + handler := scheduler.NewInvokerExecuteTaskHandler(scheduler.InvokerTaskHandlerOptions{ + Config: defaultConfig(), + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: env.Logger, + SpecProcessor: env.SpecProcessor, + HistoryClient: mockHistoryClient, + FrontendClient: mockFrontendClient, + }) + + return &invokerExecuteTestEnv{ + testEnv: env, + handler: handler, + mockFrontendClient: mockFrontendClient, + mockHistoryClient: mockHistoryClient, + } +} + +type executeTestCase struct { + InitialBufferedStarts []*schedulespb.BufferedStart + InitialCancelWorkflows []*commonpb.WorkflowExecution + InitialTerminateWorkflows []*commonpb.WorkflowExecution + InitialRunningWorkflows []*commonpb.WorkflowExecution + + ExpectedBufferedStarts int + ExpectedRunningWorkflows int + ExpectedTerminateWorkflows int + ExpectedCancelWorkflows int + ExpectedActionCount int64 + ExpectedOverlapSkipped int64 + ExpectedMissedCatchupWindow int64 + + ValidateInvoker func(t *testing.T, invoker *scheduler.Invoker, env *invokerExecuteTestEnv) +} + +func runExecuteTestCase(t *testing.T, env *invokerExecuteTestEnv, c *executeTestCase) { + ctx := env.MutableContext() + invoker := env.Scheduler.Invoker.Get(ctx) + + // Set up initial state. Note: InitialRunningWorkflows is now represented by + // BufferedStarts that have RunId set but no Completed field. 
+ invoker.BufferedStarts = c.InitialBufferedStarts + invoker.CancelWorkflows = c.InitialCancelWorkflows + invoker.TerminateWorkflows = c.InitialTerminateWorkflows + + // Add initial running workflows as BufferedStarts with RunId set. + for _, wf := range c.InitialRunningWorkflows { + invoker.BufferedStarts = append(invoker.BufferedStarts, &schedulespb.BufferedStart{ + RequestId: wf.WorkflowId + "-req", + WorkflowId: wf.WorkflowId, + RunId: wf.RunId, + Attempt: 1, + }) + } + + // Set LastProcessedTime to current time to ensure time checks pass. + invoker.LastProcessedTime = timestamppb.New(env.TimeSource.Now()) + + // Set expectations. The read and update calls will also update the Scheduler + // component, within the same transition. + env.ExpectReadComponent(ctx, invoker) + env.ExpectUpdateComponent(ctx, invoker) + + // Create engine context for side effect task execution. + engineCtx := env.EngineContext() + err := env.handler.Execute(engineCtx, chasm.ComponentRef{}, chasm.TaskAttributes{}, &schedulerpb.InvokerExecuteTask{}) + require.NoError(t, err) + require.NoError(t, env.CloseTransaction()) + + // Validate the results. + // BufferedStarts now includes both pending and running starts (they're kept after starting). + require.Len(t, invoker.GetBufferedStarts(), c.ExpectedBufferedStarts) + + // Count running workflows from BufferedStarts (has RunId but no Completed). 
+ runningCount := 0 + for _, start := range invoker.GetBufferedStarts() { + if start.GetRunId() != "" && start.GetCompleted() == nil { + runningCount++ + } + } + require.Equal(t, c.ExpectedRunningWorkflows, runningCount) + + require.Len(t, invoker.TerminateWorkflows, c.ExpectedTerminateWorkflows) + require.Len(t, invoker.CancelWorkflows, c.ExpectedCancelWorkflows) + require.Equal(t, c.ExpectedActionCount, env.Scheduler.Info.ActionCount) + require.Equal(t, c.ExpectedOverlapSkipped, env.Scheduler.Info.OverlapSkipped) + require.Equal(t, c.ExpectedMissedCatchupWindow, env.Scheduler.Info.MissedCatchupWindow) + + // Callbacks. + if c.ValidateInvoker != nil { + c.ValidateInvoker(t, invoker, env) + } +} + +// Execute success case. +func TestExecuteTask_Basic(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req1", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 1, + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: true, + RequestId: "req2", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 1, + }, + } + + // Expect both buffered starts to result in workflow executions. + env.mockFrontendClient.EXPECT(). + StartWorkflowExecution(gomock.Any(), gomock.Any()). + Times(2). + Return(&workflowservice.StartWorkflowExecutionResponse{ + RunId: "run-id", + }, nil) + + // After execution, both BufferedStarts are kept (with RunId set). + // They become "running" workflows. + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 2, // kept after starting + ExpectedRunningWorkflows: 2, + ExpectedActionCount: 2, + }) +} + +// Execute is scheduled with an empty buffer. 
+func TestExecuteTask_Empty(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: nil, + }) +} + +// A buffered start fails with a retryable error. +func TestExecuteTask_RetryableFailure(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + + // Set up the Invoker's buffer with a two starts. One will succeed immediately, + // one will fail. + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "fail", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 1, + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: true, + RequestId: "pass", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 1, + }, + } + + // Fail the first start, and succeed the second. + env.mockFrontendClient.EXPECT(). + StartWorkflowExecution(gomock.Any(), startWorkflowExecutionRequestIDMatches("fail")). + Times(1). + Return(nil, serviceerror.NewDeadlineExceeded("deadline exceeded")) + env.mockFrontendClient.EXPECT(). + StartWorkflowExecution(gomock.Any(), startWorkflowExecutionRequestIDMatches("pass")). + Times(1). + Return(&workflowservice.StartWorkflowExecutionResponse{ + RunId: "run-id", + }, nil) + + // After execution: + // - Failed start stays in buffer with backoff (pending) + // - Successful start stays in buffer with RunId set (running) + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 2, // both kept: 1 failed (backoff) + 1 running + ExpectedRunningWorkflows: 1, + ExpectedActionCount: 1, + ValidateInvoker: func(t *testing.T, invoker *scheduler.Invoker, env *invokerExecuteTestEnv) { + // Find the failed start (no RunId, has backoff). 
+ for _, start := range invoker.BufferedStarts { + if start.GetRunId() == "" { + backoffTime := start.BackoffTime.AsTime() + require.True(t, backoffTime.After(env.TimeSource.Now())) + require.Equal(t, int64(2), start.Attempt) + return + } + } + require.Fail(t, "expected to find failed start with backoff") + }, + }) +} + +// A buffered start fails when a duplicate workflow has already been started. +func TestExecuteTask_AlreadyStarted(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 1, + }, + } + + // Fail with WorkflowExecutionAlreadyStarted. + env.mockFrontendClient.EXPECT(). + StartWorkflowExecution(gomock.Any(), gomock.Any()). + Times(1). + Return(nil, serviceerror.NewWorkflowExecutionAlreadyStarted("workflow already started", "", "")) + + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 0, + ExpectedRunningWorkflows: 0, + ExpectedActionCount: 0, + }) +} + +// A buffered start fails from having exceeded its maximum retry limit. 
+func TestExecuteTask_ExceedsMaxAttempts(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: scheduler.InvokerMaxStartAttempts, + }, + } + + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 0, + ExpectedRunningWorkflows: 0, + ExpectedActionCount: 0, + }) +} + +// An execute task runs with cancels/terminations queued, which fail to execute. +func TestExecuteTask_CancelTerminateFailure(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + cancelWorkflows := []*commonpb.WorkflowExecution{ + { + WorkflowId: "wf", + RunId: "run1", + }, + } + terminateWorkflows := []*commonpb.WorkflowExecution{ + { + WorkflowId: "wf", + RunId: "run2", + }, + } + + // Fail both service calls. + env.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), gomock.Any()).Times(1). + Return(nil, serviceerror.NewInternal("internal failure")) + env.mockHistoryClient.EXPECT().TerminateWorkflowExecution(gomock.Any(), gomock.Any()).Times(1). + Return(nil, serviceerror.NewInternal("internal failure")) + + // Terminate and Cancel are both attempted only once. Regardless of the service + // call's outcome, they should have been removed from the Invoker's queue. + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: nil, + InitialCancelWorkflows: cancelWorkflows, + InitialTerminateWorkflows: terminateWorkflows, + ExpectedBufferedStarts: 0, + ExpectedRunningWorkflows: 0, + ExpectedActionCount: 0, + ExpectedCancelWorkflows: 0, + ExpectedTerminateWorkflows: 0, + }) +} + +// An Execute task runs with cancels/terminations queued, resulting in success. 
+func TestExecuteTask_CancelTerminateSucceed(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + cancelWorkflows := []*commonpb.WorkflowExecution{ + { + WorkflowId: "wf", + RunId: "run1", + }, + } + terminateWorkflows := []*commonpb.WorkflowExecution{ + { + WorkflowId: "wf", + RunId: "run2", + }, + } + + // Succeed both service calls. + env.mockHistoryClient.EXPECT().RequestCancelWorkflowExecution(gomock.Any(), gomock.Any()).Times(1). + Return(nil, nil) + env.mockHistoryClient.EXPECT().TerminateWorkflowExecution(gomock.Any(), gomock.Any()).Times(1). + Return(nil, nil) + + runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: nil, + InitialCancelWorkflows: cancelWorkflows, + InitialTerminateWorkflows: terminateWorkflows, + ExpectedBufferedStarts: 0, + ExpectedRunningWorkflows: 0, + ExpectedActionCount: 0, + ExpectedCancelWorkflows: 0, + ExpectedTerminateWorkflows: 0, + }) +} + +// Tests when the ExecuteTask should yield by completing and committing any +// completed work. +func TestExecuteTask_ExceedsMaxActionsPerExecution(t *testing.T) { + env := newInvokerExecuteTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + var bufferedStarts []*schedulespb.BufferedStart + maxStarts := scheduler.DefaultTweakables.MaxActionsPerExecution + for i := range maxStarts * 2 { + bufferedStarts = append(bufferedStarts, + &schedulespb.BufferedStart{ + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: fmt.Sprintf("req-%d", i), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 1, + }) + } + + // Expect up to the maximum buffered start limit to result in workflow + // executions. + env.mockFrontendClient.EXPECT(). + StartWorkflowExecution(gomock.Any(), gomock.Any()). + Times(maxStarts). + Return(&workflowservice.StartWorkflowExecutionResponse{ + RunId: "run-id", + }, nil) + + // All BufferedStarts are kept: maxStarts get RunId set (running), the rest stay pending. 
+ runExecuteTestCase(t, env, &executeTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: maxStarts * 2, // all kept: started + pending + ExpectedRunningWorkflows: maxStarts, // only started ones + ExpectedActionCount: int64(maxStarts), + }) +} + +type startWorkflowExecutionRequestIDMatcher struct { + RequestID string +} + +var _ gomock.Matcher = &startWorkflowExecutionRequestIDMatcher{} + +func startWorkflowExecutionRequestIDMatches(requestID string) *startWorkflowExecutionRequestIDMatcher { + return &startWorkflowExecutionRequestIDMatcher{requestID} +} + +func (s *startWorkflowExecutionRequestIDMatcher) String() string { + return fmt.Sprintf("StartWorkflowExecutionRequest{RequestId: \"%s\"}", s.RequestID) +} + +func (s *startWorkflowExecutionRequestIDMatcher) Matches(x any) bool { + req, ok := x.(*workflowservice.StartWorkflowExecutionRequest) + return ok && req.RequestId == s.RequestID +} diff --git a/chasm/lib/scheduler/invoker_process_buffer_task_test.go b/chasm/lib/scheduler/invoker_process_buffer_task_test.go new file mode 100644 index 00000000000..9178a44667d --- /dev/null +++ b/chasm/lib/scheduler/invoker_process_buffer_task_test.go @@ -0,0 +1,351 @@ +package scheduler_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/util" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func newProcessBufferHandler(env *testEnv) *scheduler.InvokerProcessBufferTaskHandler { + return scheduler.NewInvokerProcessBufferTaskHandler(scheduler.InvokerTaskHandlerOptions{ + Config: defaultConfig(), + MetricsHandler: metrics.NoopMetricsHandler, + BaseLogger: env.Logger, + 
SpecProcessor: env.SpecProcessor, + }) +} + +type processBufferTestCase struct { + InitialBufferedStarts []*schedulespb.BufferedStart + InitialCancelWorkflows []*commonpb.WorkflowExecution + InitialTerminateWorkflows []*commonpb.WorkflowExecution + InitialRunningWorkflows []*commonpb.WorkflowExecution + + ExpectedBufferedStarts int + ExpectedRunningWorkflows int + ExpectedTerminateWorkflows int + ExpectedCancelWorkflows int + ExpectedOverlapSkipped int64 + ExpectedMissedCatchupWindow int64 + + ValidateInvoker func(t *testing.T, invoker *scheduler.Invoker) +} + +func runProcessBufferTestCase(t *testing.T, env *testEnv, c *processBufferTestCase) { + ctx := env.MutableContext() + invoker := env.Scheduler.Invoker.Get(ctx) + + // Set up initial state. Note: InitialRunningWorkflows is now represented by + // BufferedStarts that have RunId set but no Completed field. + invoker.BufferedStarts = c.InitialBufferedStarts + invoker.CancelWorkflows = c.InitialCancelWorkflows + invoker.TerminateWorkflows = c.InitialTerminateWorkflows + + // Add initial running workflows as BufferedStarts with RunId set. + for _, wf := range c.InitialRunningWorkflows { + invoker.BufferedStarts = append(invoker.BufferedStarts, &schedulespb.BufferedStart{ + RequestId: wf.WorkflowId + "-req", + WorkflowId: wf.WorkflowId, + RunId: wf.RunId, + Attempt: 1, + }) + } + + // Set LastProcessedTime to current time to ensure time checks pass. + invoker.LastProcessedTime = timestamppb.New(env.TimeSource.Now()) + + handler := newProcessBufferHandler(env) + err := handler.Execute(ctx, invoker, chasm.TaskAttributes{}, &schedulerpb.InvokerProcessBufferTask{}) + require.NoError(t, err) + require.NoError(t, env.CloseTransaction()) + + // Validate the results. + // Count BufferedStarts (excluding running ones added from InitialRunningWorkflows). 
+ require.Len(t, invoker.GetBufferedStarts(), c.ExpectedBufferedStarts+len(c.InitialRunningWorkflows)) + + // Count running workflows from BufferedStarts (has RunId but no Completed). + runningCount := 0 + for _, start := range invoker.GetBufferedStarts() { + if start.GetRunId() != "" && start.GetCompleted() == nil { + runningCount++ + } + } + require.Equal(t, c.ExpectedRunningWorkflows, runningCount) + + require.Len(t, invoker.TerminateWorkflows, c.ExpectedTerminateWorkflows) + require.Len(t, invoker.CancelWorkflows, c.ExpectedCancelWorkflows) + require.Equal(t, c.ExpectedOverlapSkipped, env.Scheduler.Info.OverlapSkipped) + require.Equal(t, c.ExpectedMissedCatchupWindow, env.Scheduler.Info.MissedCatchupWindow) + + // Callbacks. + if c.ValidateInvoker != nil { + c.ValidateInvoker(t, invoker) + } +} + +// ProcessBuffer attempts all buffered starts with ALLOW_ALL policy. +func TestProcessBufferTask_AllowAll(t *testing.T) { + env := newTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req1", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req2", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req3", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 3, + ExpectedOverlapSkipped: 0, + ValidateInvoker: func(t *testing.T, invoker *scheduler.Invoker) { + require.Len(t, util.FilterSlice(invoker.GetBufferedStarts(), func(start *schedulespb.BufferedStart) bool { + return start.Attempt > 0 + }), 3) + }, + }) 
+} + +// ProcessBuffer processes a start that missed the catchup window. +func TestProcessBufferTask_MissedCatchupWindow(t *testing.T) { + env := newTestEnv(t) + now := env.TimeSource.Now() + startTime := now.Add(-defaultCatchupWindow * 2) + startTimestamp := timestamppb.New(startTime) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTimestamp, + ActualTime: startTimestamp, + DesiredTime: startTimestamp, + Manual: false, + RequestId: "req1", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 0, + ExpectedOverlapSkipped: 0, + ExpectedMissedCatchupWindow: 1, + }) +} + +// ProcessBuffer defers a start (from overlap policy) by placing it into NewBuffer. +func TestProcessBufferTask_BufferOne(t *testing.T) { + env := newTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req1", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ONE, + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req2", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ONE, + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req3", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ONE, + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + // Because no workflows are running, we'll immediately kick off one + // BufferedStart, and then buffer the next. This leaves us with 1 ready start, + // and 1 still buffered. 
+ ExpectedBufferedStarts: 2, + ExpectedOverlapSkipped: 1, + ValidateInvoker: func(t *testing.T, invoker *scheduler.Invoker) { + // Only one start should be set for execution (Attempt > 0). + require.Len(t, util.FilterSlice(invoker.GetBufferedStarts(), func(start *schedulespb.BufferedStart) bool { + return start.Attempt > 0 + }), 1) + }, + }) +} + +// ProcessBuffer is scheduled with an empty buffer. +func TestProcessBufferTask_Empty(t *testing.T) { + env := newTestEnv(t) + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: nil, + }) +} + +// ProcessBuffer is scheduled with a buffer of starts all backing off. +func TestProcessBufferTask_BackingOff(t *testing.T) { + env := newTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + backoffTime := startTime.AsTime().Add(30 * time.Minute) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req1", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 2, + BackoffTime: timestamppb.New(backoffTime), + }, + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: true, + RequestId: "req2", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 3, + BackoffTime: timestamppb.New(backoffTime), + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 2, + }) +} + +// ProcessBuffer is scheduled with a start that was backing off, but ready to retry. 
+func TestProcessBufferTask_BackingOffReady(t *testing.T) { + env := newTestEnv(t) + startTime := timestamppb.New(env.TimeSource.Now()) + backoffTime := env.TimeSource.Now().Add(-1 * time.Minute) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "req1", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + Attempt: 2, + BackoffTime: timestamppb.New(backoffTime), + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + ExpectedBufferedStarts: 1, + ValidateInvoker: func(t *testing.T, invoker *scheduler.Invoker) { + // The start should be ready for execution (Attempt > 0). + require.Len(t, util.FilterSlice(invoker.GetBufferedStarts(), func(start *schedulespb.BufferedStart) bool { + return start.Attempt > 0 + }), 1) + }, + }) +} + +// A buffered start with an overlap policy to terminate other workflows is processed. +func TestProcessBufferTask_NeedsTerminate(t *testing.T) { + env := newTestEnv(t) + + // Add a running workflow to the Scheduler. + initialRunningWorkflows := []*commonpb.WorkflowExecution{{ + WorkflowId: "existing-wf", + RunId: "existing-run", + }} + + // Set up the BufferedStart with a policy that will terminate existing workflows. + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "new-wf", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_TERMINATE_OTHER, + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + InitialRunningWorkflows: initialRunningWorkflows, + // Buffer should still contain the buffered start. 
The existing workflow will still + // remain in RunningWorkflows as well, since it is the Watcher's job to remove it + // after termination/cancelation takes effect. + ExpectedBufferedStarts: 1, + ExpectedRunningWorkflows: 1, + ExpectedTerminateWorkflows: 1, + }) +} + +// A buffered start with an overlap policy to cancel other workflows is processed. +func TestProcessBufferTask_NeedsCancel(t *testing.T) { + env := newTestEnv(t) + + // Add a running workflow to the Scheduler. + initialRunningWorkflows := []*commonpb.WorkflowExecution{{ + WorkflowId: "existing-wf", + RunId: "existing-run", + }} + + // Set up the BufferedStart with a policy that will cancel existing workflows. + startTime := timestamppb.New(env.TimeSource.Now()) + bufferedStarts := []*schedulespb.BufferedStart{ + { + NominalTime: startTime, + ActualTime: startTime, + DesiredTime: startTime, + Manual: false, + RequestId: "new-wf", + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_CANCEL_OTHER, + }, + } + + runProcessBufferTestCase(t, env, &processBufferTestCase{ + InitialBufferedStarts: bufferedStarts, + InitialRunningWorkflows: initialRunningWorkflows, + // Buffer should still contain the buffered start. The existing workflow will still + // remain in RunningWorkflows as well, since it is the Watcher's job to remove it + // after termination/cancelation takes effect. 
+ ExpectedBufferedStarts: 1, + ExpectedRunningWorkflows: 1, + ExpectedCancelWorkflows: 1, + }) +} diff --git a/chasm/lib/scheduler/invoker_tasks.go b/chasm/lib/scheduler/invoker_tasks.go new file mode 100644 index 00000000000..19d5bc17309 --- /dev/null +++ b/chasm/lib/scheduler/invoker_tasks.go @@ -0,0 +1,718 @@ +package scheduler + +import ( + "cmp" + "context" + "errors" + "fmt" + "sync" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + schedulepb "go.temporal.io/api/schedule/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/util" + queueerrors "go.temporal.io/server/service/history/queues/errors" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/fx" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type ( + InvokerTaskHandlerOptions struct { + fx.In + + Config *Config + MetricsHandler metrics.Handler + BaseLogger log.Logger + SpecProcessor SpecProcessor + + HistoryClient resource.HistoryClient + + // FrontendClient is used for specifically StartWorkflow calls, to ensure that + // the request makes it through metering's interceptor. Because we don't change for + // terminate/cancels, we can go directly to history for other service calls. 
+ FrontendClient workflowservice.WorkflowServiceClient + } + + InvokerExecuteTaskHandler struct { + chasm.SideEffectTaskHandlerBase[*schedulerpb.InvokerExecuteTask] + config *Config + metricsHandler metrics.Handler + baseLogger log.Logger + historyClient resource.HistoryClient + frontendClient workflowservice.WorkflowServiceClient + } + + InvokerProcessBufferTaskHandler struct { + chasm.PureTaskHandlerBase + config *Config + metricsHandler metrics.Handler + baseLogger log.Logger + historyClient resource.HistoryClient + frontendClient workflowservice.WorkflowServiceClient + } + + // Per-task context. + invokerTaskHandlerContext struct { + context.Context + + actionsTaken int + maxActions int + } + + rateLimitedError struct { + // The requested interval to delay processing by rescheduling. + delay time.Duration + } +) + + const ( + // Lower bound for the deadline in which buffered actions are dropped. + startWorkflowMinDeadline = 5 * time.Second + + // Upper bound on how many times starting an individual buffered action should be retried. 
+ InvokerMaxStartAttempts = 10 // TODO - dial this up/remove it +) + +var ( + errRetryLimitExceeded = queueerrors.NewUnprocessableTaskError("retry limit exceeded") + _ error = &rateLimitedError{} +) + +func NewInvokerExecuteTaskHandler(opts InvokerTaskHandlerOptions) *InvokerExecuteTaskHandler { + return &InvokerExecuteTaskHandler{ + config: opts.Config, + metricsHandler: opts.MetricsHandler, + baseLogger: opts.BaseLogger, + historyClient: opts.HistoryClient, + frontendClient: opts.FrontendClient, + } +} + +func NewInvokerProcessBufferTaskHandler(opts InvokerTaskHandlerOptions) *InvokerProcessBufferTaskHandler { + return &InvokerProcessBufferTaskHandler{ + config: opts.Config, + metricsHandler: opts.MetricsHandler, + baseLogger: opts.BaseLogger, + historyClient: opts.HistoryClient, + frontendClient: opts.FrontendClient, + } +} + +func (h *InvokerExecuteTaskHandler) Validate( + _ chasm.Context, + invoker *Invoker, + _ chasm.TaskAttributes, + _ *schedulerpb.InvokerExecuteTask, +) (bool, error) { + // If another execute task already happened to kick everything off, we don't need + // this one. + eligibleStarts := invoker.getEligibleBufferedStarts() + valid := len(invoker.GetTerminateWorkflows())+ + len(invoker.GetCancelWorkflows())+ + len(eligibleStarts) > 0 + return valid, nil +} + +func (h *InvokerExecuteTaskHandler) Execute( + ctx context.Context, + invokerRef chasm.ComponentRef, + _ chasm.TaskAttributes, + _ *schedulerpb.InvokerExecuteTask, +) error { + var result executeResult + + var invoker *Invoker + var scheduler *Scheduler + var lastCompletionState *schedulerpb.LastCompletionResult + var callback *commonpb.Callback + + // Read and deep copy returned components, since we'll continue to access them + // outside of this function (outside of the MS lock). 
+ _, err := chasm.ReadComponent( + ctx, + invokerRef, + func(i *Invoker, ctx chasm.Context, _ any) (struct{}, error) { + invoker = &Invoker{ + InvokerState: common.CloneProto(i.InvokerState), + } + + s := i.Scheduler.Get(ctx) + scheduler = &Scheduler{ + SchedulerState: common.CloneProto(s.SchedulerState), + cacheConflictToken: s.cacheConflictToken, + compiledSpec: s.compiledSpec, + } + + lcs := s.LastCompletionResult.Get(ctx) + lastCompletionState = common.CloneProto(lcs) + + // Set up the completion callback to handle workflow results. + cb, err := chasm.GenerateNexusCallback(ctx, s) + if err != nil { + return struct{}{}, err + } + callback = common.CloneProto(cb) + + return struct{}{}, nil + }, + nil, + ) + if err != nil { + return fmt.Errorf("failed to read component: %w", err) + } + + logger := newTaggedLogger(h.baseLogger, scheduler) + metricsHandler := newTaggedMetricsHandler(h.metricsHandler, scheduler) + + // Terminate, cancel, and start workflows. The result struct contains the + // complete outcome of all requests executed in a single batch. + // + // Invoker will never have work pending for more than one of these calls (terminate, + // cancel, start) at a time, so it isn't sensible to run them in parallel. The + // structure below is simply for code simplicity. + ictx := h.newInvokerTaskHandlerContext(ctx, scheduler) + result = result.Append(h.terminateWorkflows(ictx, logger, metricsHandler, scheduler, invoker.GetTerminateWorkflows())) + result = result.Append(h.cancelWorkflows(ictx, logger, metricsHandler, scheduler, invoker.GetCancelWorkflows())) + sres, startResults := h.startWorkflows(ictx, logger, metricsHandler, scheduler, invoker, lastCompletionState, callback) + result = result.Append(sres) + + // Record action results on the Invoker (internal state), as well as the + // Scheduler (user-facing metrics). 
+ _, _, err = chasm.UpdateComponent( + ctx, + invokerRef, + func(i *Invoker, ctx chasm.MutableContext, _ any) (chasm.NoValue, error) { + s := i.Scheduler.Get(ctx) + + i.recordExecuteResult(ctx, &result) + s.recordActionResult(&schedulerActionResult{actionCount: int64(len(startResults))}) + + return nil, nil + }, + nil, + ) + if err != nil { + return fmt.Errorf("failed to update component state: %w", err) + } + + return nil +} + +// takeNextAction increments the context's actionTaken counter, returning true if +// the action should be executed, and false if the task should instead yield. +func (i *invokerTaskHandlerContext) takeNextAction() bool { + allowed := i.actionsTaken < i.maxActions + if allowed { + i.actionsTaken++ + } + return allowed +} + +// cancelWorkflows does a best-effort attempt to cancel all workflow executions provided in targets. +func (h *InvokerExecuteTaskHandler) cancelWorkflows( + ctx invokerTaskHandlerContext, + logger log.Logger, + metricsHandler metrics.Handler, + scheduler *Scheduler, + targets []*commonpb.WorkflowExecution, +) (result executeResult) { + var wg sync.WaitGroup + var resultMutex sync.Mutex + + for _, wf := range targets { + if !ctx.takeNextAction() { + break + } + + // Run all cancels concurrently. + newCtx := ctx.Clone() + wg.Go(func() { + err := h.cancelWorkflow(newCtx, scheduler, wf) + + resultMutex.Lock() + defer resultMutex.Unlock() + + if err != nil { + logger.Info("failed to cancel workflow", tag.Error(err), tag.WorkflowID(wf.WorkflowId)) + metricsHandler.Counter(metrics.ScheduleCancelWorkflowErrors.Name()).Record(1) + } + + // Cancels are only attempted once. + result.CompletedCancels = append(result.CompletedCancels, wf) + }) + } + + wg.Wait() + return +} + +// terminateWorkflows does a best-effort attempt to terminate all workflow executions provided in targets. 
+func (h *InvokerExecuteTaskHandler) terminateWorkflows( + ctx invokerTaskHandlerContext, + logger log.Logger, + metricsHandler metrics.Handler, + scheduler *Scheduler, + targets []*commonpb.WorkflowExecution, +) (result executeResult) { + var wg sync.WaitGroup + var resultMutex sync.Mutex + + for _, wf := range targets { + if !ctx.takeNextAction() { + break + } + + // Run all terminates concurrently. + newCtx := ctx.Clone() + wg.Go(func() { + err := h.terminateWorkflow(newCtx, scheduler, wf) + + resultMutex.Lock() + defer resultMutex.Unlock() + + if err != nil { + logger.Info("failed to terminate workflow", tag.Error(err), tag.WorkflowID(wf.WorkflowId)) + metricsHandler.Counter(metrics.ScheduleTerminateWorkflowErrors.Name()).Record(1) + } + + // Terminates are only attempted once. + result.CompletedTerminates = append(result.CompletedTerminates, wf) + }) + } + + wg.Wait() + return +} + +// startWorkflows executes the provided list of starts, returning a result with their outcomes. +func (h *InvokerExecuteTaskHandler) startWorkflows( + ctx invokerTaskHandlerContext, + logger log.Logger, + metricsHandler metrics.Handler, + scheduler *Scheduler, + invoker *Invoker, + lastCompletionState *schedulerpb.LastCompletionResult, + callback *commonpb.Callback, +) (result executeResult, startResults []*schedulepb.ScheduleActionResult) { + metricsWithTag := metricsHandler.WithTags( + metrics.StringTag(metrics.ScheduleActionTypeTag, metrics.ScheduleActionStartWorkflow)) + + var wg sync.WaitGroup + var resultMutex sync.Mutex + + for _, start := range invoker.getEligibleBufferedStarts() { + // Starts that haven't been executed yet will remain in `BufferedStarts`, + // without change, so another ExecuteTask will be immediately created to continue + // processing in a new task. + if !ctx.takeNextAction() { + break + } + + // Check if this start is already started. If so, we crashed after + // starting a workflow, but before recording the result. 
+ if invoker.isWorkflowStarted(start.WorkflowId) { + logger.Info("skipping already-started workflow", tag.WorkflowID(start.WorkflowId)) + continue + } + + // Clone start before concurrent access. The clone will have RunId/StartTime + // set by startWorkflow, then copied back to the original in recordExecuteResult. + start = common.CloneProto(start) + + // Run all starts concurrently. + newCtx := ctx.Clone() + wg.Go(func() { + startResult, err := h.startWorkflow(newCtx, metricsHandler, scheduler, start, lastCompletionState, callback) + + resultMutex.Lock() + defer resultMutex.Unlock() + + if err != nil { + logger.Info("failed to start workflow", tag.Error(err)) + + // Don't count "already started" for the error metric or retry, as it is most likely + // due to misconfiguration. + if !isAlreadyStartedError(err) { + metricsWithTag.Counter(metrics.ScheduleActionErrors.Name()).Record(1) + } + + if isRetryableError(err) { + // Apply backoff to start and retry. + h.applyBackoff(start, err) + result.RetryableStarts = append(result.RetryableStarts, start) + } else { + // Drop the start from the buffer. + result.FailedStarts = append(result.FailedStarts, start) + } + + return + } + + metricsWithTag.Counter(metrics.ScheduleActionSuccess.Name()).Record(1) + result.CompletedStarts = append(result.CompletedStarts, start) + startResults = append(startResults, startResult) + }) + } + + wg.Wait() + return +} + +func (h *InvokerProcessBufferTaskHandler) Validate( + ctx chasm.Context, + invoker *Invoker, + attrs chasm.TaskAttributes, + _ *schedulerpb.InvokerProcessBufferTask, +) (bool, error) { + return validateTaskHighWaterMark( + invoker.GetLastProcessedTime(), + attrs.ScheduledTime, + ) +} + +func (h *InvokerProcessBufferTaskHandler) Execute( + ctx chasm.MutableContext, + invoker *Invoker, + _ chasm.TaskAttributes, + _ *schedulerpb.InvokerProcessBufferTask, +) error { + scheduler := invoker.Scheduler.Get(ctx) + + // Make sure we have something to start. 
+ executionInfo := scheduler.Schedule.GetAction().GetStartWorkflow() + if executionInfo == nil { + return queueerrors.NewUnprocessableTaskError("schedules must have an Action set") + } + + // Compute actions to take from the current buffer. + result := h.processBuffer(ctx, invoker, scheduler) + + // Update Scheduler metadata. + scheduler.recordActionResult(&schedulerActionResult{ + overlapSkipped: result.overlapSkipped, + missedCatchupWindow: result.missedCatchupWindow, + }) + + // Update internal state and create new tasks. + invoker.recordProcessBufferResult(ctx, &result) + + return nil +} + +// processBuffer resolves the Invoker's buffered starts that haven't yet begun +// execution. This is where the decision is made to drive execution to +// completion, or skip/drop a start. +func (h *InvokerProcessBufferTaskHandler) processBuffer( + ctx chasm.MutableContext, + invoker *Invoker, + scheduler *Scheduler, +) (result processBufferResult) { + runningWorkflows := invoker.runningWorkflowExecutions() + isRunning := len(runningWorkflows) > 0 + + // Processing completely ignores any BufferedStart that's already executing/backing off. + pendingBufferedStarts := util.FilterSlice(invoker.GetBufferedStarts(), func(start *schedulespb.BufferedStart) bool { + return start.Attempt == 0 + }) + + // Resolve overlap policies and trim BufferedStarts that are skipped by policy. + action := legacyscheduler.ProcessBuffer(pendingBufferedStarts, isRunning, scheduler.resolveOverlapPolicy) + + // ProcessBuffer will drop starts by omitting them from NewBuffer. Start with the + // diff between the input and NewBuffer, and add any executing starts. + keepStarts := make(map[string]struct{}) // request ID -> is present + for _, start := range action.NewBuffer { + keepStarts[start.GetRequestId()] = struct{}{} + } + + // Combine all available starts. 
+ readyStarts := action.OverlappingStarts + if action.NonOverlappingStart != nil { + readyStarts = append(readyStarts, action.NonOverlappingStart) + } + + // Update result metrics. + result.overlapSkipped = action.OverlapSkipped + + // Add starting workflows to result, trim others. + for _, start := range readyStarts { + // Ensure we can take more actions. Manual actions are always allowed. + if !start.Manual && !scheduler.useScheduledAction(true) { + // Drop buffered automated actions while paused. + result.discardStarts = append(result.discardStarts, start) + continue + } + + if ctx.Now(invoker).After(h.startWorkflowDeadline(ctx, scheduler, start)) { + // Drop expired starts. + result.missedCatchupWindow++ + result.discardStarts = append(result.discardStarts, start) + continue + } + + // Append for immediate execution. + keepStarts[start.GetRequestId()] = struct{}{} + result.startWorkflows = append(result.startWorkflows, start) + } + + result.discardStarts = util.FilterSlice(pendingBufferedStarts, func(start *schedulespb.BufferedStart) bool { + _, keep := keepStarts[start.GetRequestId()] + return !keep + }) + + // Terminate overrides cancel if both are requested. + if action.NeedTerminate { + result.terminateWorkflows = runningWorkflows + } else if action.NeedCancel { + result.cancelWorkflows = runningWorkflows + } + + return +} + +// applyBackoff updates start's BackoffTime based on err and the retry policy. +func (h *InvokerExecuteTaskHandler) applyBackoff(start *schedulespb.BufferedStart, err error) { + if err == nil { + return + } + + var delay time.Duration + if rateLimitDelay, ok := isRateLimitedError(err); ok { + // If we have the rate limiter's delay, use that. + delay = rateLimitDelay + } else { + // Otherwise, use the backoff policy. Elapsed time is left at 0 because we bound + // on number of attempts. 
+ delay = h.config.RetryPolicy().ComputeNextDelay(0, int(start.Attempt), nil) + } + + start.BackoffTime = timestamppb.New(time.Now().Add(delay)) +} + +// startWorkflowDeadline returns the latest time at which a buffered workflow +// should be started, instead of dropped. The deadline puts an upper bound on +// the number of retry attempts per buffered start. +func (h *InvokerProcessBufferTaskHandler) startWorkflowDeadline( + ctx chasm.Context, + scheduler *Scheduler, + start *schedulespb.BufferedStart, +) time.Time { + var timeout time.Duration + + if start.Manual { + // For manual starts, use a default value in the future, as the catchup window + // doesn't apply. Manual starts may only time out through max attempt count, + // not deadline. + return ctx.Now(scheduler).Add(time.Hour) + } + + // Set request deadline based on the schedule's catchup window, which is the + // latest time that it's acceptable to start this workflow. + tweakables := h.config.Tweakables(scheduler.Namespace) + timeout = catchupWindow(scheduler, tweakables) + + timeout = max(timeout, startWorkflowMinDeadline) + + return start.ActualTime.AsTime().Add(timeout) +} + +func (h *InvokerExecuteTaskHandler) startWorkflow( + ctx context.Context, + metricsHandler metrics.Handler, + scheduler *Scheduler, + start *schedulespb.BufferedStart, + lastCompletionState *schedulerpb.LastCompletionResult, + callback *commonpb.Callback, +) (*schedulepb.ScheduleActionResult, error) { + requestSpec := scheduler.GetSchedule().GetAction().GetStartWorkflow() + + if start.Attempt >= InvokerMaxStartAttempts { + return nil, errRetryLimitExceeded + } + + // Get rate limiter permission once per buffered start, on the first attempt only. 
+ if start.Attempt == 1 { + delay, err := h.getRateLimiterPermission() + if err != nil { + return nil, err + } + if delay > 0 { + return nil, newRateLimitedError(delay) + } + } + + reusePolicy := enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE + if start.Manual { + reusePolicy = enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE + } + + var lcr []*commonpb.Payload + if lastCompletionState.Success != nil { + lcr = append(lcr, lastCompletionState.Success) + } + request := &workflowservice.StartWorkflowExecutionRequest{ + CompletionCallbacks: []*commonpb.Callback{callback}, + Header: requestSpec.Header, + Identity: scheduler.identity(), + Input: requestSpec.Input, + Memo: requestSpec.Memo, + Namespace: scheduler.Namespace, + RequestId: start.RequestId, + RetryPolicy: requestSpec.RetryPolicy, + SearchAttributes: scheduler.startWorkflowSearchAttributes(start.NominalTime.AsTime()), + TaskQueue: requestSpec.TaskQueue, + UserMetadata: requestSpec.UserMetadata, + WorkflowExecutionTimeout: requestSpec.WorkflowExecutionTimeout, + WorkflowId: start.WorkflowId, + WorkflowIdReusePolicy: reusePolicy, + WorkflowRunTimeout: requestSpec.WorkflowRunTimeout, + WorkflowTaskTimeout: requestSpec.WorkflowTaskTimeout, + WorkflowType: requestSpec.WorkflowType, + Priority: requestSpec.Priority, + ContinuedFailure: lastCompletionState.Failure, + LastCompletionResult: &commonpb.Payloads{ + Payloads: lcr, + }, + } + + result, err := h.frontendClient.StartWorkflowExecution(ctx, request) + if err != nil { + return nil, err + } + actualStartTime := time.Now() + + // Set metadata on the cloned start. The clone was created in startWorkflows + // before spawning this goroutine, and will be copied back to the Invoker's + // BufferedStarts in recordExecuteResult. + start.RunId = result.RunId + start.StartTime = timestamppb.New(actualStartTime) + start.HasCallback = true + + // Record time taken from action eligible to workflow started. 
+ if !start.Manual { + desiredTime := cmp.Or(start.DesiredTime, start.ActualTime) + metricsHandler. + Timer(metrics.ScheduleActionDelay.Name()). + Record(actualStartTime.Sub(desiredTime.AsTime())) + } + + return &schedulepb.ScheduleActionResult{ + ScheduleTime: start.ActualTime, + ActualTime: timestamppb.New(actualStartTime), + StartWorkflowResult: &commonpb.WorkflowExecution{ + WorkflowId: start.WorkflowId, + RunId: result.RunId, + }, + StartWorkflowStatus: result.Status, // usually should be RUNNING + }, nil +} + +func (h *InvokerExecuteTaskHandler) terminateWorkflow( + ctx context.Context, + scheduler *Scheduler, + target *commonpb.WorkflowExecution, +) error { + request := &historyservice.TerminateWorkflowExecutionRequest{ + NamespaceId: scheduler.NamespaceId, + TerminateRequest: &workflowservice.TerminateWorkflowExecutionRequest{ + Namespace: scheduler.Namespace, + WorkflowExecution: &commonpb.WorkflowExecution{WorkflowId: target.WorkflowId}, + Reason: "terminated by schedule overlap policy", + Identity: scheduler.identity(), + FirstExecutionRunId: target.RunId, + }, + } + _, err := h.historyClient.TerminateWorkflowExecution(ctx, request) + return err +} + +func (h *InvokerExecuteTaskHandler) cancelWorkflow( + ctx context.Context, + scheduler *Scheduler, + target *commonpb.WorkflowExecution, +) error { + request := &historyservice.RequestCancelWorkflowExecutionRequest{ + NamespaceId: scheduler.NamespaceId, + CancelRequest: &workflowservice.RequestCancelWorkflowExecutionRequest{ + Namespace: scheduler.Namespace, + WorkflowExecution: &commonpb.WorkflowExecution{WorkflowId: target.WorkflowId}, + Reason: "cancelled by schedule overlap policy", + Identity: scheduler.identity(), + FirstExecutionRunId: target.RunId, + }, + } + _, err := h.historyClient.RequestCancelWorkflowExecution(ctx, request) + return err +} + +// getRateLimiterPermission returns a delay for which the caller should wait +// before proceeding. 
If an error is returned, execution should not proceed, and +// reservation should be retried. +func (h *InvokerExecuteTaskHandler) getRateLimiterPermission() (delay time.Duration, err error) { + // For now, we're only going to rate limit via APS. + return +} + +func isAlreadyStartedError(err error) bool { + var expectedErr *serviceerror.WorkflowExecutionAlreadyStarted + return errors.As(err, &expectedErr) +} + +func isRateLimitedError(err error) (time.Duration, bool) { + var expectedErr *rateLimitedError + if errors.As(err, &expectedErr) { + return expectedErr.delay, true + } + return 0, false +} + +func isRetryableError(err error) bool { + _, rateLimited := isRateLimitedError(err) + return !errors.Is(err, errRetryLimitExceeded) && + (rateLimited || + common.IsServiceTransientError(err) || + common.IsContextDeadlineExceededErr(err)) +} + +func newRateLimitedError(delay time.Duration) error { + return &rateLimitedError{delay} +} + +func (r *rateLimitedError) Error() string { + return fmt.Sprintf("rate limited for %s", r.delay) +} + +func (h *InvokerExecuteTaskHandler) newInvokerTaskHandlerContext( + ctx context.Context, + scheduler *Scheduler, +) invokerTaskHandlerContext { + tweakables := h.config.Tweakables(scheduler.Namespace) + maxActions := tweakables.MaxActionsPerExecution + + return invokerTaskHandlerContext{ + Context: ctx, + actionsTaken: 0, + maxActions: maxActions, + } +} + +func (i invokerTaskHandlerContext) Clone() invokerTaskHandlerContext { + return invokerTaskHandlerContext{ + Context: i.Context, + actionsTaken: i.actionsTaken, + maxActions: i.maxActions, + } +} diff --git a/chasm/lib/scheduler/library.go b/chasm/lib/scheduler/library.go new file mode 100644 index 00000000000..183dd486af4 --- /dev/null +++ b/chasm/lib/scheduler/library.go @@ -0,0 +1,105 @@ +package scheduler + +import ( + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "google.golang.org/grpc" +) + +type ( + Library struct { + 
chasm.UnimplementedLibrary + + handler *handler + + SchedulerIdleTaskHandler *SchedulerIdleTaskHandler + SchedulerCallbacksTaskHandler *SchedulerCallbacksTaskHandler + GeneratorTaskHandler *GeneratorTaskHandler + InvokerExecuteTaskHandler *InvokerExecuteTaskHandler + InvokerProcessBufferTaskHandler *InvokerProcessBufferTaskHandler + BackfillerTaskHandler *BackfillerTaskHandler + MigrateToWorkflowTaskHandler *SchedulerMigrateToWorkflowTaskHandler + } +) + +// NewNilLibrary creates a Library with all nil handlers. Useful for +// registration-only contexts like tdbg where no task execution is needed. +func NewNilLibrary() *Library { + return &Library{} +} + +func NewLibrary( + handler *handler, + SchedulerIdleTaskHandler *SchedulerIdleTaskHandler, + SchedulerCallbacksTaskHandler *SchedulerCallbacksTaskHandler, + GeneratorTaskHandler *GeneratorTaskHandler, + InvokerExecuteTaskHandler *InvokerExecuteTaskHandler, + InvokerProcessBufferTaskHandler *InvokerProcessBufferTaskHandler, + BackfillerTaskHandler *BackfillerTaskHandler, + MigrateToWorkflowTaskHandler *SchedulerMigrateToWorkflowTaskHandler, +) *Library { + return &Library{ + handler: handler, + SchedulerIdleTaskHandler: SchedulerIdleTaskHandler, + SchedulerCallbacksTaskHandler: SchedulerCallbacksTaskHandler, + GeneratorTaskHandler: GeneratorTaskHandler, + InvokerExecuteTaskHandler: InvokerExecuteTaskHandler, + InvokerProcessBufferTaskHandler: InvokerProcessBufferTaskHandler, + BackfillerTaskHandler: BackfillerTaskHandler, + MigrateToWorkflowTaskHandler: MigrateToWorkflowTaskHandler, + } +} + +func (l *Library) Name() string { + return chasm.SchedulerLibraryName +} + +func (l *Library) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*Scheduler]( + chasm.SchedulerComponentName, + chasm.WithBusinessIDAlias("ScheduleId"), + chasm.WithSearchAttributes(executionStatusSearchAttribute), + ), + chasm.NewRegistrableComponent[*Generator]("generator"), + 
chasm.NewRegistrableComponent[*Invoker]("invoker"), + chasm.NewRegistrableComponent[*Backfiller]("backfiller"), + } +} + +func (l *Library) Tasks() []*chasm.RegistrableTask { + return []*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask( + "idle", + l.SchedulerIdleTaskHandler, + ), + chasm.NewRegistrableSideEffectTask( + "callbacks", + l.SchedulerCallbacksTaskHandler, + ), + chasm.NewRegistrablePureTask( + "generate", + l.GeneratorTaskHandler, + ), + chasm.NewRegistrableSideEffectTask( + "execute", + l.InvokerExecuteTaskHandler, + ), + chasm.NewRegistrablePureTask( + "processBuffer", + l.InvokerProcessBufferTaskHandler, + ), + chasm.NewRegistrablePureTask( + "backfill", + l.BackfillerTaskHandler, + ), + chasm.NewRegistrableSideEffectTask( + "migrateToWorkflow", + l.MigrateToWorkflowTaskHandler, + ), + } +} + +func (l *Library) RegisterServices(server *grpc.Server) { + server.RegisterService(&schedulerpb.SchedulerService_ServiceDesc, l.handler) +} diff --git a/chasm/lib/scheduler/migration/migration.go b/chasm/lib/scheduler/migration/migration.go new file mode 100644 index 00000000000..e9c9c3bbfd8 --- /dev/null +++ b/chasm/lib/scheduler/migration/migration.go @@ -0,0 +1,494 @@ +package migration + +import ( + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + schedulepb "go.temporal.io/api/schedule/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + schedulerpb "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common" + schedulescommon "go.temporal.io/server/common/schedules" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// LegacyToCreateFromMigrationStateRequest converts legacy (workflow-backed) scheduler +// state to a CreateFromMigrationStateRequest proto. This is the primary V1-to-V2 +// migration function. 
+// +// The migrationTime parameter is used for initializing timestamps that don't have a +// direct mapping from V1 state (e.g., StartTime for running workflows). +// +// State preserved during migration: +// - Schedule spec, action, policies, and state +// - ScheduleInfo metadata (action counts, create/update times) +// - ConflictToken (for optimistic concurrency control) +// - Buffered starts (with V2-specific fields like RequestId, WorkflowId populated) +// - Running workflows (converted to BufferedStarts with RunId/StartTime set) +// - Recent actions (converted to BufferedStarts with Completed field set) +// - Ongoing backfills (converted to Backfiller components) +// - Last completion result and continued failure +// - High water mark (becomes Generator.LastProcessedTime) +// - Search attributes and memo +// +// Note: In V2, RunningWorkflows and RecentActions are computed on-demand from +// BufferedStarts by the Invoker, rather than being stored separately in ScheduleInfo. +func LegacyToCreateFromMigrationStateRequest( + schedule *schedulepb.Schedule, + info *schedulepb.ScheduleInfo, + state *schedulespb.InternalState, + searchAttributes *commonpb.SearchAttributes, + memo *commonpb.Memo, + migrationTime time.Time, +) *schedulerpb.CreateFromMigrationStateRequest { + // V2 computes RunningWorkflows/RecentActions on-demand from BufferedStarts + infoClone := common.CloneProto(info) + infoClone.RunningWorkflows = nil + infoClone.RecentActions = nil + + schedulerState := &schedulerpb.SchedulerState{ + Schedule: common.CloneProto(schedule), + Info: infoClone, + Namespace: state.Namespace, + NamespaceId: state.NamespaceId, + ScheduleId: state.ScheduleId, + ConflictToken: state.ConflictToken, + Closed: false, + } + + generatorState := &schedulerpb.GeneratorState{ + LastProcessedTime: common.CloneProto(state.LastProcessedTime), + FutureActionTimes: nil, // Regenerated by GeneratorTask + } + + pendingBufferedStarts := convertBufferedStartsLegacyToCHASM( + 
state.BufferedStarts, + state.NamespaceId, + state.ScheduleId, + state.ConflictToken, + getWorkflowID(schedule), + ) + + runningBufferedStarts := convertRunningWorkflowsToBufferedStarts( + info.RunningWorkflows, + state.NamespaceId, + state.ScheduleId, + state.ConflictToken, + migrationTime, + ) + + recentActionsBufferedStarts := convertRecentActionsToBufferedStarts( + info.RecentActions, + info.RunningWorkflows, + state.NamespaceId, + state.ScheduleId, + state.ConflictToken, + migrationTime, + ) + + allBufferedStarts := append(pendingBufferedStarts, runningBufferedStarts...) + allBufferedStarts = append(allBufferedStarts, recentActionsBufferedStarts...) + + invokerState := &schedulerpb.InvokerState{ + BufferedStarts: allBufferedStarts, + LastProcessedTime: timestamppb.New(migrationTime), + } + + backfillers := convertBackfillsLegacyToCHASM(state.OngoingBackfills) + lastCompletion := convertLastCompletionLegacyToCHASM(state.LastCompletionResult, state.ContinuedFailure) + + return &schedulerpb.CreateFromMigrationStateRequest{ + NamespaceId: state.NamespaceId, + State: &schedulerpb.SchedulerMigrationState{ + SchedulerState: schedulerState, + GeneratorState: generatorState, + InvokerState: invokerState, + Backfillers: backfillers, + LastCompletionResult: lastCompletion, + SearchAttributes: searchAttributes.GetIndexedFields(), + Memo: memo.GetFields(), + }, + } +} + +// CHASMToLegacyStartScheduleArgs converts CHASM scheduler state to V1 StartScheduleArgs. +// This is the primary V2-to-V1 migration function. The migrationTime parameter is used +// to initialize missing timestamps. 
+func CHASMToLegacyStartScheduleArgs( + scheduler *schedulerpb.SchedulerState, + generator *schedulerpb.GeneratorState, + invoker *schedulerpb.InvokerState, + backfillers map[string]*schedulerpb.BackfillerState, + lastCompletionResult *schedulerpb.LastCompletionResult, + searchAttributes map[string]*commonpb.Payload, + memo map[string]*commonpb.Payload, + migrationTime time.Time, +) *schedulespb.StartScheduleArgs { + schedulerState := common.CloneProto(scheduler) + if schedulerState == nil { + schedulerState = &schedulerpb.SchedulerState{} + } + + schedule := common.CloneProto(schedulerState.Schedule) + if schedule == nil { + schedule = &schedulepb.Schedule{} + } + + info := common.CloneProto(schedulerState.Info) + if info == nil { + info = &schedulepb.ScheduleInfo{} + } + + var invokerBuffered []*schedulespb.BufferedStart + if invoker != nil { + invokerBuffered = invoker.GetBufferedStarts() + } + bufferedStarts, running, recent := splitBufferedStartsForLegacy(invokerBuffered) + ongoingBackfills, triggerStarts := convertBackfillersCHASMToLegacy(backfillers, migrationTime) + bufferedStarts = append(bufferedStarts, triggerStarts...) 
+ + var generatorLastProcessed *timestamppb.Timestamp + if generator != nil { + generatorLastProcessed = generator.GetLastProcessedTime() + } + lastProcessedTime := common.CloneProto(generatorLastProcessed) + if lastProcessedTime == nil { + lastProcessedTime = timestamppb.New(migrationTime) + } + + resultPayloads, continuedFailure := convertLastCompletionCHASMToLegacy(lastCompletionResult) + + info.RunningWorkflows = running + info.RecentActions = recent + + state := &schedulespb.InternalState{ + Namespace: schedulerState.Namespace, + NamespaceId: schedulerState.NamespaceId, + ScheduleId: schedulerState.ScheduleId, + LastProcessedTime: lastProcessedTime, + BufferedStarts: bufferedStarts, + OngoingBackfills: ongoingBackfills, + LastCompletionResult: resultPayloads, + ContinuedFailure: continuedFailure, + ConflictToken: schedulerState.ConflictToken, + NeedRefresh: len(running) > 0, + } + + return &schedulespb.StartScheduleArgs{ + Schedule: schedule, + Info: info, + State: state, + } +} + +// convertBufferedStartsLegacyToCHASM transforms V1 buffered starts to V2 format. +// V2 requires request_id, workflow_id, attempt, and backoff_time fields. 
+func convertBufferedStartsLegacyToCHASM( + v1Starts []*schedulespb.BufferedStart, + namespaceID, scheduleID string, + conflictToken int64, + baseWorkflowID string, +) []*schedulespb.BufferedStart { + if len(v1Starts) == 0 { + return nil + } + + v2Starts := make([]*schedulespb.BufferedStart, len(v1Starts)) + for i, v1Start := range v1Starts { + v2Start := common.CloneProto(v1Start) + + if v2Start.RequestId == "" { + v2Start.RequestId = schedulescommon.GenerateRequestID( + namespaceID, + scheduleID, + conflictToken, + "migrated", + v1Start.GetNominalTime().AsTime(), + v1Start.GetActualTime().AsTime(), + ) + } + + if v2Start.WorkflowId == "" { + v2Start.WorkflowId = schedulescommon.GenerateWorkflowID( + baseWorkflowID, + v1Start.GetNominalTime().AsTime(), + ) + } + + v2Start.Attempt = 0 + v2Start.BackoffTime = nil + + v2Starts[i] = v2Start + } + + return v2Starts +} + +// convertRunningWorkflowsToBufferedStarts converts V1's RunningWorkflows list to V2's +// BufferedStarts format. In V2, running workflows are represented as BufferedStarts with +// RunId and StartTime populated, and Completed field empty. +func convertRunningWorkflowsToBufferedStarts( + runningWorkflows []*commonpb.WorkflowExecution, + namespaceID, scheduleID string, + conflictToken int64, + migrationTime time.Time, +) []*schedulespb.BufferedStart { + if len(runningWorkflows) == 0 { + return nil + } + + bufferedStarts := make([]*schedulespb.BufferedStart, len(runningWorkflows)) + for i, wf := range runningWorkflows { + bufferedStarts[i] = &schedulespb.BufferedStart{ + NominalTime: timestamppb.New(migrationTime), + ActualTime: timestamppb.New(migrationTime), + StartTime: timestamppb.New(migrationTime), + WorkflowId: wf.WorkflowId, + RunId: wf.RunId, + // RequestId will be used with AttachRequestID to register Nexus + // callbacks for tracking workflow completion after migration. 
+ // Include the RunId in the tag to ensure each running workflow + // gets a unique RequestId (important for ALLOW_ALL overlap + // policy where multiple workflows may be running concurrently). + RequestId: schedulescommon.GenerateRequestID( + namespaceID, + scheduleID, + conflictToken, + "migrated-running-"+wf.RunId, + migrationTime, + migrationTime, + ), + Attempt: 1, + Completed: nil, + // Migrated running workflows must have a Nexus callback attached once the + // migrated schedule target has been created. + HasCallback: false, + } + } + + return bufferedStarts +} + +// convertRecentActionsToBufferedStarts converts V1's RecentActions list to V2's +// BufferedStarts format. In V2, completed actions are represented as BufferedStarts with +// RunId, StartTime, and Completed fields all populated. +// +// runningWorkflows is the set of currently running workflow executions (from +// info.RunningWorkflows). These are excluded because they are already converted +// separately by convertRunningWorkflowsToBufferedStarts. In V1, recordAction +// adds the same workflow to both RecentActions and RunningWorkflows, so without +// this filter the same execution would appear twice in the CHASM BufferedStarts. +func convertRecentActionsToBufferedStarts( + recentActions []*schedulepb.ScheduleActionResult, + runningWorkflows []*commonpb.WorkflowExecution, + namespaceID, scheduleID string, + conflictToken int64, + migrationTime time.Time, +) []*schedulespb.BufferedStart { + if len(recentActions) == 0 { + return nil + } + + // Build a set of running workflow run IDs to exclude from recent actions, + // since those are already converted by convertRunningWorkflowsToBufferedStarts. 
+ runningRunIDs := make(map[string]struct{}, len(runningWorkflows)) + for _, wf := range runningWorkflows { + runningRunIDs[wf.GetRunId()] = struct{}{} + } + + bufferedStarts := make([]*schedulespb.BufferedStart, 0, len(recentActions)) + for _, action := range recentActions { + if action.StartWorkflowResult == nil { + continue + } + + // Skip actions for workflows that are still running — those are handled + // by convertRunningWorkflowsToBufferedStarts. + if _, ok := runningRunIDs[action.StartWorkflowResult.GetRunId()]; ok { + continue + } + + bufferedStarts = append(bufferedStarts, &schedulespb.BufferedStart{ + NominalTime: action.ScheduleTime, + ActualTime: action.ActualTime, + StartTime: action.ActualTime, + WorkflowId: action.StartWorkflowResult.WorkflowId, + RunId: action.StartWorkflowResult.RunId, + RequestId: schedulescommon.GenerateRequestID( + namespaceID, + scheduleID, + conflictToken, + "migrated-completed", + action.ScheduleTime.AsTime(), + action.ActualTime.AsTime(), + ), + Attempt: 1, + Completed: &schedulespb.CompletedResult{ + Status: action.StartWorkflowStatus, + CloseTime: timestamppb.New(migrationTime), + }, + }) + } + + return bufferedStarts +} + +func convertBackfillsLegacyToCHASM( + legacyBackfills []*schedulepb.BackfillRequest) map[string]*schedulerpb.BackfillerState { + if len(legacyBackfills) == 0 { + return nil + } + + backfillers := make(map[string]*schedulerpb.BackfillerState, len(legacyBackfills)) + for _, v1Backfill := range legacyBackfills { + backfillID := schedulescommon.GenerateBackfillerID() + + backfillers[backfillID] = &schedulerpb.BackfillerState{ + Request: &schedulerpb.BackfillerState_BackfillRequest{ + BackfillRequest: common.CloneProto(v1Backfill), + }, + BackfillId: backfillID, + LastProcessedTime: nil, + Attempt: 0, + } + } + + return backfillers +} + +// convertLastCompletionLegacyToCHASM transforms V1 completion result to V2 format. +// V1 uses Payloads (plural), V2 uses single Payload. 
+func convertLastCompletionLegacyToCHASM(
+	result *commonpb.Payloads,
+	failure *failurepb.Failure,
+) *schedulerpb.LastCompletionResult {
+	if result == nil && failure == nil {
+		return nil
+	}
+
+	lcr := &schedulerpb.LastCompletionResult{}
+	// V2 stores a single payload; only the first V1 payload is preserved.
+	if result != nil && len(result.Payloads) > 0 {
+		lcr.Success = common.CloneProto(result.Payloads[0])
+	}
+
+	if failure != nil {
+		lcr.Failure = common.CloneProto(failure)
+	}
+
+	return lcr
+}
+
+// getWorkflowID extracts the workflow ID from the schedule's action.
+// This is the workflow ID specified in the schedule spec. During workflow start
+// generation, nominal time is suffixed to this ID.
+func getWorkflowID(schedule *schedulepb.Schedule) string {
+	if schedule == nil {
+		return ""
+	}
+	return schedule.GetAction().GetStartWorkflow().GetWorkflowId()
+}
+
+// splitBufferedStartsForLegacy partitions V2 BufferedStarts into the three V1
+// collections: still-pending buffered starts (no RunId, not completed),
+// running workflow executions (RunId set, not completed), and recent action
+// results (RunId set; status RUNNING or taken from the Completed record).
+func splitBufferedStartsForLegacy(
+	starts []*schedulespb.BufferedStart,
+) ([]*schedulespb.BufferedStart, []*commonpb.WorkflowExecution, []*schedulepb.ScheduleActionResult) {
+	if len(starts) == 0 {
+		return nil, nil, nil
+	}
+
+	var buffered []*schedulespb.BufferedStart
+	var running []*commonpb.WorkflowExecution
+	var recent []*schedulepb.ScheduleActionResult
+
+	for _, start := range starts {
+		// Pending: never started and not completed -> back to the V1 buffer.
+		if start.GetRunId() == "" && start.GetCompleted() == nil {
+			buffered = append(buffered, common.CloneProto(start))
+			continue
+		}
+
+		// Completed but with no RunId (never actually started) -> dropped.
+		if start.GetRunId() == "" {
+			continue
+		}
+
+		status := enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING
+		if start.GetCompleted() != nil {
+			status = start.GetCompleted().GetStatus()
+		}
+
+		// NOTE(review): the forward migration maps legacy ScheduleTime to
+		// NominalTime (convertRecentActionsToBufferedStarts), but here
+		// ScheduleTime is restored from ActualTime, so a V1->V2->V1 round
+		// trip rewrites ScheduleTime. Confirm this asymmetry is intentional
+		// (i.e., matches legacy recordAction semantics).
+		recent = append(recent, &schedulepb.ScheduleActionResult{
+			ScheduleTime: start.GetActualTime(),
+			ActualTime:   start.GetStartTime(),
+			StartWorkflowResult: &commonpb.WorkflowExecution{
+				WorkflowId: start.GetWorkflowId(),
+				RunId:      start.GetRunId(),
+			},
+			StartWorkflowStatus: status,
+		})
+
+		// Running workflows appear in both recent actions and the running
+		// list, mirroring V1 where recordAction adds to both.
+		if start.GetCompleted() == nil {
+			running = append(running, &commonpb.WorkflowExecution{
+				WorkflowId: start.GetWorkflowId(),
+				RunId:      start.GetRunId(),
+			})
+		}
+	}
+
+	return buffered, running, recent
+}
+
+// convertBackfillersCHASMToLegacy converts V2 Backfiller components back to V1
+// form: backfill-request backfillers become ongoing BackfillRequests (with
+// StartTime advanced to the backfiller's progress when work has begun), and
+// trigger-immediately backfillers become manual BufferedStarts.
+func convertBackfillersCHASMToLegacy(
+	backfillers map[string]*schedulerpb.BackfillerState,
+	migrationTime time.Time,
+) ([]*schedulepb.BackfillRequest, []*schedulespb.BufferedStart) {
+	if len(backfillers) == 0 {
+		return nil, nil
+	}
+
+	var ongoing []*schedulepb.BackfillRequest
+	var triggerStarts []*schedulespb.BufferedStart
+
+	for _, backfiller := range backfillers {
+		if request := backfiller.GetBackfillRequest(); request != nil {
+			backfill := common.CloneProto(request)
+			// If the backfiller has made progress, resume from where it
+			// left off rather than the original start time.
+			if backfiller.GetAttempt() > 0 && backfiller.GetLastProcessedTime() != nil {
+				backfill.StartTime = common.CloneProto(backfiller.GetLastProcessedTime())
+			}
+			ongoing = append(ongoing, backfill)
+			continue
+		}
+
+		if trigger := backfiller.GetTriggerRequest(); trigger != nil {
+			when := backfiller.GetLastProcessedTime()
+			if when == nil {
+				when = timestamppb.New(migrationTime)
+			}
+			triggerStarts = append(triggerStarts, &schedulespb.BufferedStart{
+				NominalTime:   when,
+				ActualTime:    when,
+				DesiredTime:   when,
+				OverlapPolicy: trigger.GetOverlapPolicy(),
+				Manual:        true,
+			})
+		}
+	}
+
+	return ongoing, triggerStarts
+}
+
+// convertLastCompletionCHASMToLegacy converts the V2 single-payload completion
+// result back to V1's Payloads (plural) plus continued failure.
+func convertLastCompletionCHASMToLegacy(
+	result *schedulerpb.LastCompletionResult,
+) (*commonpb.Payloads, *failurepb.Failure) {
+	if result == nil {
+		return nil, nil
+	}
+
+	var payloads *commonpb.Payloads
+	if result.Success != nil {
+		payloads = &commonpb.Payloads{
+			Payloads: []*commonpb.Payload{common.CloneProto(result.Success)},
+		}
+	}
+
+	return payloads, common.CloneProto(result.Failure)
+}
diff --git a/chasm/lib/scheduler/migration/migration_test.go b/chasm/lib/scheduler/migration/migration_test.go
new file mode 100644
index 00000000000..41a17192fec
--- /dev/null
+++ b/chasm/lib/scheduler/migration/migration_test.go
@@ -0,0 +1,336 @@
+package migration
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+	commonpb "go.temporal.io/api/common/v1"
+	enumspb "go.temporal.io/api/enums/v1"
failurepb "go.temporal.io/api/failure/v1" + schedulepb "go.temporal.io/api/schedule/v1" + workflowpb "go.temporal.io/api/workflow/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + schedulerpb "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func newTestSchedule() *schedulepb.Schedule { + return &schedulepb.Schedule{ + Spec: &schedulepb.ScheduleSpec{ + Interval: []*schedulepb.IntervalSpec{{Interval: durationpb.New(time.Minute)}}, + }, + Action: &schedulepb.ScheduleAction{ + Action: &schedulepb.ScheduleAction_StartWorkflow{ + StartWorkflow: &workflowpb.NewWorkflowExecutionInfo{ + WorkflowId: "test-wf", + WorkflowType: &commonpb.WorkflowType{Name: "test-wf-type"}, + }, + }, + }, + Policies: &schedulepb.SchedulePolicies{CatchupWindow: durationpb.New(5 * time.Minute)}, + State: &schedulepb.ScheduleState{}, + } +} + +func TestLegacyToCreateFromMigrationStateRequest(t *testing.T) { + now := time.Now().UTC() + state := &schedulespb.InternalState{ + Namespace: "test-ns", + NamespaceId: "test-ns-id", + ScheduleId: "test-sched-id", + LastProcessedTime: timestamppb.New(now), + ConflictToken: 42, + BufferedStarts: []*schedulespb.BufferedStart{ + { + NominalTime: timestamppb.New(now), + ActualTime: timestamppb.New(now.Add(time.Second)), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, + }, + }, + OngoingBackfills: []*schedulepb.BackfillRequest{ + { + StartTime: timestamppb.New(now.Add(-time.Hour)), + EndTime: timestamppb.New(now), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, + LastCompletionResult: &commonpb.Payloads{ + Payloads: []*commonpb.Payload{{Data: []byte("result")}}, + }, + ContinuedFailure: &failurepb.Failure{Message: "last failure"}, + } + info := &schedulepb.ScheduleInfo{ + ActionCount: 5, + RunningWorkflows: []*commonpb.WorkflowExecution{ + {WorkflowId: "wf-1", RunId: "run-1"}, + }, + RecentActions: 
[]*schedulepb.ScheduleActionResult{ + { + ScheduleTime: timestamppb.New(now.Add(-time.Hour)), + ActualTime: timestamppb.New(now.Add(-time.Millisecond)), + StartWorkflowResult: &commonpb.WorkflowExecution{WorkflowId: "wf-2", RunId: "run-2"}, + StartWorkflowStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, + }, + } + searchAttrs := &commonpb.SearchAttributes{IndexedFields: map[string]*commonpb.Payload{"Attr": {Data: []byte("value")}}} + memo := &commonpb.Memo{Fields: map[string]*commonpb.Payload{"Memo": {Data: []byte("memo")}}} + + req := LegacyToCreateFromMigrationStateRequest(newTestSchedule(), info, state, searchAttrs, memo, now) + + require.NotNil(t, req) + require.Equal(t, "test-ns-id", req.NamespaceId) + + migrationState := req.State + // Scheduler state + require.NotNil(t, migrationState) + require.NotNil(t, migrationState.SchedulerState) + require.False(t, migrationState.SchedulerState.Schedule.State.Paused, "schedule should preserve unpaused state") + require.Equal(t, "test-ns", migrationState.SchedulerState.Namespace) + require.Equal(t, "test-ns-id", migrationState.SchedulerState.NamespaceId) + require.Equal(t, "test-sched-id", migrationState.SchedulerState.ScheduleId) + require.Equal(t, int64(42), migrationState.SchedulerState.ConflictToken) + require.False(t, migrationState.SchedulerState.Closed) + + // Generator state + require.NotNil(t, migrationState.GeneratorState) + require.Equal(t, now, migrationState.GeneratorState.LastProcessedTime.AsTime()) + + // Invoker state - buffered starts + running workflows + completed + require.NotNil(t, migrationState.InvokerState) + require.Len(t, migrationState.InvokerState.BufferedStarts, 3) // 1 buffered + 1 running + 1 completed + + var buffered, running, completed int + for _, start := range migrationState.InvokerState.BufferedStarts { + require.NotEmpty(t, start.RequestId) + require.NotEmpty(t, start.WorkflowId) + switch { + case start.RunId == "" && start.Completed == nil: + buffered++ + case start.RunId 
!= "" && start.Completed == nil: + running++ + require.Equal(t, "wf-1", start.WorkflowId) + require.Equal(t, "run-1", start.RunId) + require.False(t, start.HasCallback) + case start.Completed != nil: + completed++ + require.Equal(t, "wf-2", start.WorkflowId) + require.Equal(t, "run-2", start.RunId) + require.Equal(t, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, start.Completed.Status) + default: + t.Fatalf("unexpected buffered start state: RunId=%q, Completed=%v", start.RunId, start.Completed) + } + } + require.Equal(t, 1, buffered, "expected 1 pending buffered start") + require.Equal(t, 1, running, "expected 1 running workflow") + require.Equal(t, 1, completed, "expected 1 completed workflow") + + // Backfillers + require.Len(t, migrationState.Backfillers, 1) + for id, backfiller := range migrationState.Backfillers { + require.Equal(t, id, backfiller.BackfillId) + require.NotNil(t, backfiller.GetBackfillRequest()) + require.Equal(t, now.Add(-time.Hour), backfiller.GetBackfillRequest().StartTime.AsTime()) + } + + // Last completion result + require.NotNil(t, migrationState.LastCompletionResult) + require.Equal(t, []byte("result"), migrationState.LastCompletionResult.Success.Data) + require.Equal(t, "last failure", migrationState.LastCompletionResult.Failure.Message) + + // Search attributes and memo + require.Equal(t, searchAttrs.GetIndexedFields(), migrationState.SearchAttributes) + require.Equal(t, memo.GetFields(), migrationState.Memo) +} + +func TestCHASMToLegacyStartScheduleArgs(t *testing.T) { + now := time.Date(2024, 6, 1, 12, 0, 0, 0, time.UTC) + scheduler := &schedulerpb.SchedulerState{ + Namespace: "ns", + NamespaceId: "ns-id", + ScheduleId: "sched-id", + ConflictToken: 7, + Schedule: newTestSchedule(), + Info: &schedulepb.ScheduleInfo{ActionCount: 12}, + } + generator := &schedulerpb.GeneratorState{LastProcessedTime: timestamppb.New(now.Add(-time.Minute))} + invoker := &schedulerpb.InvokerState{ + BufferedStarts: []*schedulespb.BufferedStart{ + { + 
NominalTime: timestamppb.New(now.Add(-10 * time.Minute)), + ActualTime: timestamppb.New(now.Add(-9 * time.Minute)), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, + }, + { + NominalTime: timestamppb.New(now.Add(-5 * time.Minute)), + ActualTime: timestamppb.New(now.Add(-5 * time.Minute)), + StartTime: timestamppb.New(now.Add(-5 * time.Minute)), + WorkflowId: "wf-running", + RunId: "run-running", + }, + { + NominalTime: timestamppb.New(now.Add(-20 * time.Minute)), + ActualTime: timestamppb.New(now.Add(-20 * time.Minute)), + StartTime: timestamppb.New(now.Add(-20 * time.Minute)), + WorkflowId: "wf-done", + RunId: "run-done", + Completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + CloseTime: timestamppb.New(now.Add(-10 * time.Minute)), + }, + }, + }, + } + backfillProgress := timestamppb.New(now.Add(-2 * time.Hour)) + backfillers := map[string]*schedulerpb.BackfillerState{ + "bf-1": { + BackfillId: "bf-1", + LastProcessedTime: backfillProgress, + Attempt: 2, + Request: &schedulerpb.BackfillerState_BackfillRequest{ + BackfillRequest: &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(now.Add(-6 * time.Hour)), + EndTime: timestamppb.New(now.Add(-1 * time.Hour)), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, + }, + "trigger-1": { + BackfillId: "trigger-1", + LastProcessedTime: timestamppb.New(now.Add(-30 * time.Second)), + Request: &schedulerpb.BackfillerState_TriggerRequest{ + TriggerRequest: &schedulepb.TriggerImmediatelyRequest{ + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, + }, + } + lastCompletion := &schedulerpb.LastCompletionResult{ + Success: &commonpb.Payload{Data: []byte("ok")}, + Failure: &failurepb.Failure{Message: "last failure"}, + } + + args := CHASMToLegacyStartScheduleArgs(scheduler, generator, invoker, backfillers, lastCompletion, nil, nil, now) + + require.Equal(t, "ns-id", args.State.NamespaceId) + require.Equal(t, "sched-id", 
args.State.ScheduleId) + require.Equal(t, int64(7), args.State.ConflictToken) + require.Equal(t, generator.LastProcessedTime.AsTime(), args.State.LastProcessedTime.AsTime()) + + require.Len(t, args.Info.RunningWorkflows, 1) + require.Equal(t, "wf-running", args.Info.RunningWorkflows[0].WorkflowId) + require.Equal(t, "run-running", args.Info.RunningWorkflows[0].RunId) + + require.Len(t, args.Info.RecentActions, 2) + require.Len(t, args.State.BufferedStarts, 2) // pending + trigger + require.Len(t, args.State.OngoingBackfills, 1) + require.Equal(t, backfillProgress.AsTime(), args.State.OngoingBackfills[0].StartTime.AsTime()) + + require.NotNil(t, args.State.LastCompletionResult) + require.Equal(t, []byte("ok"), args.State.LastCompletionResult.Payloads[0].Data) + require.Equal(t, "last failure", args.State.ContinuedFailure.Message) + + var triggerFound bool + for _, start := range args.State.BufferedStarts { + if start.Manual { + triggerFound = true + } + } + require.True(t, triggerFound) +} + +func TestLegacyToCreateFromMigrationStateRequest_DeduplicatesRunningWorkflows(t *testing.T) { + // V1's recordAction puts the same workflow in both RecentActions (with + // RUNNING status) and RunningWorkflows. The migration should not create + // duplicate BufferedStarts for the same execution. + now := time.Now().UTC() + state := &schedulespb.InternalState{ + Namespace: "test-ns", + NamespaceId: "test-ns-id", + ScheduleId: "test-sched-id", + ConflictToken: 1, + } + info := &schedulepb.ScheduleInfo{ + RunningWorkflows: []*commonpb.WorkflowExecution{ + {WorkflowId: "wf-1", RunId: "run-1"}, + }, + RecentActions: []*schedulepb.ScheduleActionResult{ + { + // Completed action - should be kept. 
+ ScheduleTime: timestamppb.New(now.Add(-2 * time.Hour)), + ActualTime: timestamppb.New(now.Add(-2 * time.Hour)), + StartWorkflowResult: &commonpb.WorkflowExecution{WorkflowId: "wf-old", RunId: "run-old"}, + StartWorkflowStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + }, + { + // Same workflow as RunningWorkflows - should be deduplicated. + ScheduleTime: timestamppb.New(now.Add(-time.Hour)), + ActualTime: timestamppb.New(now.Add(-time.Hour)), + StartWorkflowResult: &commonpb.WorkflowExecution{WorkflowId: "wf-1", RunId: "run-1"}, + StartWorkflowStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + }, + }, + } + + req := LegacyToCreateFromMigrationStateRequest(newTestSchedule(), info, state, nil, nil, now) + + // Should have 2 BufferedStarts: 1 running (from RunningWorkflows) + 1 completed (from RecentActions). + // The running entry in RecentActions should be excluded since it duplicates RunningWorkflows. + require.Len(t, req.State.InvokerState.BufferedStarts, 2) + + var running, completed int + for _, start := range req.State.InvokerState.BufferedStarts { + switch { + case start.RunId != "" && start.Completed == nil: + running++ + require.Equal(t, "wf-1", start.WorkflowId) + require.Equal(t, "run-1", start.RunId) + case start.Completed != nil: + completed++ + require.Equal(t, "wf-old", start.WorkflowId) + require.Equal(t, "run-old", start.RunId) + default: + t.Fatalf("unexpected buffered start state: RunId=%q, Completed=%v", start.RunId, start.Completed) + } + } + require.Equal(t, 1, running, "expected exactly 1 running workflow (not duplicated)") + require.Equal(t, 1, completed, "expected 1 completed workflow from recent actions") + + // Verify the round-trip: converting back to legacy should also have no + // duplicate RunIds in RecentActions. 
+ _, _, recentActions := splitBufferedStartsForLegacy(req.State.InvokerState.BufferedStarts) + seen := make(map[string]bool) + for _, action := range recentActions { + runID := action.GetStartWorkflowResult().GetRunId() + require.False(t, seen[runID], "duplicate RunId %q in round-tripped RecentActions", runID) + seen[runID] = true + } +} + +func TestConvertRunningWorkflowsToBufferedStarts_UniqueRequestIDs(t *testing.T) { + // With ALLOW_ALL overlap policy, multiple workflows can be running + // concurrently. Each must get a unique RequestId so that + // recordCompletedAction matches the correct BufferedStart. + now := time.Now().UTC() + running := []*commonpb.WorkflowExecution{ + {WorkflowId: "wf-1", RunId: "run-aaa"}, + {WorkflowId: "wf-2", RunId: "run-bbb"}, + {WorkflowId: "wf-3", RunId: "run-ccc"}, + } + + starts := convertRunningWorkflowsToBufferedStarts( + running, "ns-id", "sched-id", 1, now, + ) + require.Len(t, starts, 3) + + requestIDs := make(map[string]string) // requestId -> runId + for _, start := range starts { + if prev, ok := requestIDs[start.RequestId]; ok { + t.Fatalf("duplicate RequestId %q: used by both RunId %q and %q", + start.RequestId, prev, start.RunId) + } + requestIDs[start.RequestId] = start.RunId + } +} diff --git a/chasm/lib/scheduler/proto/v1/message.proto b/chasm/lib/scheduler/proto/v1/message.proto new file mode 100644 index 00000000000..999dd406dc9 --- /dev/null +++ b/chasm/lib/scheduler/proto/v1/message.proto @@ -0,0 +1,122 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.scheduler.proto.v1; + +import "google/protobuf/timestamp.proto"; +import "temporal/api/common/v1/message.proto"; +import "temporal/api/failure/v1/message.proto"; +import "temporal/api/schedule/v1/message.proto"; +import "temporal/server/api/schedule/v1/message.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpb"; + +// CHASM scheduler top-level state. 
+message SchedulerState { + // Scheduler request parameters and metadata. + temporal.api.schedule.v1.Schedule schedule = 2; + temporal.api.schedule.v1.ScheduleInfo info = 3; + + // State common to all generators is stored in the top-level machine. + string namespace = 5; + string namespace_id = 6; + string schedule_id = 7; + + // Implemented as a sequence number. Used for optimistic locking against + // update requests. + int64 conflict_token = 8; + + // The closed flag is set true after a schedule completes, and the idle timer + // expires. + bool closed = 9; + + // When true, this scheduler is a sentinel that exists only to reserve the + // schedule ID. All API operations return NotFound. + bool sentinel = 10; + + // Set when a migration to workflow-backed scheduler (V1) is pending. + // Unpause operations are blocked while this is set. + WorkflowMigrationState workflow_migration = 11; +} + +// WorkflowMigrationState tracks the state of an in-progress V2-to-V1 migration. +message WorkflowMigrationState { + // The schedule's paused state before migration was initiated. Used to + // restore the correct paused state when passing state to the V1 workflow. + bool pre_migration_paused = 1; + + // The schedule's notes before migration was initiated. + string pre_migration_notes = 2; +} + +// CHASM scheduler's Generator internal state. +message GeneratorState { + // High water mark. + google.protobuf.Timestamp last_processed_time = 3; + + // A list of upcoming times an action will be triggered. + repeated google.protobuf.Timestamp future_action_times = 4; +} + +// CHASM scheduler's Invoker internal state. +message InvokerState { + // Buffered starts that will be started by the Invoker. + repeated temporal.server.api.schedule.v1.BufferedStart buffered_starts = 2; + + // Workflow executions that will be cancelled due to overlap policy. 
+ repeated temporal.api.common.v1.WorkflowExecution cancel_workflows = 3; + + // Workflow executions that will be terminated due to overlap policy. + repeated temporal.api.common.v1.WorkflowExecution terminate_workflows = 4; + + // High water mark, used for evaluating when to fire tasks that are backing + // off from a retry. LastProcessedTime is stored as state so that task + // generation will be consistent, regardless of when generation occurs, such + // as after applying a replicated state (as opposed to evaluating based on + // present time). + google.protobuf.Timestamp last_processed_time = 5; + + reserved 6; +} + +// CHASM scheduler's Backfiller internal state. Backfill requests are 1:1 +// with Backfiller nodes. Backfiller nodes also handle immediate trigger requests. +message BackfillerState { + oneof request { + temporal.api.schedule.v1.BackfillRequest backfill_request = 1; + + // When set, immediately buffer a single manual action. + temporal.api.schedule.v1.TriggerImmediatelyRequest trigger_request = 2; + } + + // Every Backfiller should be assigned a unique ID upon creation, used + // for deduplication. + string backfill_id = 6; + + // High water mark. + google.protobuf.Timestamp last_processed_time = 7; + + // Attempt count, incremented when the buffer is full and the Backfiller + // needs to back off before retrying to fill. + int64 attempt = 8; +} + +// CHASM scheduler retains the payload data for the last completed workflow. Both +// last success and failure are stored simultaneously. +message LastCompletionResult { + temporal.api.common.v1.Payload success = 1; + temporal.api.failure.v1.Failure failure = 2; +} + +// SchedulerMigrationState is a stack-agnostic interchange format for migrating +// scheduler state between V1 (workflow-backed) and V2 (CHASM) implementations. 
+message SchedulerMigrationState { + SchedulerState scheduler_state = 1; + GeneratorState generator_state = 2; + InvokerState invoker_state = 3; + map<string, BackfillerState> backfillers = 4; + LastCompletionResult last_completion_result = 5; + + // Visibility data. + map<string, temporal.api.common.v1.Payload> search_attributes = 6; + map<string, temporal.api.common.v1.Payload> memo = 7; +} diff --git a/chasm/lib/scheduler/proto/v1/request_response.proto b/chasm/lib/scheduler/proto/v1/request_response.proto new file mode 100644 index 00000000000..9dab1efa974 --- /dev/null +++ b/chasm/lib/scheduler/proto/v1/request_response.proto @@ -0,0 +1,107 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.scheduler.proto.v1; + +import "chasm/lib/scheduler/proto/v1/message.proto"; +import "temporal/api/workflowservice/v1/request_response.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpb"; + +message CreateScheduleRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + temporal.api.workflowservice.v1.CreateScheduleRequest frontend_request = 2; +} + +message CreateScheduleResponse { + temporal.api.workflowservice.v1.CreateScheduleResponse frontend_response = 1; +} + +message UpdateScheduleRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + temporal.api.workflowservice.v1.UpdateScheduleRequest frontend_request = 2; +} + +message UpdateScheduleResponse { + temporal.api.workflowservice.v1.UpdateScheduleResponse frontend_response = 1; +} + +message PatchScheduleRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + temporal.api.workflowservice.v1.PatchScheduleRequest frontend_request = 2; +} + +message PatchScheduleResponse { + temporal.api.workflowservice.v1.PatchScheduleResponse frontend_response = 1; +} + +message DeleteScheduleRequest { + // Internal namespace ID (UUID). 
+ string namespace_id = 1; + + temporal.api.workflowservice.v1.DeleteScheduleRequest frontend_request = 2; +} + +message DeleteScheduleResponse { + temporal.api.workflowservice.v1.DeleteScheduleResponse frontend_response = 1; +} + +message DescribeScheduleRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + temporal.api.workflowservice.v1.DescribeScheduleRequest frontend_request = 2; +} + +message DescribeScheduleResponse { + temporal.api.workflowservice.v1.DescribeScheduleResponse frontend_response = 1; +} + +message ListScheduleMatchingTimesRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + temporal.api.workflowservice.v1.ListScheduleMatchingTimesRequest frontend_request = 2; +} + +message ListScheduleMatchingTimesResponse { + temporal.api.workflowservice.v1.ListScheduleMatchingTimesResponse frontend_response = 1; +} + +message CreateFromMigrationStateRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + SchedulerMigrationState state = 2; +} + +message CreateFromMigrationStateResponse {} + +message CreateSentinelRequest { + // Internal namespace ID (UUID). + string namespace_id = 1; + + string namespace = 2; + + string schedule_id = 3; +} + +message CreateSentinelResponse {} + +message MigrateToWorkflowRequest { + // The namespace ID of the schedule to migrate. + string namespace_id = 1; + // The schedule ID to migrate from CHASM to workflow-backed. + string schedule_id = 2; + // The identity of the caller initiating the migration. + string identity = 3; + // A unique request ID for idempotency. 
+ string request_id = 4; +} + +message MigrateToWorkflowResponse {} diff --git a/chasm/lib/scheduler/proto/v1/service.proto b/chasm/lib/scheduler/proto/v1/service.proto new file mode 100644 index 00000000000..65c21e949c8 --- /dev/null +++ b/chasm/lib/scheduler/proto/v1/service.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.scheduler.proto.v1; + +import "chasm/lib/scheduler/proto/v1/request_response.proto"; +import "temporal/server/api/common/v1/api_category.proto"; +import "temporal/server/api/routing/v1/extension.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpb"; + +service SchedulerService { + rpc CreateSchedule(CreateScheduleRequest) returns (CreateScheduleResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.schedule_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc UpdateSchedule(UpdateScheduleRequest) returns (UpdateScheduleResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.schedule_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc PatchSchedule(PatchScheduleRequest) returns (PatchScheduleResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.schedule_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc DeleteSchedule(DeleteScheduleRequest) returns (DeleteScheduleResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.schedule_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc DescribeSchedule(DescribeScheduleRequest) returns (DescribeScheduleResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.schedule_id"; + option 
(temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + + rpc ListScheduleMatchingTimes(ListScheduleMatchingTimesRequest) returns (ListScheduleMatchingTimesResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "frontend_request.schedule_id"; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } + rpc CreateFromMigrationState(CreateFromMigrationStateRequest) returns (CreateFromMigrationStateResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "state.scheduler_state.schedule_id"; + } + + rpc CreateSentinel(CreateSentinelRequest) returns (CreateSentinelResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "schedule_id"; + } + rpc MigrateToWorkflow(MigrateToWorkflowRequest) returns (MigrateToWorkflowResponse) { + option (temporal.server.api.routing.v1.routing).business_id = "schedule_id"; + } +} diff --git a/chasm/lib/scheduler/proto/v1/tasks.proto b/chasm/lib/scheduler/proto/v1/tasks.proto new file mode 100644 index 00000000000..077e71fe0be --- /dev/null +++ b/chasm/lib/scheduler/proto/v1/tasks.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.scheduler.proto.v1; + +import "google/protobuf/duration.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb;schedulerpb"; + +// Fires when the scheduler's idle period has lapsed, and the scheduler should +// be closed. +message SchedulerIdleTask { + // Idle time total is set at time of task creation, so that if the dynamic config key + // controlling idle time changes, task validation will be aware. + google.protobuf.Duration idle_time_total = 1; +} + +// Ensures that callbacks for all running buffered starts are attached. Used only +// during migration from V1, as workflows started by CHASM scheduler are started +// with callbacks attached. 
+message SchedulerCallbacksTask {} + +// Buffers actions based on the schedule's specification. +message GeneratorTask {} + +// Processes buffered actions, deciding whether to execute, delay, or discard. +message InvokerProcessBufferTask {} + +// Drives execution of pending buffered actions to completion by starting, +// canceling, or terminating workflows. +message InvokerExecuteTask {} + +// Buffers actions based on a manually-requested backfill. +message BackfillerTask {} + +// Triggers migration from CHASM (V2) to workflow-backed (V1) scheduler. +message SchedulerMigrateToWorkflowTask {} diff --git a/chasm/lib/scheduler/scheduler.go b/chasm/lib/scheduler/scheduler.go new file mode 100644 index 00000000000..621e273b97c --- /dev/null +++ b/chasm/lib/scheduler/scheduler.go @@ -0,0 +1,953 @@ +package scheduler + +import ( + "bytes" + "fmt" + "strings" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + schedulepb "go.temporal.io/api/schedule/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/contextutil" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute/sadefs" + "go.temporal.io/server/common/util" + "go.temporal.io/server/service/worker/scheduler" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Scheduler is the root component of a CHASM scheduler tree. 
The rest of the +// tree will consist of 2 or more sub-components: +// - Generator: buffers actions according to the schedule specification +// - Invoker: executes buffered actions +// - Backfiller: buffers actions according to requested backfills +type Scheduler struct { + chasm.UnimplementedComponent + + // Persisted internal state, consisting of state relevant to all components in + // the scheduler tree. + *schedulerpb.SchedulerState + + // Last success/failure payloads, stored on this separate data node + // to minimize write traffic. + LastCompletionResult chasm.Field[*schedulerpb.LastCompletionResult] + + Generator chasm.Field[*Generator] + Invoker chasm.Field[*Invoker] + Backfillers chasm.Map[string, *Backfiller] // Backfill ID => *Backfiller + + Visibility chasm.Field[*chasm.Visibility] + + // Locally-cached state, invalidated whenever cacheConflictToken != ConflictToken. + cacheConflictToken int64 + compiledSpec *scheduler.CompiledSpec // compiledSpec is only ever replaced whole, not mutated. +} + +var ( + _ (chasm.VisibilitySearchAttributesProvider) = (*Scheduler)(nil) + _ (chasm.VisibilityMemoProvider) = (*Scheduler)(nil) +) + +var ( + executionStatusRunning = "Running" + executionStatusCompleted = "Completed" +) + +var executionStatusSearchAttribute = chasm.NewSearchAttributeKeyword("ExecutionStatus", chasm.SearchAttributeFieldLowCardinalityKeyword01) +var initialSerializedConflictToken = serializeConflictToken(scheduler.InitialConflictToken) + +const ( + // How many recent actions to keep on the Info.RecentActions list. + recentActionCount = 10 + + // Item limit per spec field on the ScheduleInfo memo. + listInfoSpecFieldLimit = 10 + + // Field in which the schedule's memo is stored. + visibilityMemoFieldInfo = "ScheduleInfo" + + // Maximum number of matching times to return. + maxListMatchingTimesCount = 1000 + + // Maximum number of backfillers allowed on a single scheduler. 
+ maxBackfillers = 100 +) + +var ( + ErrConflictTokenMismatch = serviceerror.NewFailedPrecondition("mismatched conflict token") + ErrClosed = serviceerror.NewFailedPrecondition("schedule closed") + ErrTooManyBackfillers = serviceerror.NewFailedPrecondition("too many concurrent backfillers") + ErrInvalidQuery = serviceerror.NewInvalidArgument("missing or invalid query") + ErrSentinel = serviceerror.NewNotFound("schedule is a sentinel") + ErrSentinelBlocked = serviceerror.NewUnavailable("schedule is a sentinel; please retry after sentinel expires") + ErrMigrationPending = serviceerror.NewUnavailable("schedule has a pending migration to workflow; please retry later") +) + +// NewScheduler returns an initialized CHASM scheduler root component. +func NewScheduler( + ctx chasm.MutableContext, + namespace, namespaceID, scheduleID string, + input *schedulepb.Schedule, + patch *schedulepb.SchedulePatch, +) (*Scheduler, error) { + var zero time.Time + + sched := &Scheduler{ + SchedulerState: &schedulerpb.SchedulerState{ + Schedule: input, + Info: &schedulepb.ScheduleInfo{ + UpdateTime: timestamppb.New(zero), + }, + Namespace: namespace, + NamespaceId: namespaceID, + ScheduleId: scheduleID, + ConflictToken: scheduler.InitialConflictToken, + }, + cacheConflictToken: scheduler.InitialConflictToken, + Backfillers: make(chasm.Map[string, *Backfiller]), + LastCompletionResult: chasm.NewDataField(ctx, &schedulerpb.LastCompletionResult{}), + } + sched.setNullableFields() + sched.Info.CreateTime = timestamppb.New(ctx.Now(sched)) + + invoker := NewInvoker(ctx) + sched.Invoker = chasm.NewComponentField(ctx, invoker) + + generator := NewGenerator(ctx) + sched.Generator = chasm.NewComponentField(ctx, generator) + + // Create backfillers to fulfill initialPatch. 
+ if err := sched.handlePatch(ctx, patch); err != nil { + return nil, err + } + visibility := chasm.NewVisibility(ctx) + sched.Visibility = chasm.NewComponentField(ctx, visibility) + + return sched, nil +} + +// NewSentinel returns a sentinel CHASM scheduler that exists only to reserve +// the schedule ID. Sentinels have no sub-components (other than Info for idle +// tracking) and return NotFound on all API operations. An idle task auto-closes +// the sentinel after SentinelIdleTime. +func NewSentinel( + ctx chasm.MutableContext, + namespace, namespaceID, scheduleID string, +) *Scheduler { + s := &Scheduler{ + SchedulerState: &schedulerpb.SchedulerState{ + Namespace: namespace, + NamespaceId: namespaceID, + ScheduleId: scheduleID, + Sentinel: true, + ConflictToken: scheduler.InitialConflictToken, + Info: &schedulepb.ScheduleInfo{}, + }, + cacheConflictToken: scheduler.InitialConflictToken, + } + now := ctx.Now(s) + s.Info.CreateTime = timestamppb.New(now) + + ctx.AddTask(s, chasm.TaskAttributes{ + ScheduledTime: now.Add(SentinelIdleTime), + }, &schedulerpb.SchedulerIdleTask{ + IdleTimeTotal: durationpb.New(SentinelIdleTime), + }) + + return s +} + +// CreateSentinelFn is the chasm.StartExecution factory for creating sentinel +// schedulers. Used by the V1 path to reserve the CHASM key space. +func CreateSentinelFn( + ctx chasm.MutableContext, + req *schedulerpb.CreateSentinelRequest, +) (*Scheduler, error) { + return NewSentinel(ctx, req.Namespace, req.NamespaceId, req.ScheduleId), nil +} + +// IsSentinel returns true if this is a sentinel scheduler. +func (s *Scheduler) IsSentinel() bool { + return s.Sentinel +} + +// setNullableFields sets fields that are nullable in API requests. 
+func (s *Scheduler) setNullableFields() { + if s.Schedule.Policies == nil { + s.Schedule.Policies = &schedulepb.SchedulePolicies{} + } + if s.Schedule.State == nil { + s.Schedule.State = &schedulepb.ScheduleState{} + } +} + +// handlePatch creates backfillers to fulfill the given patch request. +func (s *Scheduler) handlePatch(ctx chasm.MutableContext, patch *schedulepb.SchedulePatch) error { + if patch == nil { + return nil + } + + // Each TriggerImmediately and BackfillRequest creates exactly one backfiller. + newCount := len(patch.BackfillRequest) + if patch.TriggerImmediately != nil { + newCount++ + } + if len(s.Backfillers)+newCount > maxBackfillers { + return ErrTooManyBackfillers + } + + if patch.TriggerImmediately != nil { + s.NewImmediateBackfiller(ctx, patch.TriggerImmediately) + } + for _, backfill := range patch.BackfillRequest { + s.NewRangeBackfiller(ctx, backfill) + } + return nil +} + +// CreateScheduler initializes a new Scheduler for CreateSchedule requests. +func CreateScheduler( + ctx chasm.MutableContext, + req *schedulerpb.CreateScheduleRequest, +) (*Scheduler, error) { + sched, err := NewScheduler( + ctx, + req.FrontendRequest.Namespace, + req.NamespaceId, + req.FrontendRequest.ScheduleId, + req.FrontendRequest.Schedule, + req.FrontendRequest.InitialPatch, + ) + if err != nil { + return nil, err + } + + // Update visibility with custom attributes. + visibility := sched.Visibility.Get(ctx) + visibility.MergeCustomSearchAttributes(ctx, req.FrontendRequest.GetSearchAttributes().GetIndexedFields()) + visibility.MergeCustomMemo(ctx, req.FrontendRequest.GetMemo().GetFields()) + + return sched, nil +} + +// CreateSchedulerFromMigration initializes a CHASM scheduler from migrated V1 state. +// Unlike CreateScheduler, this preserves the conflict token and other state from V1. 
+// +// The migrated state components (scheduler, generator, invoker, backfillers) are +// directly initialized from the request, preserving all state including the +// conflict token for client compatibility. +func CreateSchedulerFromMigration( + ctx chasm.MutableContext, + req *schedulerpb.CreateFromMigrationStateRequest, +) (*Scheduler, error) { + state := req.GetState() + + sched := &Scheduler{ + SchedulerState: state.GetSchedulerState(), + cacheConflictToken: state.GetSchedulerState().GetConflictToken(), + Backfillers: make(chasm.Map[string, *Backfiller]), + LastCompletionResult: chasm.NewDataField(ctx, state.GetLastCompletionResult()), + } + sched.setNullableFields() + + // These components won't start with any tasks, as stale running workflow entries + // can cause immediate computation after migration to drop actions due to overlap + // policy. Instead, SchedulerCallbacksTask fires both tasks after ensuring cached + // running workflow state is up-to-date. + sched.Invoker = chasm.NewComponentField(ctx, newInvokerWithState(ctx, state.GetInvokerState())) + sched.Generator = chasm.NewComponentField(ctx, newGeneratorWithState(ctx, state.GetGeneratorState())) + + for backfillID, backfillerState := range state.GetBackfillers() { + sched.Backfillers[backfillID] = chasm.NewComponentField(ctx, newBackfillerWithState(ctx, backfillerState)) + } + + visibility := chasm.NewVisibility(ctx) + sched.Visibility = chasm.NewComponentField(ctx, visibility) + visibility.MergeCustomSearchAttributes(ctx, state.GetSearchAttributes()) + visibility.MergeCustomMemo(ctx, state.GetMemo()) + + // Schedule a callbacks task to attach Nexus callbacks to any migrated + // running workflows. The task self-invalidates if there's no work to do. + ctx.AddTask(sched, chasm.TaskAttributes{}, &schedulerpb.SchedulerCallbacksTask{}) + + return sched, nil +} + +// LifecycleState implements the chasm.Component interface. 
+func (s *Scheduler) LifecycleState(ctx chasm.Context) chasm.LifecycleState { + if s.Closed { + return chasm.LifecycleStateCompleted + } + + return chasm.LifecycleStateRunning +} + +func (s *Scheduler) ContextMetadata(_ chasm.Context) map[string]string { + md := make(map[string]string, 2) + if wfType := s.Schedule.GetAction().GetStartWorkflow().GetWorkflowType().GetName(); wfType != "" { + md[contextutil.MetadataKeyWorkflowType] = wfType + } + if tq := s.Schedule.GetAction().GetStartWorkflow().GetTaskQueue().GetName(); tq != "" { + md[contextutil.MetadataKeyWorkflowTaskQueue] = tq + } + if len(md) == 0 { + return nil + } + return md +} + +// Terminate implements the chasm.RootComponent interface. +func (s *Scheduler) Terminate( + _ chasm.MutableContext, + _ chasm.TerminateComponentRequest, +) (chasm.TerminateComponentResponse, error) { + // TODO: Implement terminate logic. + return chasm.TerminateComponentResponse{}, nil +} + +// NewRangeBackfiller returns an initialized Backfiller component, which should +// be parented under a Scheduler root node. +func (s *Scheduler) NewRangeBackfiller( + ctx chasm.MutableContext, + request *schedulepb.BackfillRequest, +) *Backfiller { + backfiller := addBackfiller(ctx, s) + backfiller.Request = &schedulerpb.BackfillerState_BackfillRequest{ + BackfillRequest: request, + } + return backfiller +} + +// NewImmediateBackfiller returns an initialized Backfiller component, which should +// be parented under a Scheduler root node. +func (s *Scheduler) NewImmediateBackfiller( + ctx chasm.MutableContext, + request *schedulepb.TriggerImmediatelyRequest, +) *Backfiller { + backfiller := addBackfiller(ctx, s) + backfiller.Request = &schedulerpb.BackfillerState_TriggerRequest{ + TriggerRequest: request, + } + return backfiller +} + +// useScheduledAction returns true when the Scheduler should allow scheduled +// actions to be taken. 
+// +// When decrement is true, the schedule's state's `RemainingActions` counter is +// decremented when an action can be taken. When decrement is false, no state +// is mutated. +func (s *Scheduler) useScheduledAction(decrement bool) bool { + scheduleState := s.Schedule.GetState() + + // If paused, don't do anything. + if scheduleState.Paused { + return false + } + + // If unlimited actions, allow. + if !scheduleState.LimitedActions { + return true + } + + // Otherwise check and decrement limit. + if scheduleState.RemainingActions > 0 { + if decrement { + scheduleState.RemainingActions-- + + // The conflict token is updated because a client might be in the process of + // preparing an update request that increments their schedule's RemainingActions + // field. + s.updateConflictToken() + } + return true + } + + // No actions left + return false +} + +func (s *Scheduler) getCompiledSpec(specBuilder *scheduler.SpecBuilder) (*scheduler.CompiledSpec, error) { + s.validateCachedState() + + // Cache compiled spec. + if s.compiledSpec == nil { + cspec, err := specBuilder.NewCompiledSpec(s.Schedule.Spec) + if err != nil { + return nil, err + } + s.compiledSpec = cspec + } + + return s.compiledSpec, nil +} + +// WorkflowID returns the Workflow ID given as part of the request spec. +// During start generation, nominal time is suffixed to this ID. 
+func (s *Scheduler) WorkflowID() string { + return s.Schedule.GetAction().GetStartWorkflow().GetWorkflowId() +} + +func (s *Scheduler) jitterSeed() string { + return fmt.Sprintf("%s-%s", s.NamespaceId, s.ScheduleId) +} + +func (s *Scheduler) identity() string { + return fmt.Sprintf("temporal-scheduler-%s-%s", s.Namespace, s.ScheduleId) +} + +func (s *Scheduler) overlapPolicy() enumspb.ScheduleOverlapPolicy { + policy := s.Schedule.GetPolicies().GetOverlapPolicy() + if policy == enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED { + policy = enumspb.SCHEDULE_OVERLAP_POLICY_SKIP + } + return policy +} + +func (s *Scheduler) resolveOverlapPolicy(overlapPolicy enumspb.ScheduleOverlapPolicy) enumspb.ScheduleOverlapPolicy { + if overlapPolicy == enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED { + overlapPolicy = s.overlapPolicy() + } + return overlapPolicy +} + +// validateCachedState clears cached fields whenever the Scheduler's +// ConflictToken doesn't match its cacheConflictToken field. Validation is only +// as effective as the Scheduler's backing persisted state is up-to-date. +func (s *Scheduler) validateCachedState() { + if s.cacheConflictToken != s.ConflictToken { + // Bust stale cached fields. + s.compiledSpec = nil + + // We're now up-to-date. + s.cacheConflictToken = s.ConflictToken + } +} + +// updateConflictToken bumps the Scheduler's conflict token. This has a side +// effect of invalidating the local cache. Use whenever applying a mutation that +// should invalidate other in-flight updates. +func (s *Scheduler) updateConflictToken() { + s.ConflictToken++ +} + +// getLastEventTime returns the time of the last "event" to happen to the schedule. +// An event here is the schedule getting created or updated, or an action. This +// value is used for calculating the retention time (how long an idle schedule +// lives after becoming idle). 
+func (s *Scheduler) getLastEventTime(ctx chasm.Context) time.Time { + var lastEvent time.Time + invoker := s.Invoker.Get(ctx) + recentActions := invoker.recentActions() + if len(recentActions) > 0 { + lastEvent = recentActions[len(recentActions)-1].GetActualTime().AsTime() + } + lastEvent = util.MaxTime(lastEvent, s.Info.GetCreateTime().AsTime()) + lastEvent = util.MaxTime(lastEvent, s.Info.GetUpdateTime().AsTime()) + return lastEvent +} + +// getIdleExpiration returns an idle close time and the boolean value of 'true' +// for when a schedule is idle (pending soft delete). +func (s *Scheduler) getIdleExpiration( + ctx chasm.Context, + idleTime time.Duration, + nextWakeup time.Time, +) (time.Time, bool) { + // The idle timer to close off the component is started only for schedules with + // no more work to do. Paused schedules are held open indefinitely. + if idleTime == 0 || + s.GetSchedule().GetState().GetPaused() || + (!nextWakeup.IsZero() && s.useScheduledAction(false)) || + s.hasMoreAllowAllBackfills(ctx) { + return time.Time{}, false + } + + if s.IsSentinel() { + return s.Info.GetCreateTime().AsTime().Add(idleTime), true + } + return s.getLastEventTime(ctx).Add(idleTime), true +} + +func (s *Scheduler) hasMoreAllowAllBackfills(ctx chasm.Context) bool { + for _, field := range s.Backfillers { + backfiller := field.Get(ctx) + var policy enumspb.ScheduleOverlapPolicy + switch request := backfiller.GetRequest().(type) { + case *schedulerpb.BackfillerState_BackfillRequest: + policy = request.BackfillRequest.OverlapPolicy + case *schedulerpb.BackfillerState_TriggerRequest: + policy = request.TriggerRequest.OverlapPolicy + default: + return false + } + + if enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL == s.resolveOverlapPolicy(policy) { + return true + } + } + + return false +} + +type schedulerActionResult struct { + overlapSkipped int64 + missedCatchupWindow int64 + actionCount int64 +} + +// recordActionResult updates the Scheduler's customer-facing metrics. 
+// RunningWorkflows and RecentActions are computed from BufferedStarts. +func (s *Scheduler) recordActionResult(result *schedulerActionResult) { + s.Info.ActionCount += result.actionCount + s.Info.OverlapSkipped += result.overlapSkipped + s.Info.MissedCatchupWindow += result.missedCatchupWindow +} + +var _ chasm.NexusCompletionHandler = &Scheduler{} + +func executionStatusFromFailure(failure *failurepb.Failure) enumspb.WorkflowExecutionStatus { + switch failure.FailureInfo.(type) { + case *failurepb.Failure_CanceledFailureInfo: + return enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED + case *failurepb.Failure_TimeoutFailureInfo: + return enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT + default: + return enumspb.WORKFLOW_EXECUTION_STATUS_FAILED + } +} + +// HandleNexusCompletion allows Scheduler to record workflow completions from +// worfklows started by the same scheduler tree's Invoker. +func (s *Scheduler) HandleNexusCompletion( + ctx chasm.MutableContext, + info *persistencespb.ChasmNexusCompletion, +) error { + invoker := s.Invoker.Get(ctx) + + workflowID := invoker.runningWorkflowID(info.RequestId) + if workflowID == "" { + // If the request ID was removed, the request must have already been processed; + // fast-succeed. + return nil + } + + // Handle last completed/failed status and payloads. + // + // TODO - also record payload sizes once we have metrics wired into CHASM context. + var wfStatus enumspb.WorkflowExecutionStatus + switch outcome := info.Outcome.(type) { + case *persistencespb.ChasmNexusCompletion_Failure: + previousResult := s.LastCompletionResult.Get(ctx) // Most-recent success is kept after failure. 
+ wfStatus = executionStatusFromFailure(outcome.Failure) + s.LastCompletionResult = chasm.NewDataField(ctx, &schedulerpb.LastCompletionResult{ + Failure: outcome.Failure, + Success: previousResult.Success, + }) + case *persistencespb.ChasmNexusCompletion_Success: + wfStatus = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED + s.LastCompletionResult = chasm.NewDataField(ctx, &schedulerpb.LastCompletionResult{ + Success: outcome.Success, + }) + default: + wfStatus = enumspb.WORKFLOW_EXECUTION_STATUS_FAILED + } + + // Handle pause-on-failure. + if wfStatus != enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED && + s.Schedule.Policies.PauseOnFailure && !s.Schedule.State.Paused { + s.Schedule.State.Paused = true + s.Schedule.State.Notes = fmt.Sprintf( + "paused, workflow %s: %s", + strings.ToLower(wfStatus.String()), + workflowID, + ) + } + + // Record the completed action in the Invoker. + completed := &schedulespb.CompletedResult{ + Status: wfStatus, + CloseTime: info.CloseTime, + } + invoker.recordCompletedAction(ctx, completed, info.RequestId) + + return nil +} + +// Describe returns the current state of the Scheduler for DescribeSchedule requests. +func (s *Scheduler) Describe( + ctx chasm.Context, + req *schedulerpb.DescribeScheduleRequest, + specBuilder *scheduler.SpecBuilder, +) (*schedulerpb.DescribeScheduleResponse, error) { + if s.Sentinel { + return nil, ErrSentinel + } + if s.Closed { + return nil, ErrClosed + } + + visibility := s.Visibility.Get(ctx) + memo := visibility.CustomMemo(ctx) + delete(memo, visibilityMemoFieldInfo) // We don't need to return a redundant info block. + + if s.Schedule.GetPolicies().GetOverlapPolicy() == enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED { + s.Schedule.Policies.OverlapPolicy = s.overlapPolicy() + } + if !s.Schedule.GetPolicies().GetCatchupWindow().IsValid() { + // TODO - this should be set from Tweakables.DefaultCatchupWindow. 
+ s.Schedule.Policies.CatchupWindow = durationpb.New(365 * 24 * time.Hour) + } + + schedule := common.CloneProto(s.Schedule) + cleanSpec(schedule.Spec) + + generator := s.Generator.Get(ctx) + if generator.GetFutureActionTimes() == nil { + // FutureActionTimes is populated asynchronously by the GeneratorTask. If a + // newly-created schedule is described before the task executes, this field may be + // nil. In that case, compute it on-demand. + generator.UpdateFutureActionTimes(ctx, specBuilder) + } + + // Populate computed views from Invoker's BufferedStarts. + invoker := s.Invoker.Get(ctx) + info := common.CloneProto(s.Info) + info.RunningWorkflows = invoker.runningWorkflowExecutions() + info.RecentActions = invoker.recentActions() + info.FutureActionTimes = generator.GetFutureActionTimes() + // BufferedStarts holds waiting, running, and recently-completed entries; only the + // waiting portion (those not yet surfaced via RecentActions) counts as buffered. + info.BufferSize = int64(len(invoker.GetBufferedStarts()) - len(info.RecentActions)) + + return &schedulerpb.DescribeScheduleResponse{ + FrontendResponse: &workflowservice.DescribeScheduleResponse{ + Schedule: schedule, + Info: info, + ConflictToken: s.generateConflictToken(), + Memo: &commonpb.Memo{Fields: memo}, + SearchAttributes: &commonpb.SearchAttributes{IndexedFields: visibility.CustomSearchAttributes(ctx)}, + }, + }, nil +} + +// cleanSpec sets default values in ranges for the DescribeSchedule response. 
+func cleanSpec(spec *schedulepb.ScheduleSpec) { + cleanRanges := func(ranges []*schedulepb.Range) { + for _, r := range ranges { + if r.End < r.Start { + r.End = r.Start + } + if r.Step == 0 { + r.Step = 1 + } + } + } + cleanCal := func(structured *schedulepb.StructuredCalendarSpec) { + cleanRanges(structured.Second) + cleanRanges(structured.Minute) + cleanRanges(structured.Hour) + cleanRanges(structured.DayOfMonth) + cleanRanges(structured.Month) + cleanRanges(structured.Year) + cleanRanges(structured.DayOfWeek) + } + for _, structured := range spec.StructuredCalendar { + cleanCal(structured) + } + for _, structured := range spec.ExcludeStructuredCalendar { + cleanCal(structured) + } +} + +// ListMatchingTimes returns the upcoming times that the schedule will trigger +// within the given time range. +func (s *Scheduler) ListMatchingTimes( + ctx chasm.Context, + req *schedulerpb.ListScheduleMatchingTimesRequest, + specBuilder *scheduler.SpecBuilder, +) (*schedulerpb.ListScheduleMatchingTimesResponse, error) { + if s.Sentinel { + return nil, ErrSentinel + } + if s.Closed { + return nil, ErrClosed + } + + frontendReq := req.FrontendRequest + if frontendReq == nil || frontendReq.StartTime == nil || frontendReq.EndTime == nil { + return nil, ErrInvalidQuery + } + + cspec, err := s.getCompiledSpec(specBuilder) + if err != nil { + return nil, serviceerror.NewInvalidArgumentf("invalid schedule: %v", err) + } + + var out []*timestamppb.Timestamp + t1 := timestamp.TimeValue(frontendReq.StartTime) + for range maxListMatchingTimesCount { + t1 = cspec.GetNextTime(s.jitterSeed(), t1).Next + if t1.IsZero() || t1.After(timestamp.TimeValue(frontendReq.EndTime)) { + break + } + out = append(out, timestamppb.New(t1)) + } + + return &schedulerpb.ListScheduleMatchingTimesResponse{ + FrontendResponse: &workflowservice.ListScheduleMatchingTimesResponse{ + StartTime: out, + }, + }, nil +} + +// Delete marks the Scheduler as closed without an idle timer. 
+func (s *Scheduler) Delete( + ctx chasm.MutableContext, + req *schedulerpb.DeleteScheduleRequest, +) (*schedulerpb.DeleteScheduleResponse, error) { + if s.Closed { + return nil, ErrClosed + } + if s.Sentinel { + return nil, ErrSentinel + } + s.Closed = true + return &schedulerpb.DeleteScheduleResponse{ + FrontendResponse: &workflowservice.DeleteScheduleResponse{}, + }, nil +} + +// MigrateToWorkflow pauses the schedule and schedules a side-effect task to +// start the V1 workflow. This is the CHASM-side operation for V2-to-V1 migration. +// It is idempotent: if a migration is already pending, it returns success +// without taking any action. +func (s *Scheduler) MigrateToWorkflow( + ctx chasm.MutableContext, + req *schedulerpb.MigrateToWorkflowRequest, +) (*schedulerpb.MigrateToWorkflowResponse, error) { + if s.Sentinel { + return nil, ErrSentinel + } + if s.Closed { + return nil, ErrClosed + } + if s.WorkflowMigration != nil { + return &schedulerpb.MigrateToWorkflowResponse{}, nil + } + + // Save pre-migration paused state, mark migration as pending, then pause. + s.WorkflowMigration = &schedulerpb.WorkflowMigrationState{ + PreMigrationPaused: s.Schedule.State.Paused, + PreMigrationNotes: s.Schedule.State.Notes, + } + s.Schedule.State.Paused = true + s.Schedule.State.Notes = "paused for migration to workflow-backed scheduler" + + // Schedule a side-effect task to export state and start the V1 workflow. + ctx.AddTask(s, chasm.TaskAttributes{}, &schedulerpb.SchedulerMigrateToWorkflowTask{}) + + return &schedulerpb.MigrateToWorkflowResponse{}, nil +} + +// Update replaces the schedule with a new one for UpdateSchedule requests. +func (s *Scheduler) Update( + ctx chasm.MutableContext, + req *schedulerpb.UpdateScheduleRequest, +) (*schedulerpb.UpdateScheduleResponse, error) { + if s.Sentinel { + return nil, ErrSentinel + } + // UpdateComponent does not reject mutations on completed executions, + // so we must check explicitly here. 
+ if s.Closed { + return nil, ErrClosed + } + if !s.validateConflictToken(req.FrontendRequest.ConflictToken) { + return nil, ErrConflictTokenMismatch + } + + // Update custom search attributes. + if req.FrontendRequest.GetSearchAttributes() != nil { + // To preserve compatibility with V1 scheduler, we do a full replacement + // of search attributes, dropping any that aren't a part of the update's + // `CustomSearchAttributes` map. Search attribute replacement is ignored entirely + // when that map is unset, however, an allocated yet empty map will clear all + // attributes. + + // Preserve the old custom memo in the new Visibility component. + oldVisibility := s.Visibility.Get(ctx) + oldMemo := oldVisibility.CustomMemo(ctx) + + visibility := chasm.NewVisibilityWithData(ctx, req.FrontendRequest.GetSearchAttributes().GetIndexedFields(), oldMemo) + s.Visibility = chasm.NewComponentField(ctx, visibility) + } + + // Reject updates outright when a migration is pending so that changes are + // not silently lost during the migration window. + if s.WorkflowMigration != nil { + return nil, ErrMigrationPending + } + + // Update custom memo. + if req.FrontendRequest.GetMemo() != nil { + visibility := s.Visibility.Get(ctx) + visibility.ReplaceCustomMemo(ctx, req.FrontendRequest.GetMemo().GetFields()) + } + + s.Schedule = req.FrontendRequest.Schedule + s.setNullableFields() + + s.Info.UpdateTime = timestamppb.New(ctx.Now(s)) + s.updateConflictToken() + + // Since the spec may have been updated, kick off the generator. + generator := s.Generator.Get(ctx) + generator.Generate(ctx) + + return &schedulerpb.UpdateScheduleResponse{ + FrontendResponse: &workflowservice.UpdateScheduleResponse{}, + }, nil +} + +// Patch applies a patch to the schedule for PatchSchedule requests. 
+func (s *Scheduler) Patch( + ctx chasm.MutableContext, + req *schedulerpb.PatchScheduleRequest, +) (*schedulerpb.PatchScheduleResponse, error) { + if s.Sentinel { + return nil, ErrSentinel + } + // UpdateComponent does not reject mutations on completed executions, + // so we must check explicitly here. + if s.Closed { + return nil, ErrClosed + } + // Handle paused status. + if req.FrontendRequest.Patch.Pause != "" { + s.Schedule.State.Paused = true + s.Schedule.State.Notes = req.FrontendRequest.Patch.Pause + } + if req.FrontendRequest.Patch.Unpause != "" { + if s.WorkflowMigration != nil { + return nil, ErrMigrationPending + } + s.Schedule.State.Paused = false + s.Schedule.State.Notes = req.FrontendRequest.Patch.Unpause + s.Generator.Get(ctx).Generate(ctx) + } + + if err := s.handlePatch(ctx, req.FrontendRequest.Patch); err != nil { + return nil, err + } + + s.Info.UpdateTime = timestamppb.New(ctx.Now(s)) + s.updateConflictToken() + + return &schedulerpb.PatchScheduleResponse{ + FrontendResponse: &workflowservice.PatchScheduleResponse{}, + }, nil +} + +func (s *Scheduler) generateConflictToken() []byte { + return serializeConflictToken(s.ConflictToken) +} + +func (s *Scheduler) validateConflictToken(token []byte) bool { + // When unset in mutate requests, the schedule should update unconditionally. + if token == nil { + return true + } + + current := s.generateConflictToken() + return bytes.Equal(current, token) +} + +func (s *Scheduler) executionStatus() string { + if s.Closed { + return executionStatusCompleted + } + return executionStatusRunning +} + +// SearchAttributes returns the Temporal-managed key values for visibility. 
+func (s *Scheduler) SearchAttributes(chasm.Context) []chasm.SearchAttributeKeyValue { + if s.Sentinel { + return []chasm.SearchAttributeKeyValue{ + executionStatusSearchAttribute.Value(s.executionStatus()), + } + } + return []chasm.SearchAttributeKeyValue{ + executionStatusSearchAttribute.Value(s.executionStatus()), + chasm.SearchAttributeTemporalSchedulePaused.Value(s.Schedule.GetState().GetPaused()), + } +} + +// Memo returns the scheduler's info block for visibility. +func (s *Scheduler) Memo( + ctx chasm.Context, +) proto.Message { + if s.Sentinel { + return nil + } + return s.ListInfo(ctx) +} + +// ListInfo returns the ScheduleListInfo, used as the visibility memo, and to +// answer List queries. +func (s *Scheduler) ListInfo( + ctx chasm.Context, +) *schedulepb.ScheduleListInfo { + spec := common.CloneProto(s.Schedule.Spec) + + // Clear fields that are too large/not useful for the list view. + spec.TimezoneData = nil + + // Limit the number of specs and exclusions stored on the memo. + spec.ExcludeStructuredCalendar = util.SliceHead(spec.ExcludeStructuredCalendar, listInfoSpecFieldLimit) + spec.Interval = util.SliceHead(spec.Interval, listInfoSpecFieldLimit) + spec.StructuredCalendar = util.SliceHead(spec.StructuredCalendar, listInfoSpecFieldLimit) + + generator := s.Generator.Get(ctx) + invoker := s.Invoker.Get(ctx) + + return &schedulepb.ScheduleListInfo{ + Spec: spec, + WorkflowType: s.Schedule.Action.GetStartWorkflow().GetWorkflowType(), + Notes: s.Schedule.State.Notes, + Paused: s.Schedule.State.Paused, + RecentActions: invoker.recentActions(), + FutureActionTimes: generator.FutureActionTimes, + } +} + +// startWorkflowSearchAttributes returns the search attributes to be applied to +// workflows kicked off. Includes custom search attributes and Temporal-managed. 
+func (s *Scheduler) startWorkflowSearchAttributes( + nominal time.Time, +) *commonpb.SearchAttributes { + attributes := s.Schedule.GetAction().GetStartWorkflow().GetSearchAttributes() + + fields := util.CloneMapNonNil(attributes.GetIndexedFields()) + if p, err := payload.Encode(nominal); err == nil { + fields[sadefs.TemporalScheduledStartTime] = p + } + if p, err := payload.Encode(s.ScheduleId); err == nil { + fields[sadefs.TemporalScheduledById] = p + } + return &commonpb.SearchAttributes{ + IndexedFields: fields, + } +} diff --git a/chasm/lib/scheduler/scheduler_idle_tasks_test.go b/chasm/lib/scheduler/scheduler_idle_tasks_test.go new file mode 100644 index 00000000000..075e838b719 --- /dev/null +++ b/chasm/lib/scheduler/scheduler_idle_tasks_test.go @@ -0,0 +1,125 @@ +package scheduler_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type idleValidateTestCase struct { + configIdleTime time.Duration + taskIdleTimeTotal time.Duration + scheduledTime time.Time + schedulerClosed bool + idleMatchesScheduledTime bool + setupScheduler func(*scheduler.Scheduler, chasm.Context) + expectedValid bool +} + +func runIdleValidateTestCase(t *testing.T, env *testEnv, c *idleValidateTestCase) { + ctx := env.MutableContext() + sched := env.Scheduler + + sched.Closed = c.schedulerClosed + + if c.setupScheduler != nil { + c.setupScheduler(sched, ctx) + } + + config := &scheduler.Config{ + Tweakables: func(_ string) scheduler.Tweakables { + tweakables := scheduler.DefaultTweakables + tweakables.IdleTime = c.configIdleTime + return tweakables + }, + } + + handler := scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: config, + }) + + task := 
&schedulerpb.SchedulerIdleTask{ + IdleTimeTotal: durationpb.New(c.taskIdleTimeTotal), + } + + scheduledTime := c.scheduledTime + if c.idleMatchesScheduledTime { + lastEventTime := scheduledTime.Add(-c.configIdleTime) + sched.Info.UpdateTime = timestamppb.New(lastEventTime) + sched.Info.CreateTime = timestamppb.New(lastEventTime) + } + + taskAttrs := chasm.TaskAttributes{ + ScheduledTime: scheduledTime, + } + + isValid, err := handler.Validate(ctx, sched, taskAttrs, task) + require.NoError(t, err) + require.Equal(t, c.expectedValid, isValid) +} + +func TestIdleTask_Execute(t *testing.T) { + env := newTestEnv(t) + ctx := env.MutableContext() + sched := env.Scheduler + + handler := scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: defaultConfig(), + }) + + // Verify scheduler starts open. + require.False(t, sched.Closed) + + // Execute the idle task. + err := handler.Execute(ctx, sched, chasm.TaskAttributes{}, &schedulerpb.SchedulerIdleTask{}) + require.NoError(t, err) + + // Verify scheduler is now closed. + require.True(t, sched.Closed) +} + +func TestIdleTask_Validate_SchedulerNotIdle(t *testing.T) { + env := newTestEnv(t) + now := env.TimeSource.Now() + runIdleValidateTestCase(t, env, &idleValidateTestCase{ + configIdleTime: 10 * time.Minute, + taskIdleTimeTotal: 10 * time.Minute, + scheduledTime: now, + setupScheduler: func(sched *scheduler.Scheduler, ctx chasm.Context) { + // Make scheduler not idle by setting it as paused. 
+ sched.Schedule.State.Paused = true + }, + expectedValid: false, + }) +} + +func TestIdleTask_Validate_ValidIdleTask(t *testing.T) { + env := newTestEnv(t) + now := env.TimeSource.Now() + runIdleValidateTestCase(t, env, &idleValidateTestCase{ + configIdleTime: 10 * time.Minute, + taskIdleTimeTotal: 10 * time.Minute, + scheduledTime: now, + idleMatchesScheduledTime: true, + expectedValid: true, + }) +} + +func TestIdleTask_Validate_SchedulerAlreadyClosed(t *testing.T) { + env := newTestEnv(t) + now := env.TimeSource.Now() + runIdleValidateTestCase(t, env, &idleValidateTestCase{ + configIdleTime: 10 * time.Minute, + taskIdleTimeTotal: 10 * time.Minute, + scheduledTime: now, + schedulerClosed: true, + idleMatchesScheduledTime: true, + expectedValid: false, // Should return !scheduler.Closed (false when closed). + }) +} diff --git a/chasm/lib/scheduler/scheduler_migrate_task.go b/chasm/lib/scheduler/scheduler_migrate_task.go new file mode 100644 index 00000000000..7b7d0adde6b --- /dev/null +++ b/chasm/lib/scheduler/scheduler_migrate_task.go @@ -0,0 +1,202 @@ +package scheduler + +import ( + "context" + "errors" + "fmt" + "maps" + "time" + + "github.com/google/uuid" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + schedulerpb "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/chasm/lib/scheduler/migration" + "go.temporal.io/server/common" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/resource" + "go.temporal.io/server/common/sdk" + "go.temporal.io/server/common/searchattribute" + "go.temporal.io/server/common/searchattribute/sadefs" + 
legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/fx" +) + +type ( + SchedulerMigrateToWorkflowTaskHandlerOptions struct { + fx.In + + Config *Config + MetricsHandler metrics.Handler + BaseLogger log.Logger + HistoryClient resource.HistoryClient + } + + SchedulerMigrateToWorkflowTaskHandler struct { + chasm.SideEffectTaskHandlerBase[*schedulerpb.SchedulerMigrateToWorkflowTask] + config *Config + metricsHandler metrics.Handler + baseLogger log.Logger + historyClient resource.HistoryClient + } +) + +func NewSchedulerMigrateToWorkflowTaskHandler( + opts SchedulerMigrateToWorkflowTaskHandlerOptions, +) *SchedulerMigrateToWorkflowTaskHandler { + return &SchedulerMigrateToWorkflowTaskHandler{ + config: opts.Config, + metricsHandler: opts.MetricsHandler, + baseLogger: opts.BaseLogger, + historyClient: opts.HistoryClient, + } +} + +func (h *SchedulerMigrateToWorkflowTaskHandler) Validate( + _ chasm.Context, + scheduler *Scheduler, + _ chasm.TaskAttributes, + _ *schedulerpb.SchedulerMigrateToWorkflowTask, +) (bool, error) { + if scheduler.Closed { + return false, nil + } + return scheduler.WorkflowMigration != nil, nil +} + +func (h *SchedulerMigrateToWorkflowTaskHandler) Execute( + ctx context.Context, + schedulerRef chasm.ComponentRef, + _ chasm.TaskAttributes, + _ *schedulerpb.SchedulerMigrateToWorkflowTask, +) error { + // Read state and convert to V1 args inside the ReadComponent callback, + // where we have access to the CHASM context for consistent time. 
+ type readResult struct { + args *schedulespb.StartScheduleArgs + namespace string + namespaceID string + scheduleID string + searchAttributes map[string]*commonpb.Payload + memo map[string]*commonpb.Payload + now time.Time + } + var result readResult + + _, err := chasm.ReadComponent( + ctx, + schedulerRef, + func(s *Scheduler, ctx chasm.Context, _ any) (struct{}, error) { + now := ctx.Now(s) + schedulerState := common.CloneProto(s.SchedulerState) + generatorState := common.CloneProto(s.Generator.Get(ctx).GeneratorState) + invokerState := common.CloneProto(s.Invoker.Get(ctx).InvokerState) + + bStates := make(map[string]*schedulerpb.BackfillerState, len(s.Backfillers)) + for id, field := range s.Backfillers { + bStates[id] = common.CloneProto(field.Get(ctx).BackfillerState) + } + + lastCompletionResult := common.CloneProto(s.LastCompletionResult.Get(ctx)) + + visibility := s.Visibility.Get(ctx) + searchAttributes := visibility.CustomSearchAttributes(ctx) + memo := visibility.CustomMemo(ctx) + + // Restore the pre-migration paused state so the V1 workflow receives + // the correct schedule state (not the migration-imposed pause). + // Validation guarantees WorkflowMigration and State are always set + // when this task runs. 
+ schedulerState.Schedule.State.Paused = schedulerState.WorkflowMigration.PreMigrationPaused + schedulerState.Schedule.State.Notes = schedulerState.WorkflowMigration.PreMigrationNotes + + result = readResult{ + args: migration.CHASMToLegacyStartScheduleArgs( + schedulerState, + generatorState, + invokerState, + bStates, + lastCompletionResult, + searchAttributes, + memo, + now, + ), + namespace: schedulerState.GetNamespace(), + namespaceID: schedulerState.GetNamespaceId(), + scheduleID: schedulerState.GetScheduleId(), + searchAttributes: searchAttributes, + memo: memo, + now: now, + } + return struct{}{}, nil + }, + nil, + ) + if err != nil { + return fmt.Errorf("failed to read scheduler state: %w", err) + } + + // Serialize the V1 workflow input. + inputPayloads, err := sdk.PreferProtoDataConverter.ToPayloads(result.args) + if err != nil { + return fmt.Errorf("failed to serialize schedule args: %w", err) + } + + // Build the start request to match createScheduleWorkflow in the frontend + // as closely as possible. Include TemporalNamespaceDivision so the V1 + // workflow is discoverable via ListSchedules. 
+ sa := &commonpb.SearchAttributes{IndexedFields: maps.Clone(result.searchAttributes)} + searchattribute.AddSearchAttribute(&sa, sadefs.TemporalNamespaceDivision, payload.EncodeString(legacyscheduler.NamespaceDivision)) + workflowID := legacyscheduler.WorkflowIDPrefix + result.scheduleID + startReq := &workflowservice.StartWorkflowExecutionRequest{ + RequestId: uuid.NewString(), + Namespace: result.namespace, + WorkflowId: workflowID, + WorkflowType: &commonpb.WorkflowType{Name: legacyscheduler.WorkflowType}, + TaskQueue: &taskqueuepb.TaskQueue{Name: primitives.PerNSWorkerTaskQueue}, + Input: inputPayloads, + Identity: fmt.Sprintf("temporal-scheduler-migration-%s-%s", result.namespace, result.scheduleID), + WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_ALLOW_DUPLICATE, + WorkflowIdConflictPolicy: enumspb.WORKFLOW_ID_CONFLICT_POLICY_FAIL, + Memo: &commonpb.Memo{Fields: maps.Clone(result.memo)}, + SearchAttributes: sa, + Priority: &commonpb.Priority{}, + } + + _, err = h.historyClient.StartWorkflowExecution( + ctx, + common.CreateHistoryStartWorkflowRequest(result.namespaceID, startReq, nil, nil, result.now), + ) + if err != nil { + // Treat already-started as success for idempotency. + var alreadyStartedErr *serviceerror.WorkflowExecutionAlreadyStarted + if !errors.As(err, &alreadyStartedErr) { + return fmt.Errorf("failed to start V1 scheduler workflow: %w", err) + } + } + + // Mark the CHASM scheduler as closed now that the V1 workflow is running. 
+ _, _, err = chasm.UpdateComponent( + ctx, + schedulerRef, + func(s *Scheduler, ctx chasm.MutableContext, _ any) (chasm.NoValue, error) { + s.Closed = true + s.WorkflowMigration = nil + return nil, nil + }, + nil, + ) + if err != nil { + return fmt.Errorf("failed to close CHASM scheduler after migration: %w", err) + } + + return nil +} diff --git a/chasm/lib/scheduler/scheduler_migrate_test.go b/chasm/lib/scheduler/scheduler_migrate_test.go new file mode 100644 index 00000000000..66f3c8f3df2 --- /dev/null +++ b/chasm/lib/scheduler/scheduler_migrate_test.go @@ -0,0 +1,169 @@ +package scheduler_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + schedulepb "go.temporal.io/api/schedule/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/chasm/lib/scheduler" + schedulerpb "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" +) + +func TestMigrateToWorkflow_PausesSchedule(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + require.False(t, sched.Schedule.State.Paused) + + _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + require.True(t, sched.Schedule.State.Paused) + require.Equal(t, "paused for migration to workflow-backed scheduler", sched.Schedule.State.Notes) + require.NotNil(t, sched.WorkflowMigration) +} + +func TestMigrateToWorkflow_SavesPreMigrationState(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + // Pause the schedule before migration with custom notes. 
+ sched.Schedule.State.Paused = true + sched.Schedule.State.Notes = "user paused" + + _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + require.NotNil(t, sched.WorkflowMigration) + require.True(t, sched.WorkflowMigration.PreMigrationPaused) + require.Equal(t, "user paused", sched.WorkflowMigration.PreMigrationNotes) +} + +func TestMigrateToWorkflow_SavesPreMigrationState_Unpaused(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + require.False(t, sched.Schedule.State.Paused) + + _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + require.NotNil(t, sched.WorkflowMigration) + require.False(t, sched.WorkflowMigration.PreMigrationPaused) + require.Empty(t, sched.WorkflowMigration.PreMigrationNotes) +} + +func TestMigrateToWorkflow_Idempotent(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + // Second call succeeds without error (no-op). + _, err = sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) +} + +func TestMigrateToWorkflow_Sentinel(t *testing.T) { + sentinel, ctx, _ := setupSentinelForTest(t) + + _, err := sentinel.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + + var notFoundErr *serviceerror.NotFound + require.ErrorAs(t, err, ¬FoundErr) +} + +func TestPatch_UnpauseBlockedDuringMigration(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + // Initiate migration. 
+ _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + // Attempt to unpause should fail. + _, err = sched.Patch(ctx, &schedulerpb.PatchScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.PatchScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Patch: &schedulepb.SchedulePatch{ + Unpause: "resuming", + }, + }, + }) + + var unavailableErr *serviceerror.Unavailable + require.ErrorAs(t, err, &unavailableErr) + require.ErrorIs(t, err, scheduler.ErrMigrationPending) +} + +func TestPatch_PauseAllowedDuringMigration(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + // Pause with different notes should succeed. + _, err = sched.Patch(ctx, &schedulerpb.PatchScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.PatchScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Patch: &schedulepb.SchedulePatch{ + Pause: "user pause during migration", + }, + }, + }) + require.NoError(t, err) + require.True(t, sched.Schedule.State.Paused) +} + +func TestUpdate_RejectedDuringMigration(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + + _, err := sched.MigrateToWorkflow(ctx, &schedulerpb.MigrateToWorkflowRequest{ + NamespaceId: namespaceID, + ScheduleId: scheduleID, + }) + require.NoError(t, err) + + // Update should be rejected outright when a migration is pending. 
+ newSchedule := defaultSchedule() + newSchedule.State.Paused = false + newSchedule.State.Notes = "user unpaused" + + _, err = sched.Update(ctx, &schedulerpb.UpdateScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.UpdateScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Schedule: newSchedule, + }, + }) + var unavailableErr *serviceerror.Unavailable + require.ErrorAs(t, err, &unavailableErr) + require.ErrorIs(t, err, scheduler.ErrMigrationPending) +} diff --git a/chasm/lib/scheduler/scheduler_nexus_completion_test.go b/chasm/lib/scheduler/scheduler_nexus_completion_test.go new file mode 100644 index 00000000000..302168f3071 --- /dev/null +++ b/chasm/lib/scheduler/scheduler_nexus_completion_test.go @@ -0,0 +1,290 @@ +package scheduler_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type nexusCompletionTestCase struct { + name string + setupInvoker func(*scheduler.Invoker) + setupScheduler func(*scheduler.Scheduler) + completion *persistencespb.ChasmNexusCompletion + expectPaused bool + expectStatus enumspb.WorkflowExecutionStatus + expectNoOp bool + validateInvoker func(*testing.T, *scheduler.Invoker) + validateScheduler func(*testing.T, *scheduler.Scheduler, chasm.Context) +} + +func executeNexusCompletion(t *testing.T, tc nexusCompletionTestCase) { + sched, ctx, node := setupSchedulerForTest(t) + + invoker := sched.Invoker.Get(ctx) + + if tc.setupInvoker != nil { + tc.setupInvoker(invoker) + } + if tc.setupScheduler != nil { + tc.setupScheduler(sched) + } + + initialLastCompletion := 
sched.LastCompletionResult.Get(ctx) + + err := sched.HandleNexusCompletion(ctx, tc.completion) + require.NoError(t, err) + + _, err = node.CloseTransaction() + require.NoError(t, err) + + readCtx := chasm.NewContext(context.Background(), node) + + if tc.expectNoOp { + currentLastCompletion := sched.LastCompletionResult.Get(readCtx) + require.Equal(t, initialLastCompletion, currentLastCompletion) + return + } + + lastCompletion := sched.LastCompletionResult.Get(readCtx) + require.NotNil(t, lastCompletion) + + if tc.completion.GetSuccess() != nil { + require.NotNil(t, lastCompletion.GetSuccess()) + } else if tc.completion.GetFailure() != nil { + require.NotNil(t, lastCompletion.GetFailure()) + } + + require.Equal(t, tc.expectPaused, sched.Schedule.State.Paused) + if tc.expectPaused { + require.NotEmpty(t, sched.Schedule.State.Notes) + require.Contains(t, sched.Schedule.State.Notes, "wf-1") + } + + // Check that workflow ID lookup now returns empty (request completed) + require.Empty(t, invoker.RunningWorkflowID(tc.completion.RequestId)) + + // If we expect a specific status, verify the BufferedStart has Completed set + if tc.expectStatus != enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED { + found := false + for _, start := range invoker.BufferedStarts { + if start.GetWorkflowId() == "wf-1" && start.GetCompleted() != nil { + require.Equal(t, tc.expectStatus, start.GetCompleted().GetStatus()) + found = true + break + } + } + require.True(t, found, "expected to find completed BufferedStart with workflow ID wf-1") + } + + if tc.validateInvoker != nil { + tc.validateInvoker(t, invoker) + } + if tc.validateScheduler != nil { + tc.validateScheduler(t, sched, readCtx) + } +} + +// TestHandleNexusCompletion_Success verifies that a successful workflow completion +// is properly recorded with the success payload and workflow status is updated. 
+func TestHandleNexusCompletion_Success(t *testing.T) { + tc := nexusCompletionTestCase{ + name: "successful completion", + setupInvoker: func(invoker *scheduler.Invoker) { + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + completion: &persistencespb.ChasmNexusCompletion{ + RequestId: "req-1", + Outcome: &persistencespb.ChasmNexusCompletion_Success{ + Success: &commonpb.Payload{Data: []byte("success-data")}, + }, + CloseTime: timestamppb.New(time.Now()), + }, + expectPaused: false, + expectStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + } + + executeNexusCompletion(t, tc) +} + +// TestHandleNexusCompletion_Failure verifies that a failed workflow completion +// is properly recorded with the failure payload and workflow status is updated. +func TestHandleNexusCompletion_Failure(t *testing.T) { + tc := nexusCompletionTestCase{ + name: "failed completion", + setupInvoker: func(invoker *scheduler.Invoker) { + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + completion: &persistencespb.ChasmNexusCompletion{ + RequestId: "req-1", + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{ + Message: "workflow failed", + }, + }, + CloseTime: timestamppb.New(time.Now()), + }, + expectPaused: false, + expectStatus: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + } + + executeNexusCompletion(t, tc) +} + +// TestHandleNexusCompletion_PauseOnFailure verifies that when PauseOnFailure is enabled, +// a workflow failure causes the schedule to be paused and notes to be set. 
+func TestHandleNexusCompletion_PauseOnFailure(t *testing.T) { + tc := nexusCompletionTestCase{ + name: "pause on failure", + setupInvoker: func(invoker *scheduler.Invoker) { + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + setupScheduler: func(sched *scheduler.Scheduler) { + sched.Schedule.Policies.PauseOnFailure = true + }, + completion: &persistencespb.ChasmNexusCompletion{ + RequestId: "req-1", + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{ + Message: "workflow failed", + }, + }, + CloseTime: timestamppb.New(time.Now()), + }, + expectPaused: true, + expectStatus: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + } + + executeNexusCompletion(t, tc) +} + +// TestHandleNexusCompletion_Idempotent verifies that handling a completion for an +// already-processed request ID (not in BufferedStarts) is a no-op. +func TestHandleNexusCompletion_Idempotent(t *testing.T) { + tc := nexusCompletionTestCase{ + name: "idempotent completion", + setupInvoker: func(invoker *scheduler.Invoker) { + // Empty BufferedStarts - request was already processed + invoker.BufferedStarts = []*schedulespb.BufferedStart{} + }, + completion: &persistencespb.ChasmNexusCompletion{ + RequestId: "req-1", + Outcome: &persistencespb.ChasmNexusCompletion_Success{ + Success: &commonpb.Payload{Data: []byte("success-data")}, + }, + CloseTime: timestamppb.New(time.Now()), + }, + expectNoOp: true, + } + + executeNexusCompletion(t, tc) +} + +// TestHandleNexusCompletion_Canceled verifies that a canceled workflow completion +// is properly recorded with CANCELED status. 
+func TestHandleNexusCompletion_Canceled(t *testing.T) { + tc := nexusCompletionTestCase{ + name: "canceled completion", + setupInvoker: func(invoker *scheduler.Invoker) { + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + completion: &persistencespb.ChasmNexusCompletion{ + RequestId: "req-1", + Outcome: &persistencespb.ChasmNexusCompletion_Failure{ + Failure: &failurepb.Failure{ + Message: "workflow canceled", + FailureInfo: &failurepb.Failure_CanceledFailureInfo{ + CanceledFailureInfo: &failurepb.CanceledFailureInfo{}, + }, + }, + }, + CloseTime: timestamppb.New(time.Now()), + }, + expectPaused: false, + expectStatus: enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, + } + + executeNexusCompletion(t, tc) +} + +// TestHandleNexusCompletion_CompletionBeforeStart verifies that a workflow can +// complete before its start is recorded (workflow has a BufferedStart but no RunId yet). 
+func TestHandleNexusCompletion_CompletionBeforeStart(t *testing.T) { + desiredTime := time.Now() + tc := nexusCompletionTestCase{ + name: "completion before start", + setupInvoker: func(invoker *scheduler.Invoker) { + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + Attempt: 1, + ActualTime: timestamppb.New(desiredTime), + DesiredTime: timestamppb.New(desiredTime), + // No RunId - workflow hasn't been started yet in our records + }, + } + }, + completion: &persistencespb.ChasmNexusCompletion{ + RequestId: "req-1", + Outcome: &persistencespb.ChasmNexusCompletion_Success{ + Success: &commonpb.Payload{Data: []byte("success-data")}, + }, + CloseTime: timestamppb.New(time.Now()), + }, + expectPaused: false, + expectStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + validateInvoker: func(t *testing.T, invoker *scheduler.Invoker) { + require.Len(t, invoker.BufferedStarts, 1) + require.NotNil(t, invoker.BufferedStarts[0].Completed) + }, + } + + executeNexusCompletion(t, tc) +} diff --git a/chasm/lib/scheduler/scheduler_record_completed_action_test.go b/chasm/lib/scheduler/scheduler_record_completed_action_test.go new file mode 100644 index 00000000000..dd9cd4db70d --- /dev/null +++ b/chasm/lib/scheduler/scheduler_record_completed_action_test.go @@ -0,0 +1,230 @@ +package scheduler_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type recordCompletedActionTestCase struct { + name string + setupScheduler func(*scheduler.Scheduler, chasm.MutableContext) + requestID string + completed *schedulespb.CompletedResult + validate func(*testing.T, *scheduler.Scheduler, chasm.Context) +} + +func executeRecordCompletedAction(t *testing.T, tc 
recordCompletedActionTestCase) { + sched, ctx, node := setupSchedulerForTest(t) + + if tc.setupScheduler != nil { + tc.setupScheduler(sched, ctx) + } + + sched.RecordCompletedAction(ctx, tc.completed, tc.requestID) + + _, err := node.CloseTransaction() + require.NoError(t, err) + + readCtx := chasm.NewContext(context.Background(), node) + if tc.validate != nil { + tc.validate(t, sched, readCtx) + } +} + +// TestRecordCompletedAction_SingleRunningWorkflow verifies that when +// a workflow exists in BufferedStarts with a RunId (running), completing it +// sets the Completed field. +func TestRecordCompletedAction_SingleRunningWorkflow(t *testing.T) { + closeTime := time.Now() + tc := recordCompletedActionTestCase{ + name: "single running workflow", + setupScheduler: func(sched *scheduler.Scheduler, ctx chasm.MutableContext) { + invoker := sched.Invoker.Get(ctx) + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + requestID: "req-1", + completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + CloseTime: timestamppb.New(closeTime), + }, + validate: func(t *testing.T, sched *scheduler.Scheduler, ctx chasm.Context) { + invoker := sched.Invoker.Get(ctx) + require.Len(t, invoker.BufferedStarts, 1) + require.NotNil(t, invoker.BufferedStarts[0].Completed) + require.Equal(t, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, invoker.BufferedStarts[0].Completed.Status) + }, + } + + executeRecordCompletedAction(t, tc) +} + +// TestRecordCompletedAction_MultipleWorkflows verifies that when multiple +// workflows exist in BufferedStarts, only the one with the matching requestID +// is marked as completed. 
+func TestRecordCompletedAction_MultipleWorkflows(t *testing.T) { + closeTime := time.Now() + tc := recordCompletedActionTestCase{ + name: "multiple workflows, complete one", + setupScheduler: func(sched *scheduler.Scheduler, ctx chasm.MutableContext) { + invoker := sched.Invoker.Get(ctx) + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-2 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-90 * time.Second)), + }, + { + RequestId: "req-2", + WorkflowId: "wf-2", + RunId: "run-2", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + requestID: "req-1", + completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, + CloseTime: timestamppb.New(closeTime), + }, + validate: func(t *testing.T, sched *scheduler.Scheduler, ctx chasm.Context) { + invoker := sched.Invoker.Get(ctx) + require.Len(t, invoker.BufferedStarts, 2) + + // Find workflows by RequestId since applyCompletedRetention reorders + // (non-completed first, then completed) + var req1Start, req2Start *schedulespb.BufferedStart + for _, start := range invoker.BufferedStarts { + switch start.RequestId { + case "req-1": + req1Start = start + case "req-2": + req2Start = start + default: + } + } + require.NotNil(t, req1Start) + require.NotNil(t, req2Start) + + // First workflow (req-1) should be completed + require.NotNil(t, req1Start.Completed) + require.Equal(t, enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, req1Start.Completed.Status) + + // Second workflow (req-2) should still be running + require.Nil(t, req2Start.Completed) + }, + } + + executeRecordCompletedAction(t, tc) +} + +// TestRecordCompletedAction_UpdatesDesiredTimeOnNextPending verifies that +// completing a workflow updates the DesiredTime on the first pending start +// 
(Attempt == 0) to the close time of the completed workflow. +func TestRecordCompletedAction_UpdatesDesiredTimeOnNextPending(t *testing.T) { + closeTime := time.Now() + tc := recordCompletedActionTestCase{ + name: "updates DesiredTime on next pending start", + setupScheduler: func(sched *scheduler.Scheduler, ctx chasm.MutableContext) { + invoker := sched.Invoker.Get(ctx) + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-2 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-90 * time.Second)), + }, + { + RequestId: "req-2", + WorkflowId: "wf-2", + Attempt: 0, // pending + ActualTime: timestamppb.New(time.Now()), + DesiredTime: timestamppb.New(time.Now()), + }, + } + }, + requestID: "req-1", + completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + CloseTime: timestamppb.New(closeTime), + }, + validate: func(t *testing.T, sched *scheduler.Scheduler, ctx chasm.Context) { + invoker := sched.Invoker.Get(ctx) + require.Len(t, invoker.BufferedStarts, 2) + + // Find the pending start (req-2) by RequestId since applyCompletedRetention reorders + // (non-completed first, then completed) + var req2Start *schedulespb.BufferedStart + for _, start := range invoker.BufferedStarts { + if start.RequestId == "req-2" { + req2Start = start + break + } + } + require.NotNil(t, req2Start) + + // Pending start (req-2) should have DesiredTime updated to closeTime + require.Equal(t, closeTime.Unix(), req2Start.DesiredTime.AsTime().Unix()) + }, + } + + executeRecordCompletedAction(t, tc) +} + +// TestRecordCompletedAction_NoMatchingRequest verifies that when the requestID +// doesn't match any BufferedStart, no changes are made. 
+func TestRecordCompletedAction_NoMatchingRequest(t *testing.T) { + closeTime := time.Now() + tc := recordCompletedActionTestCase{ + name: "no matching request", + setupScheduler: func(sched *scheduler.Scheduler, ctx chasm.MutableContext) { + invoker := sched.Invoker.Get(ctx) + invoker.BufferedStarts = []*schedulespb.BufferedStart{ + { + RequestId: "req-1", + WorkflowId: "wf-1", + RunId: "run-1", + Attempt: 1, + ActualTime: timestamppb.New(time.Now().Add(-1 * time.Minute)), + StartTime: timestamppb.New(time.Now().Add(-30 * time.Second)), + }, + } + }, + requestID: "req-nonexistent", + completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + CloseTime: timestamppb.New(closeTime), + }, + validate: func(t *testing.T, sched *scheduler.Scheduler, ctx chasm.Context) { + invoker := sched.Invoker.Get(ctx) + require.Len(t, invoker.BufferedStarts, 1) + // Original workflow should still be running (not completed) + require.Nil(t, invoker.BufferedStarts[0].Completed) + }, + } + + executeRecordCompletedAction(t, tc) +} diff --git a/chasm/lib/scheduler/scheduler_tasks.go b/chasm/lib/scheduler/scheduler_tasks.go new file mode 100644 index 00000000000..7259c746ec0 --- /dev/null +++ b/chasm/lib/scheduler/scheduler_tasks.go @@ -0,0 +1,284 @@ +package scheduler + +import ( + "context" + "errors" + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/resource" + "go.uber.org/fx" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type SchedulerIdleTaskHandlerOptions struct { + fx.In + + Config *Config +} + 
+type SchedulerIdleTaskHandler struct { + chasm.PureTaskHandlerBase + config *Config +} + +func NewSchedulerIdleTaskHandler(opts SchedulerIdleTaskHandlerOptions) *SchedulerIdleTaskHandler { + return &SchedulerIdleTaskHandler{ + config: opts.Config, + } +} + +func (r *SchedulerIdleTaskHandler) Execute( + ctx chasm.MutableContext, + scheduler *Scheduler, + _ chasm.TaskAttributes, + _ *schedulerpb.SchedulerIdleTask, +) error { + scheduler.Closed = true + return nil +} + +func (r *SchedulerIdleTaskHandler) Validate( + ctx chasm.Context, + scheduler *Scheduler, + taskAttrs chasm.TaskAttributes, + task *schedulerpb.SchedulerIdleTask, +) (bool, error) { + idleTimeTotal := task.IdleTimeTotal.AsDuration() + idleExpiration, isIdle := scheduler.getIdleExpiration(ctx, idleTimeTotal, time.Time{}) + + // If the scheduler has since woken up, or its idle expiration time changed, this + // task must be obsolete. + if !isIdle || idleExpiration.Compare(taskAttrs.ScheduledTime) != 0 { + return false, nil + } + + return !scheduler.Closed, nil +} + +type SchedulerCallbacksTaskHandlerOptions struct { + fx.In + + Config *Config + HistoryClient resource.HistoryClient + FrontendClient workflowservice.WorkflowServiceClient +} + +type SchedulerCallbacksTaskHandler struct { + chasm.SideEffectTaskHandlerBase[*schedulerpb.SchedulerCallbacksTask] + config *Config + historyClient resource.HistoryClient + frontendClient workflowservice.WorkflowServiceClient +} + +func NewSchedulerCallbacksTaskHandler(opts SchedulerCallbacksTaskHandlerOptions) *SchedulerCallbacksTaskHandler { + return &SchedulerCallbacksTaskHandler{ + config: opts.Config, + historyClient: opts.HistoryClient, + frontendClient: opts.FrontendClient, + } +} + +// watchResult holds the outcome of watchRunningStart for a single BufferedStart. +// A nil completed field means the callback was successfully attached and the +// workflow is still running. 
+type watchResult struct { + completed *schedulespb.CompletedResult +} + +func (r *SchedulerCallbacksTaskHandler) Execute( + ctx context.Context, + schedulerRef chasm.ComponentRef, + _ chasm.TaskAttributes, + _ *schedulerpb.SchedulerCallbacksTask, +) error { + var scheduler *Scheduler + var starts []*schedulespb.BufferedStart + var callback *commonpb.Callback + + // Read scheduler state and generate the Nexus callback token. + _, err := chasm.ReadComponent( + ctx, + schedulerRef, + func(s *Scheduler, ctx chasm.Context, _ any) (struct{}, error) { + scheduler = &Scheduler{ + SchedulerState: common.CloneProto(s.SchedulerState), + } + + invoker := s.Invoker.Get(ctx) + for _, start := range invoker.BufferedStarts { + if needsCallback(start) { + starts = append(starts, common.CloneProto(start)) + } + } + + cb, err := chasm.GenerateNexusCallback(ctx, s) + if err != nil { + return struct{}{}, err + } + callback = common.CloneProto(cb) + + return struct{}{}, nil + }, + nil, + ) + if err != nil { + return fmt.Errorf("failed to read component: %w", err) + } + + // Attach callbacks and check workflow status. + results := make(map[string]*watchResult, len(starts)) + for _, start := range starts { + result, err := r.watchRunningStart(ctx, scheduler, start, callback) + if err != nil { + return err + } + results[start.RequestId] = result + } + + // Apply results to the invoker's BufferedStarts and fire tasks. + _, _, err = chasm.UpdateComponent( + ctx, + schedulerRef, + func(s *Scheduler, ctx chasm.MutableContext, _ any) (chasm.NoValue, error) { + generator := s.Generator.Get(ctx) + invoker := s.Invoker.Get(ctx) + + for _, start := range invoker.BufferedStarts { + if result, ok := results[start.RequestId]; ok { + start.HasCallback = true + if result.completed != nil { + start.Completed = result.completed + } + } + } + + // Now that running workflow state has been refreshed, scheduler tasks can be + // fired. 
+ invoker.addTasks(ctx) + generator.Generate(ctx) + + return nil, nil + }, + nil, + ) + if err != nil { + return fmt.Errorf("failed to update component state: %w", err) + } + + return nil +} + +// watchRunningStart will attach a Nexus completion callback to a running +// BufferedStart. If the start's workflow has already closed, the start is updated +// to indicate it has completed. Intended for migration/anti-entropy cases. +func (r *SchedulerCallbacksTaskHandler) watchRunningStart( + ctx context.Context, + scheduler *Scheduler, + start *schedulespb.BufferedStart, + callback *commonpb.Callback, +) (*watchResult, error) { + // Describe the workflow to ensure it exists and is still running. + descResp, err := r.historyClient.DescribeWorkflowExecution(ctx, &historyservice.DescribeWorkflowExecutionRequest{ + NamespaceId: scheduler.NamespaceId, + Request: &workflowservice.DescribeWorkflowExecutionRequest{ + Namespace: scheduler.Namespace, + Execution: &commonpb.WorkflowExecution{ + WorkflowId: start.WorkflowId, + RunId: start.RunId, + }, + }, + }) + if err != nil { + // Fixed mojibake: "&notFoundErr" was mis-encoded as "¬FoundErr" ("&not" -> U+00AC); + // errors.As requires a pointer to the target error type. + var notFoundErr *serviceerror.NotFound + if errors.As(err, &notFoundErr) { + return &watchResult{ + completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + CloseTime: timestamppb.Now(), + }, + }, nil + } + return nil, err + } + + wfInfo := descResp.GetWorkflowExecutionInfo() + wfProgressing := wfInfo.GetStatus() == enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING || + wfInfo.GetStatus() == enumspb.WORKFLOW_EXECUTION_STATUS_PAUSED + + if !wfProgressing { + return &watchResult{ + completed: &schedulespb.CompletedResult{ + Status: wfInfo.GetStatus(), + CloseTime: wfInfo.GetCloseTime(), + }, + }, nil + } + + // Workflow is still running. Attach a Nexus completion callback by issuing + // a StartWorkflowExecution with USE_EXISTING conflict policy.
REJECT_DUPLICATE + // reuse policy prevents accidentally starting a new workflow if the original + // completes between the describe and this call. + requestSpec := scheduler.GetSchedule().GetAction().GetStartWorkflow() + + _, err = r.frontendClient.StartWorkflowExecution(ctx, &workflowservice.StartWorkflowExecutionRequest{ + Namespace: scheduler.Namespace, + WorkflowId: start.WorkflowId, + RequestId: start.RequestId, + Identity: scheduler.identity(), + WorkflowType: requestSpec.GetWorkflowType(), + TaskQueue: requestSpec.GetTaskQueue(), + WorkflowIdConflictPolicy: enumspb.WORKFLOW_ID_CONFLICT_POLICY_USE_EXISTING, + WorkflowIdReusePolicy: enumspb.WORKFLOW_ID_REUSE_POLICY_REJECT_DUPLICATE, + CompletionCallbacks: []*commonpb.Callback{callback}, + OnConflictOptions: &workflowpb.OnConflictOptions{ + AttachRequestId: true, + AttachCompletionCallbacks: true, + }, + }) + if err != nil { + // WorkflowExecutionAlreadyStarted: workflow completed between describe + // and this attach call (REJECT_DUPLICATE rejects completed workflows). + if isAlreadyStartedError(err) { + return &watchResult{ + completed: &schedulespb.CompletedResult{ + Status: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + CloseTime: timestamppb.Now(), + }, + }, nil + } + return nil, err + } + + // Callback attached successfully. 
+ return &watchResult{}, nil +} + +func (r *SchedulerCallbacksTaskHandler) Validate( + ctx chasm.Context, + scheduler *Scheduler, + taskAttrs chasm.TaskAttributes, + task *schedulerpb.SchedulerCallbacksTask, +) (bool, error) { + invoker := scheduler.Invoker.Get(ctx) + for _, start := range invoker.BufferedStarts { + if needsCallback(start) { + return true, nil + } + } + return false, nil +} + +func needsCallback(start *schedulespb.BufferedStart) bool { + return !start.HasCallback && start.GetRunId() != "" && start.GetCompleted() == nil +} diff --git a/chasm/lib/scheduler/scheduler_test.go b/chasm/lib/scheduler/scheduler_test.go new file mode 100644 index 00000000000..0aa537a1026 --- /dev/null +++ b/chasm/lib/scheduler/scheduler_test.go @@ -0,0 +1,314 @@ +package scheduler_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + schedulepb "go.temporal.io/api/schedule/v1" + taskqueuepb "go.temporal.io/api/taskqueue/v1" + workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/api/workflowservice/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + schedulerpb "go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "go.temporal.io/server/common/testing/protorequire" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func TestListInfo(t *testing.T) { + scheduler, ctx, _ := setupSchedulerForTest(t) + + // Generator maintains the FutureActionTimes list, set that up first. + generator := scheduler.Generator.Get(ctx) + expectedFutureTimes := []*timestamppb.Timestamp{timestamppb.Now(), timestamppb.Now()} + generator.FutureActionTimes = expectedFutureTimes + + listInfo := scheduler.ListInfo(ctx) + + // Should return a populated info block. 
+ require.NotNil(t, listInfo) + require.NotNil(t, listInfo.Spec) + require.NotEmpty(t, listInfo.Spec.Interval) + protorequire.ProtoEqual(t, listInfo.Spec.Interval[0], scheduler.Schedule.Spec.Interval[0]) + require.NotNil(t, listInfo.WorkflowType) + require.NotEmpty(t, listInfo.FutureActionTimes) + require.Equal(t, expectedFutureTimes, listInfo.FutureActionTimes) +} + +func TestCreateSchedulerFromMigration(t *testing.T) { + now := time.Now().UTC() + _, _, node := setupSchedulerForTest(t) + + req := &schedulerpb.CreateFromMigrationStateRequest{ + NamespaceId: namespaceID, + State: &schedulerpb.SchedulerMigrationState{ + SchedulerState: &schedulerpb.SchedulerState{ + Schedule: defaultSchedule(), + Info: &schedulepb.ScheduleInfo{ActionCount: 10}, + Namespace: namespace, + NamespaceId: namespaceID, + ScheduleId: scheduleID, + ConflictToken: 42, + }, + GeneratorState: &schedulerpb.GeneratorState{ + LastProcessedTime: timestamppb.New(now), + }, + InvokerState: &schedulerpb.InvokerState{ + BufferedStarts: []*schedulespb.BufferedStart{ + { + NominalTime: timestamppb.New(now), + ActualTime: timestamppb.New(now), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, + RequestId: "req-1", + WorkflowId: "wf-1", + }, + { + NominalTime: timestamppb.New(now.Add(time.Minute)), + ActualTime: timestamppb.New(now.Add(time.Minute)), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + RequestId: "req-2", + WorkflowId: "wf-2", + }, + }, + }, + Backfillers: map[string]*schedulerpb.BackfillerState{ + "bf-1": { + BackfillId: "bf-1", + Request: &schedulerpb.BackfillerState_BackfillRequest{ + BackfillRequest: &schedulepb.BackfillRequest{ + StartTime: timestamppb.New(now.Add(-time.Hour)), + EndTime: timestamppb.New(now), + OverlapPolicy: enumspb.SCHEDULE_OVERLAP_POLICY_ALLOW_ALL, + }, + }, + }, + }, + LastCompletionResult: &schedulerpb.LastCompletionResult{ + Success: &commonpb.Payload{Data: []byte("result-data")}, + }, + SearchAttributes: map[string]*commonpb.Payload{ + 
"CustomAttr": {Data: []byte("attr-value")}, + }, + Memo: map[string]*commonpb.Payload{ + "MemoKey": {Data: []byte("memo-value")}, + }, + }, + } + + ctx := chasm.NewMutableContext(context.Background(), node) + sched, err := scheduler.CreateSchedulerFromMigration(ctx, req) + require.NoError(t, err) + + // Scheduler state + require.Equal(t, namespace, sched.Namespace) + require.Equal(t, namespaceID, sched.NamespaceId) + require.Equal(t, scheduleID, sched.ScheduleId) + require.Equal(t, int64(42), sched.ConflictToken) + require.False(t, sched.Closed) + + // Generator + generator := sched.Generator.Get(ctx) + require.Equal(t, now, generator.LastProcessedTime.AsTime()) + + // Invoker buffered starts + invoker := sched.Invoker.Get(ctx) + require.Len(t, invoker.BufferedStarts, 2) + require.Equal(t, "req-1", invoker.BufferedStarts[0].RequestId) + require.Equal(t, "req-2", invoker.BufferedStarts[1].RequestId) + + // Backfillers + require.Len(t, sched.Backfillers, 1) + bf := sched.Backfillers["bf-1"].Get(ctx) + require.Equal(t, "bf-1", bf.BackfillId) + + // Last completion result + lastResult := sched.LastCompletionResult.Get(ctx) + require.Equal(t, []byte("result-data"), lastResult.Success.Data) + + require.NoError(t, node.SetRootComponent(sched)) + _, err = node.CloseTransaction() + require.NoError(t, err) +} + +func TestUpdate_WithMemo(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + memoValue := &commonpb.Payload{Data: []byte("test-value")} + + _, err := sched.Update(ctx, &schedulerpb.UpdateScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.UpdateScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Schedule: defaultSchedule(), + Memo: &commonpb.Memo{ + Fields: map[string]*commonpb.Payload{"key1": memoValue}, + }, + }, + }) + require.NoError(t, err) + + visibility := sched.Visibility.Get(ctx) + memo := visibility.CustomMemo(ctx) + protorequire.ProtoEqual(t, memoValue, memo["key1"]) +} + +func 
TestUpdate_WithNilMemo(t *testing.T) { + sched, ctx, node := setupSchedulerForTest(t) + + // Set initial memo. + visibility := sched.Visibility.Get(ctx) + visibility.MergeCustomMemo(ctx, map[string]*commonpb.Payload{ + "existing": {Data: []byte("value")}, + }) + _, err := node.CloseTransaction() + require.NoError(t, err) + + // Update without memo (nil) should preserve existing memo. + ctx = chasm.NewMutableContext(context.Background(), node) + _, err = sched.Update(ctx, &schedulerpb.UpdateScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.UpdateScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Schedule: defaultSchedule(), + }, + }) + require.NoError(t, err) + + visibility = sched.Visibility.Get(ctx) + memo := visibility.CustomMemo(ctx) + protorequire.ProtoEqual(t, &commonpb.Payload{Data: []byte("value")}, memo["existing"]) +} + +func TestUpdate_MemoReplaceSemantics(t *testing.T) { + sched, ctx, node := setupSchedulerForTest(t) + + // Set initial memo with keys A and B. + visibility := sched.Visibility.Get(ctx) + visibility.MergeCustomMemo(ctx, map[string]*commonpb.Payload{ + "A": {Data: []byte("1")}, + "B": {Data: []byte("2")}, + }) + _, err := node.CloseTransaction() + require.NoError(t, err) + + // Update with only C: should fully replace memo (A and B are gone). 
+ ctx = chasm.NewMutableContext(context.Background(), node) + _, err = sched.Update(ctx, &schedulerpb.UpdateScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.UpdateScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Schedule: defaultSchedule(), + Memo: &commonpb.Memo{ + Fields: map[string]*commonpb.Payload{ + "C": {Data: []byte("3")}, + }, + }, + }, + }) + require.NoError(t, err) + + visibility = sched.Visibility.Get(ctx) + memo := visibility.CustomMemo(ctx) + require.Nil(t, memo["A"], "A should be gone after replace") + require.Nil(t, memo["B"], "B should be gone after replace") + protorequire.ProtoEqual(t, &commonpb.Payload{Data: []byte("3")}, memo["C"]) + + // Update with empty memo: should clear all memo fields. + _, err = node.CloseTransaction() + require.NoError(t, err) + ctx = chasm.NewMutableContext(context.Background(), node) + _, err = sched.Update(ctx, &schedulerpb.UpdateScheduleRequest{ + NamespaceId: namespaceID, + FrontendRequest: &workflowservice.UpdateScheduleRequest{ + Namespace: namespace, + ScheduleId: scheduleID, + Schedule: defaultSchedule(), + Memo: &commonpb.Memo{ + Fields: map[string]*commonpb.Payload{}, + }, + }, + }) + require.NoError(t, err) + + visibility = sched.Visibility.Get(ctx) + memo = visibility.CustomMemo(ctx) + require.Empty(t, memo, "memo should be empty after replace with empty map") +} + +func TestCreateSchedulerFromMigration_EmptyState(t *testing.T) { + _, _, node := setupSchedulerForTest(t) + + req := &schedulerpb.CreateFromMigrationStateRequest{ + NamespaceId: namespaceID, + State: &schedulerpb.SchedulerMigrationState{ + SchedulerState: &schedulerpb.SchedulerState{ + Schedule: defaultSchedule(), + Info: &schedulepb.ScheduleInfo{}, + Namespace: namespace, + NamespaceId: namespaceID, + ScheduleId: scheduleID, + ConflictToken: 1, + }, + GeneratorState: &schedulerpb.GeneratorState{}, + InvokerState: &schedulerpb.InvokerState{}, + }, + } + + ctx := 
chasm.NewMutableContext(context.Background(), node) + sched, err := scheduler.CreateSchedulerFromMigration(ctx, req) + require.NoError(t, err) + + require.Equal(t, int64(1), sched.ConflictToken) + require.Empty(t, sched.Backfillers) + + invoker := sched.Invoker.Get(ctx) + require.Empty(t, invoker.BufferedStarts) + + require.NoError(t, node.SetRootComponent(sched)) + _, err = node.CloseTransaction() + require.NoError(t, err) +} + +func TestContextMetadata(t *testing.T) { + t.Run("returns workflow type and task queue", func(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + sched.Schedule.Action = &schedulepb.ScheduleAction{ + Action: &schedulepb.ScheduleAction_StartWorkflow{ + StartWorkflow: &workflowpb.NewWorkflowExecutionInfo{ + WorkflowType: &commonpb.WorkflowType{Name: "my-workflow"}, + TaskQueue: &taskqueuepb.TaskQueue{Name: "my-task-queue"}, + }, + }, + } + + md := sched.ContextMetadata(ctx) + require.Equal(t, map[string]string{ + "workflow-type": "my-workflow", + "workflow-task-queue": "my-task-queue", + }, md) + }) + + t.Run("returns only workflow type when task queue is empty", func(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + // defaultSchedule sets WorkflowType but no TaskQueue + md := sched.ContextMetadata(ctx) + require.Equal(t, map[string]string{ + "workflow-type": "scheduled-wf-type", + }, md) + }) + + t.Run("returns nil when action is empty", func(t *testing.T) { + sched, ctx, _ := setupSchedulerForTest(t) + sched.Schedule.Action = nil + + md := sched.ContextMetadata(ctx) + require.Nil(t, md) + }) +} diff --git a/chasm/lib/scheduler/sentinel_test.go b/chasm/lib/scheduler/sentinel_test.go new file mode 100644 index 00000000000..b5e31d77000 --- /dev/null +++ b/chasm/lib/scheduler/sentinel_test.go @@ -0,0 +1,96 @@ +package scheduler_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + 
"go.temporal.io/server/chasm/lib/scheduler/gen/schedulerpb/v1" + "google.golang.org/protobuf/types/known/durationpb" +) + +func TestNewSentinel(t *testing.T) { + sentinel, ctx, _ := setupSentinelForTest(t) + + require.True(t, sentinel.IsSentinel()) + require.NotNil(t, sentinel.Info.GetCreateTime()) + require.False(t, sentinel.Info.CreateTime.AsTime().IsZero()) + + // Sentinels should have no Visibility component, which prevents them from + // appearing in ListSchedules results. + _, ok := sentinel.Visibility.TryGet(ctx) + require.False(t, ok) +} + +func TestSentinelIdleTask_Validate_Valid(t *testing.T) { + sentinel, ctx, _ := setupSentinelForTest(t) + + executor := scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: defaultConfig(), + }) + + task := &schedulerpb.SchedulerIdleTask{ + IdleTimeTotal: durationpb.New(scheduler.SentinelIdleTime), + } + taskAttrs := chasm.TaskAttributes{ + ScheduledTime: sentinel.Info.CreateTime.AsTime().Add(scheduler.SentinelIdleTime), + } + + isValid, err := executor.Validate(ctx, sentinel, taskAttrs, task) + require.NoError(t, err) + require.True(t, isValid) +} + +func TestSentinelIdleTask_Validate_InvalidAfterClosed(t *testing.T) { + sentinel, ctx, _ := setupSentinelForTest(t) + sentinel.Closed = true + + executor := scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: defaultConfig(), + }) + + task := &schedulerpb.SchedulerIdleTask{ + IdleTimeTotal: durationpb.New(scheduler.SentinelIdleTime), + } + taskAttrs := chasm.TaskAttributes{ + ScheduledTime: sentinel.Info.CreateTime.AsTime().Add(scheduler.SentinelIdleTime), + } + + isValid, err := executor.Validate(ctx, sentinel, taskAttrs, task) + require.NoError(t, err) + require.False(t, isValid) +} + +func TestSentinelIdleTask_Validate_MismatchedScheduledTime(t *testing.T) { + sentinel, ctx, _ := setupSentinelForTest(t) + + executor := 
scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: defaultConfig(), + }) + + task := &schedulerpb.SchedulerIdleTask{ + IdleTimeTotal: durationpb.New(scheduler.SentinelIdleTime), + } + taskAttrs := chasm.TaskAttributes{ + ScheduledTime: sentinel.Info.CreateTime.AsTime().Add(99 * time.Hour), + } + + isValid, err := executor.Validate(ctx, sentinel, taskAttrs, task) + require.NoError(t, err) + require.False(t, isValid) +} + +func TestSentinelIdleTask_Execute(t *testing.T) { + sentinel, ctx, _ := setupSentinelForTest(t) + + executor := scheduler.NewSchedulerIdleTaskHandler(scheduler.SchedulerIdleTaskHandlerOptions{ + Config: defaultConfig(), + }) + + require.False(t, sentinel.Closed) + err := executor.Execute(ctx, sentinel, chasm.TaskAttributes{}, &schedulerpb.SchedulerIdleTask{}) + require.NoError(t, err) + require.True(t, sentinel.Closed) +} diff --git a/chasm/lib/scheduler/spec_processor.go b/chasm/lib/scheduler/spec_processor.go new file mode 100644 index 00000000000..61585781b80 --- /dev/null +++ b/chasm/lib/scheduler/spec_processor.go @@ -0,0 +1,206 @@ +package scheduler + +import ( + "time" + + enumspb "go.temporal.io/api/enums/v1" + schedulespb "go.temporal.io/server/api/schedule/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + schedulescommon "go.temporal.io/server/common/schedules" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "google.golang.org/protobuf/types/known/timestamppb" +) + +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination spec_processor_mock.go + +type ( + // SpecProcessor is used by the Generator and Backfiller to generate buffered + // actions according to the schedule spec. + SpecProcessor interface { + // ProcessTimeRange generates buffered actions according to the schedule spec for + // the given time range. + // + // The parameter manual is propagated to the returned BufferedStarts. 
When the limit + // is set to a non-nil pointer, it will be decremented for each buffered start, and + // the function will return early should limit reach 0. + // + // If backfillID is set, it will be used to generate request IDs. + ProcessTimeRange( + scheduler *Scheduler, + start, end time.Time, + overlapPolicy enumspb.ScheduleOverlapPolicy, + workflowID string, + backfillID string, + manual bool, + limit *int, + ) (*ProcessedTimeRange, error) + + // NextTime provides a peek at the next time in the spec following 'after'. + NextTime(scheduler *Scheduler, after time.Time) (legacyscheduler.GetNextTimeResult, error) + } + + SpecProcessorImpl struct { + config *Config + metricsHandler metrics.Handler + logger log.Logger + specBuilder *legacyscheduler.SpecBuilder + } + + ProcessedTimeRange struct { + NextWakeupTime time.Time + LastActionTime time.Time + BufferedStarts []*schedulespb.BufferedStart + // DroppedCount is the number of actions that would have been buffered but + // were dropped due to the limit being reached. Only populated when a limit + // is provided. 
+ DroppedCount int64 + } +) + +func NewSpecProcessor( + config *Config, + metricsHandler metrics.Handler, + logger log.Logger, + specBuilder *legacyscheduler.SpecBuilder, +) *SpecProcessorImpl { + return &SpecProcessorImpl{ + config: config, + metricsHandler: metricsHandler, + logger: logger, + specBuilder: specBuilder, + } +} + +func (s *SpecProcessorImpl) ProcessTimeRange( + scheduler *Scheduler, + start, end time.Time, + overlapPolicy enumspb.ScheduleOverlapPolicy, + workflowID string, + backfillID string, + manual bool, + limit *int, +) (*ProcessedTimeRange, error) { + tweakables := s.config.Tweakables(scheduler.Namespace) + metricsHandler := newTaggedMetricsHandler(s.metricsHandler, scheduler) + overlapPolicy = scheduler.resolveOverlapPolicy(overlapPolicy) + + s.logger.Debug("ProcessTimeRange", + tag.Time("start", start), + tag.Time("end", end), + tag.Any("overlap-policy", overlapPolicy), + tag.Bool("manual", manual)) + + // Peek at paused/remaining actions state and don't bother if we're not going to + // take an action now. (Don't count as missed catchup window either.) + // Skip over entire time range if paused or no actions can be taken. + // + // Manual (backfill/patch) runs are always buffered here. + if !scheduler.useScheduledAction(false) && !manual { + // Use end as last action time so that we don't reprocess time spent paused. + next, err := s.NextTime(scheduler, end) + if err != nil { + return nil, err + } + + return &ProcessedTimeRange{ + NextWakeupTime: next.Next, + LastActionTime: end, + BufferedStarts: nil, + }, nil + } + + catchupWindow := catchupWindow(scheduler, tweakables) + + // lastAction is used to set the high water mark for future ProcessTimeRange + // invocations. The code below will set a "last action" even when none is taken, + // simply to indicate that processing can permanently skip that period of time + // (e.g., it was prior to an update or past a catchup). 
+ lastAction := end + + var next legacyscheduler.GetNextTimeResult + var err error + var bufferedStarts []*schedulespb.BufferedStart + var droppedCount int64 + limitReached := false + for next, err = s.NextTime(scheduler, start); err == nil && (!next.Next.IsZero() && !next.Next.After(end)); next, err = s.NextTime(scheduler, next.Next) { + lastAction = next.Next + + if scheduler.Info.UpdateTime.AsTime().After(next.Next) && !manual { + // If we've received an update that took effect after the LastProcessedTime high + // water mark, discard actions that were scheduled to kick off before the update. + // Skip this check for manual (backfill) actions since they explicitly request + // past times. + s.logger.Info("ProcessBuffer skipped an action due to update time", + tag.Time("updateTime", scheduler.Info.UpdateTime.AsTime()), + tag.Time("droppedActionTime", next.Next)) + continue + } + + if !manual && end.Sub(next.Next) > catchupWindow { + s.logger.Info("Schedule missed catchup window", + tag.Time("now", end), + tag.Time("time", next.Next)) + metricsHandler.Counter(metrics.ScheduleMissedCatchupWindow.Name()).Record(1) + + scheduler.Info.MissedCatchupWindow++ + continue + } + + if !manual { + metricsHandler.Timer(metrics.ScheduleGenerateLatency.Name()). + Record(end.Sub(next.Next)) + } + + if limitReached { + droppedCount++ + continue + } + bufferedStarts = append(bufferedStarts, &schedulespb.BufferedStart{ + NominalTime: timestamppb.New(next.Nominal), + ActualTime: timestamppb.New(next.Next), + OverlapPolicy: overlapPolicy, + Manual: manual, + RequestId: generateRequestID(scheduler, backfillID, next.Nominal, next.Next), + WorkflowId: schedulescommon.GenerateWorkflowID(workflowID, next.Nominal), + }) + + if limit != nil { + if (*limit)--; *limit <= 0 { + // For manual (backfill) actions, break immediately so the caller + // can retry later. For automated actions, continue to count dropped. 
+ if manual { + break + } + limitReached = true + } + } + } + + return &ProcessedTimeRange{ + NextWakeupTime: next.Next, + LastActionTime: lastAction, + BufferedStarts: bufferedStarts, + }, nil +} + +func catchupWindow(s *Scheduler, tweakables Tweakables) time.Duration { + cw := s.Schedule.GetPolicies().GetCatchupWindow() + if cw == nil { + return tweakables.DefaultCatchupWindow + } + + return max(cw.AsDuration(), tweakables.MinCatchupWindow) +} + +// NextTime returns the next time result, or an error if the schedule cannot be compiled. +func (s *SpecProcessorImpl) NextTime(scheduler *Scheduler, after time.Time) (legacyscheduler.GetNextTimeResult, error) { + spec, err := scheduler.getCompiledSpec(s.specBuilder) + if err != nil { + s.logger.Error("Invalid schedule", tag.Error(err)) + return legacyscheduler.GetNextTimeResult{}, err + } + + return spec.GetNextTime(scheduler.jitterSeed(), after), nil +} diff --git a/chasm/lib/scheduler/spec_processor_mock.go b/chasm/lib/scheduler/spec_processor_mock.go new file mode 100644 index 00000000000..8a3ea91f345 --- /dev/null +++ b/chasm/lib/scheduler/spec_processor_mock.go @@ -0,0 +1,73 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: spec_processor.go +// +// Generated by this command: +// +// mockgen -package scheduler -source spec_processor.go -destination spec_processor_mock.go +// + +// Package scheduler is a generated GoMock package. +package scheduler + +import ( + reflect "reflect" + time "time" + + enums "go.temporal.io/api/enums/v1" + scheduler "go.temporal.io/server/service/worker/scheduler" + gomock "go.uber.org/mock/gomock" +) + +// MockSpecProcessor is a mock of SpecProcessor interface. +type MockSpecProcessor struct { + ctrl *gomock.Controller + recorder *MockSpecProcessorMockRecorder + isgomock struct{} +} + +// MockSpecProcessorMockRecorder is the mock recorder for MockSpecProcessor. 
+type MockSpecProcessorMockRecorder struct { + mock *MockSpecProcessor +} + +// NewMockSpecProcessor creates a new mock instance. +func NewMockSpecProcessor(ctrl *gomock.Controller) *MockSpecProcessor { + mock := &MockSpecProcessor{ctrl: ctrl} + mock.recorder = &MockSpecProcessorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSpecProcessor) EXPECT() *MockSpecProcessorMockRecorder { + return m.recorder +} + +// NextTime mocks base method. +func (m *MockSpecProcessor) NextTime(arg0 *Scheduler, after time.Time) (scheduler.GetNextTimeResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NextTime", arg0, after) + ret0, _ := ret[0].(scheduler.GetNextTimeResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NextTime indicates an expected call of NextTime. +func (mr *MockSpecProcessorMockRecorder) NextTime(arg0, after any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NextTime", reflect.TypeOf((*MockSpecProcessor)(nil).NextTime), arg0, after) +} + +// ProcessTimeRange mocks base method. +func (m *MockSpecProcessor) ProcessTimeRange(arg0 *Scheduler, start, end time.Time, overlapPolicy enums.ScheduleOverlapPolicy, workflowID, backfillID string, manual bool, limit *int) (*ProcessedTimeRange, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ProcessTimeRange", arg0, start, end, overlapPolicy, workflowID, backfillID, manual, limit) + ret0, _ := ret[0].(*ProcessedTimeRange) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ProcessTimeRange indicates an expected call of ProcessTimeRange. 
+func (mr *MockSpecProcessorMockRecorder) ProcessTimeRange(arg0, start, end, overlapPolicy, workflowID, backfillID, manual, limit any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessTimeRange", reflect.TypeOf((*MockSpecProcessor)(nil).ProcessTimeRange), arg0, start, end, overlapPolicy, workflowID, backfillID, manual, limit) +} diff --git a/chasm/lib/scheduler/spec_processor_test.go b/chasm/lib/scheduler/spec_processor_test.go new file mode 100644 index 00000000000..e4431b5ecd2 --- /dev/null +++ b/chasm/lib/scheduler/spec_processor_test.go @@ -0,0 +1,248 @@ +package scheduler_test + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" + schedulepb "go.temporal.io/api/schedule/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/scheduler" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + legacyscheduler "go.temporal.io/server/service/worker/scheduler" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +// testSpecProcessor wraps a real SpecProcessor for testing. +type testSpecProcessor struct { + scheduler.SpecProcessor + mockMetrics *metrics.MockHandler +} + +// newTestSpecProcessor creates a real SpecProcessor for tests that need actual scheduling logic. 
+func newTestSpecProcessor(ctrl *gomock.Controller) *testSpecProcessor { + mockMetrics := metrics.NewMockHandler(ctrl) + mockMetrics.EXPECT().Counter(gomock.Any()).Return(metrics.NoopCounterMetricFunc).AnyTimes() + mockMetrics.EXPECT().WithTags(gomock.Any()).Return(mockMetrics).AnyTimes() + mockMetrics.EXPECT().Timer(gomock.Any()).Return(metrics.NoopTimerMetricFunc).AnyTimes() + + return &testSpecProcessor{ + SpecProcessor: scheduler.NewSpecProcessor( + &scheduler.Config{ + Tweakables: func(_ string) scheduler.Tweakables { + return scheduler.DefaultTweakables + }, + }, + mockMetrics, + log.NewTestLogger(), + legacyscheduler.NewSpecBuilder(), + ), + } +} + +func TestProcessTimeRange_LimitedActions(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + end := time.Now() + start := end.Add(-defaultInterval) + + // A schedule with an action limit and remaining actions should buffer actions. + sched.Schedule.State.LimitedActions = true + sched.Schedule.State.RemainingActions = 1 + + res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 1) + + // When a schedule has an action limit that has been exceeded, we don't bother + // buffering additional actions. + sched.Schedule.State.RemainingActions = 0 + + res, err = processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Empty(t, res.BufferedStarts) + + // Manual starts should always be allowed. 
+ backfillID := "backfill" + res, err = processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), backfillID, true, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 1) + bufferedStart := res.BufferedStarts[0] + require.True(t, bufferedStart.Manual) + require.Contains(t, bufferedStart.RequestId, backfillID) + require.NotEmpty(t, bufferedStart.WorkflowId) +} + +func TestProcessTimeRange_UpdateAfterHighWatermark(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + // Below window would give 6 actions, but the update time halves that. + base := time.Now() + start := base.Add(-defaultInterval * 3) + end := base.Add(defaultInterval * 3) + + // Actions taking place in time before the last update time should be dropped. + sched.Info.UpdateTime = timestamppb.Now() + + res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 3) +} + +// Tests that an update between a nominal time and jittered time for a start, that doesn't +// modify that start, will still start it. 
+func TestProcessTimeRange_UpdateBetweenNominalAndJitter(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + schedule := defaultSchedule() + schedule.Policies.CatchupWindow = durationpb.New(2 * time.Hour) + schedule.Spec = &schedulepb.ScheduleSpec{ + Interval: []*schedulepb.IntervalSpec{{ + Interval: durationpb.New(1 * time.Hour), + }}, + Jitter: durationpb.New(1 * time.Hour), + } + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, schedule, nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + // Generate a start with a long jitter period. + base := time.Date(2025, 03, 31, 1, 0, 0, 0, time.UTC) + start := base.Add(-1 * time.Minute) + end := start.Add(1 * time.Hour) + + // Set our update time between the start's nominal and jittered time. + updateTime := start.Add(10 * time.Minute) + sched.Info.UpdateTime = timestamppb.New(updateTime) + + // A single start should have been buffered. + res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 1) + + // Validates the test case. + actualTime := res.BufferedStarts[0].GetActualTime().AsTime() + nominalTime := res.BufferedStarts[0].GetNominalTime().AsTime() + require.True(t, nominalTime.Before(updateTime)) + require.True(t, actualTime.After(updateTime)) +} + +func TestProcessTimeRange_CatchupWindow(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + // When an action would fall outside of the schedule's catchup window, it should + // be dropped. 
+ end := time.Now() + start := end.Add(-defaultCatchupWindow * 2) + + res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 5) +} + +func TestProcessTimeRange_Limit(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + end := time.Now() + start := end.Add(-defaultInterval * 5) + + // When a limit pointer is provided, its value should be decremented with each + // action buffered, ProcessTimeRange should return once the limit has been + // exhausted. + limit := 2 + + res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, &limit) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 2) + require.Equal(t, 0, limit) +} + +func TestProcessTimeRange_OverlapPolicy(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + end := time.Now() + start := end.Add(-defaultInterval * 5) + + // Check that a default overlap policy (SKIP) is applied, even when left unspecified. 
+ sched.Schedule.Policies.OverlapPolicy = enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED + + res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 5) + for _, b := range res.BufferedStarts { + require.Equal(t, enumspb.SCHEDULE_OVERLAP_POLICY_SKIP, b.OverlapPolicy) + } + + // Check that a specified overlap policy is applied. + overlapPolicy := enumspb.SCHEDULE_OVERLAP_POLICY_BUFFER_ALL + sched.Schedule.Policies.OverlapPolicy = overlapPolicy + + res, err = processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 5) + for _, b := range res.BufferedStarts { + require.Equal(t, overlapPolicy, b.OverlapPolicy) + } +} + +func TestProcessTimeRange_Basic(t *testing.T) { + env := newTestEnv(t) + ctx := chasm.NewMutableContext(context.Background(), env.Node) + sched, err := scheduler.NewScheduler(ctx, namespace, namespaceID, scheduleID, defaultSchedule(), nil) + require.NoError(t, err) + processor := newTestSpecProcessor(env.Ctrl) + + end := time.Now() + start := end.Add(-defaultInterval * 5) + + // Validate returned BufferedStarts for unique action times and request IDs. 
+ res, err := processor.ProcessTimeRange(sched, start, end, enumspb.SCHEDULE_OVERLAP_POLICY_UNSPECIFIED, sched.WorkflowID(), "", false, nil) + require.NoError(t, err) + require.Len(t, res.BufferedStarts, 5) + + uniqueTimes := make(map[time.Time]bool) + uniqueIDs := make(map[string]bool) + for _, b := range res.BufferedStarts { + require.False(t, b.Manual) + + actualTime := b.ActualTime.AsTime() + require.False(t, uniqueTimes[actualTime]) + require.False(t, uniqueIDs[b.RequestId]) + uniqueTimes[actualTime] = true + uniqueIDs[b.RequestId] = true + + // Validate WorkflowId format: scheduled-wf-{RFC3339 timestamp} + nominalTime := b.NominalTime.AsTime() + expectedTimestamp := nominalTime.Truncate(time.Second).Format(time.RFC3339) + require.Equal(t, "scheduled-wf-"+expectedTimestamp, b.WorkflowId) + } + + // Validate next wakeup time. + require.GreaterOrEqual(t, res.NextWakeupTime, end) + require.Less(t, res.NextWakeupTime, end.Add(defaultInterval*2)) +} diff --git a/chasm/lib/scheduler/util.go b/chasm/lib/scheduler/util.go new file mode 100644 index 00000000000..1aee82f0807 --- /dev/null +++ b/chasm/lib/scheduler/util.go @@ -0,0 +1,79 @@ +package scheduler + +import ( + "encoding/binary" + "time" + + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + schedulescommon "go.temporal.io/server/common/schedules" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +func generateRequestID(scheduler *Scheduler, backfillID string, nominal, actual time.Time) string { + return schedulescommon.GenerateRequestID( + scheduler.NamespaceId, + scheduler.ScheduleId, + scheduler.ConflictToken, + backfillID, + nominal, + actual, + ) +} + +// serializeConflictToken serializes a conflict token as a byte slice. 
+func serializeConflictToken(conflictToken int64) []byte { + token := make([]byte, 8) + binary.LittleEndian.PutUint64(token, uint64(conflictToken)) + return token +} + +// newTaggedLogger returns a logger tagged with the Scheduler's attributes. +func newTaggedLogger(baseLogger log.Logger, scheduler *Scheduler) log.Logger { + return log.With( + baseLogger, + tag.WorkflowNamespace(scheduler.Namespace), + tag.ScheduleID(scheduler.ScheduleId), + ) +} + +// newTaggedMetricsHandler returns a metrics handler tagged with the Scheduler's namespace and backend. +func newTaggedMetricsHandler(baseHandler metrics.Handler, scheduler *Scheduler) metrics.Handler { + return baseHandler.WithTags( + metrics.NamespaceTag(scheduler.Namespace), + metrics.StringTag(metrics.ScheduleBackendTag, metrics.ScheduleBackendChasm), + ) +} + +// validateTaskHighWaterMark validates a component's lastProcessedTime against a +// task timestamp. A task is valid if its scheduled time is after the high water mark. +// Immediate tasks (zero scheduled time) are always valid since they execute inline. +func validateTaskHighWaterMark( + lastProcessedTime *timestamppb.Timestamp, + scheduledAt time.Time, +) (bool, error) { + // Immediate tasks are always valid - they execute inline during the transaction. + if scheduledAt.IsZero() { + return true, nil + } + // If lastProcessedTime is not set, all scheduled tasks are valid. + if lastProcessedTime == nil || (lastProcessedTime.GetSeconds() == 0 && lastProcessedTime.GetNanos() == 0) { + return true, nil + } + // Scheduled tasks are valid if their time is after the high water mark. + return scheduledAt.After(lastProcessedTime.AsTime()), nil +} + +// jsonStringer wraps a proto.Message for lazy JSON serialization. Intended for +// debug logging structures. 
+type jsonStringer struct { + proto.Message +} + +func (j jsonStringer) String() string { + json, _ := protojson.Marshal(j.Message) + return string(json) +} diff --git a/chasm/lib/tests/fx.go b/chasm/lib/tests/fx.go new file mode 100644 index 00000000000..10de6dc39b2 --- /dev/null +++ b/chasm/lib/tests/fx.go @@ -0,0 +1,13 @@ +package tests + +import ( + "go.temporal.io/server/chasm" + "go.uber.org/fx" +) + +var Module = fx.Module( + "chasm.lib.tests", + fx.Invoke(func(registry *chasm.Registry) error { + return registry.Register(Library) + }), +) diff --git a/chasm/lib/tests/gen/testspb/v1/message.go-helpers.pb.go b/chasm/lib/tests/gen/testspb/v1/message.go-helpers.pb.go new file mode 100644 index 00000000000..3320f0f27fa --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/message.go-helpers.pb.go @@ -0,0 +1,117 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package testspb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type TestPayloadStore to the protobuf v3 wire format +func (val *TestPayloadStore) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TestPayloadStore from the protobuf v3 wire format +func (val *TestPayloadStore) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TestPayloadStore) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TestPayloadStore values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TestPayloadStore) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TestPayloadStore + switch t := that.(type) { + case *TestPayloadStore: + that1 = t + case TestPayloadStore: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TestPayloadTTLPureTask to the protobuf v3 wire format +func (val *TestPayloadTTLPureTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TestPayloadTTLPureTask from the protobuf v3 wire format +func (val *TestPayloadTTLPureTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TestPayloadTTLPureTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TestPayloadTTLPureTask values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TestPayloadTTLPureTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TestPayloadTTLPureTask + switch t := that.(type) { + case *TestPayloadTTLPureTask: + that1 = t + case TestPayloadTTLPureTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TestPayloadTTLSideEffectTask to the protobuf v3 wire format +func (val *TestPayloadTTLSideEffectTask) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TestPayloadTTLSideEffectTask from the protobuf v3 wire format +func (val *TestPayloadTTLSideEffectTask) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TestPayloadTTLSideEffectTask) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TestPayloadTTLSideEffectTask values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TestPayloadTTLSideEffectTask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TestPayloadTTLSideEffectTask + switch t := that.(type) { + case *TestPayloadTTLSideEffectTask: + that1 = t + case TestPayloadTTLSideEffectTask: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/tests/gen/testspb/v1/message.pb.go b/chasm/lib/tests/gen/testspb/v1/message.pb.go new file mode 100644 index 00000000000..9bd7f2b343c --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/message.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/tests/proto/v1/message.proto + +package testspb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TestPayloadStore struct { + state protoimpl.MessageState `protogen:"open.v1"` + TotalCount int64 `protobuf:"varint,1,opt,name=total_count,json=totalCount,proto3" json:"total_count,omitempty"` + TotalSize int64 `protobuf:"varint,2,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"` + // (-- api-linter: core::0142::time-field-type=disabled --) + ExpirationTimes map[string]*timestamppb.Timestamp `protobuf:"bytes,3,rep,name=expiration_times,json=expirationTimes,proto3" json:"expiration_times,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Closed bool `protobuf:"varint,4,opt,name=closed,proto3" json:"closed,omitempty"` + Canceled bool `protobuf:"varint,5,opt,name=canceled,proto3" json:"canceled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TestPayloadStore) Reset() { + *x = TestPayloadStore{} + mi := &file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TestPayloadStore) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestPayloadStore) ProtoMessage() {} + +func (x *TestPayloadStore) ProtoReflect() protoreflect.Message { + mi := 
&file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestPayloadStore.ProtoReflect.Descriptor instead. +func (*TestPayloadStore) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescGZIP(), []int{0} +} + +func (x *TestPayloadStore) GetTotalCount() int64 { + if x != nil { + return x.TotalCount + } + return 0 +} + +func (x *TestPayloadStore) GetTotalSize() int64 { + if x != nil { + return x.TotalSize + } + return 0 +} + +func (x *TestPayloadStore) GetExpirationTimes() map[string]*timestamppb.Timestamp { + if x != nil { + return x.ExpirationTimes + } + return nil +} + +func (x *TestPayloadStore) GetClosed() bool { + if x != nil { + return x.Closed + } + return false +} + +func (x *TestPayloadStore) GetCanceled() bool { + if x != nil { + return x.Canceled + } + return false +} + +type TestPayloadTTLPureTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + PayloadKey string `protobuf:"bytes,1,opt,name=payload_key,json=payloadKey,proto3" json:"payload_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TestPayloadTTLPureTask) Reset() { + *x = TestPayloadTTLPureTask{} + mi := &file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TestPayloadTTLPureTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestPayloadTTLPureTask) ProtoMessage() {} + +func (x *TestPayloadTTLPureTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { 
+ ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestPayloadTTLPureTask.ProtoReflect.Descriptor instead. +func (*TestPayloadTTLPureTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescGZIP(), []int{1} +} + +func (x *TestPayloadTTLPureTask) GetPayloadKey() string { + if x != nil { + return x.PayloadKey + } + return "" +} + +type TestPayloadTTLSideEffectTask struct { + state protoimpl.MessageState `protogen:"open.v1"` + PayloadKey string `protobuf:"bytes,1,opt,name=payload_key,json=payloadKey,proto3" json:"payload_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TestPayloadTTLSideEffectTask) Reset() { + *x = TestPayloadTTLSideEffectTask{} + mi := &file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TestPayloadTTLSideEffectTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestPayloadTTLSideEffectTask) ProtoMessage() {} + +func (x *TestPayloadTTLSideEffectTask) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestPayloadTTLSideEffectTask.ProtoReflect.Descriptor instead. 
+func (*TestPayloadTTLSideEffectTask) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescGZIP(), []int{2} +} + +func (x *TestPayloadTTLSideEffectTask) GetPayloadKey() string { + if x != nil { + return x.PayloadKey + } + return "" +} + +var File_temporal_server_chasm_lib_tests_proto_v1_message_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDesc = "" + + "\n" + + "6temporal/server/chasm/lib/tests/proto/v1/message.proto\x12(temporal.server.chasm.lib.tests.proto.v1\x1a\x1fgoogle/protobuf/timestamp.proto\"\xe2\x02\n" + + "\x10TestPayloadStore\x12\x1f\n" + + "\vtotal_count\x18\x01 \x01(\x03R\n" + + "totalCount\x12\x1d\n" + + "\n" + + "total_size\x18\x02 \x01(\x03R\ttotalSize\x12z\n" + + "\x10expiration_times\x18\x03 \x03(\v2O.temporal.server.chasm.lib.tests.proto.v1.TestPayloadStore.ExpirationTimesEntryR\x0fexpirationTimes\x12\x16\n" + + "\x06closed\x18\x04 \x01(\bR\x06closed\x12\x1a\n" + + "\bcanceled\x18\x05 \x01(\bR\bcanceled\x1a^\n" + + "\x14ExpirationTimesEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x120\n" + + "\x05value\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\x05value:\x028\x01\"9\n" + + "\x16TestPayloadTTLPureTask\x12\x1f\n" + + "\vpayload_key\x18\x01 \x01(\tR\n" + + "payloadKey\"?\n" + + "\x1cTestPayloadTTLSideEffectTask\x12\x1f\n" + + "\vpayload_key\x18\x01 \x01(\tR\n" + + "payloadKeyB;Z9go.temporal.io/server/chasm/lib/tests/gen/testspb;testspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDesc), len(file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDescData +} + +var file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_temporal_server_chasm_lib_tests_proto_v1_message_proto_goTypes = []any{ + (*TestPayloadStore)(nil), // 0: temporal.server.chasm.lib.tests.proto.v1.TestPayloadStore + (*TestPayloadTTLPureTask)(nil), // 1: temporal.server.chasm.lib.tests.proto.v1.TestPayloadTTLPureTask + (*TestPayloadTTLSideEffectTask)(nil), // 2: temporal.server.chasm.lib.tests.proto.v1.TestPayloadTTLSideEffectTask + nil, // 3: temporal.server.chasm.lib.tests.proto.v1.TestPayloadStore.ExpirationTimesEntry + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp +} +var file_temporal_server_chasm_lib_tests_proto_v1_message_proto_depIdxs = []int32{ + 3, // 0: temporal.server.chasm.lib.tests.proto.v1.TestPayloadStore.expiration_times:type_name -> temporal.server.chasm.lib.tests.proto.v1.TestPayloadStore.ExpirationTimesEntry + 4, // 1: temporal.server.chasm.lib.tests.proto.v1.TestPayloadStore.ExpirationTimesEntry.value:type_name -> google.protobuf.Timestamp + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_tests_proto_v1_message_proto_init() } +func file_temporal_server_chasm_lib_tests_proto_v1_message_proto_init() { + if File_temporal_server_chasm_lib_tests_proto_v1_message_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDesc), len(file_temporal_server_chasm_lib_tests_proto_v1_message_proto_rawDesc)), + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_tests_proto_v1_message_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_tests_proto_v1_message_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_tests_proto_v1_message_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_tests_proto_v1_message_proto = out.File + file_temporal_server_chasm_lib_tests_proto_v1_message_proto_goTypes = nil + file_temporal_server_chasm_lib_tests_proto_v1_message_proto_depIdxs = nil +} diff --git a/chasm/lib/tests/gen/testspb/v1/request_response.go-helpers.pb.go b/chasm/lib/tests/gen/testspb/v1/request_response.go-helpers.pb.go new file mode 100644 index 00000000000..c4da427ee6e --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/request_response.go-helpers.pb.go @@ -0,0 +1,80 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package testspb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type TestRequest to the protobuf v3 wire format +func (val *TestRequest) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TestRequest from the protobuf v3 wire format +func (val *TestRequest) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TestRequest) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TestRequest values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TestRequest) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TestRequest + switch t := that.(type) { + case *TestRequest: + that1 = t + case TestRequest: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type TestResponse to the protobuf v3 wire format +func (val *TestResponse) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type TestResponse from the protobuf v3 wire format +func (val *TestResponse) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *TestResponse) Size() int { + return proto.Size(val) +} + +// Equal returns whether two TestResponse values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *TestResponse) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *TestResponse + switch t := that.(type) { + case *TestResponse: + that1 = t + case TestResponse: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/tests/gen/testspb/v1/request_response.pb.go b/chasm/lib/tests/gen/testspb/v1/request_response.pb.go new file mode 100644 index 00000000000..0942950ed38 --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/request_response.pb.go @@ -0,0 +1,181 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/tests/proto/v1/request_response.proto + +package testspb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TestRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TestRequest) Reset() { + *x = TestRequest{} + mi := &file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TestRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestRequest) ProtoMessage() {} + +func (x *TestRequest) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestRequest.ProtoReflect.Descriptor instead. 
+func (*TestRequest) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescGZIP(), []int{0} +} + +func (x *TestRequest) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +type TestResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + HasEngineCtx bool `protobuf:"varint,2,opt,name=has_engine_ctx,json=hasEngineCtx,proto3" json:"has_engine_ctx,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *TestResponse) Reset() { + *x = TestResponse{} + mi := &file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *TestResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TestResponse) ProtoMessage() {} + +func (x *TestResponse) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TestResponse.ProtoReflect.Descriptor instead. 
+func (*TestResponse) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescGZIP(), []int{1} +} + +func (x *TestResponse) GetRequestId() string { + if x != nil { + return x.RequestId + } + return "" +} + +func (x *TestResponse) GetHasEngineCtx() bool { + if x != nil { + return x.HasEngineCtx + } + return false +} + +var File_temporal_server_chasm_lib_tests_proto_v1_request_response_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDesc = "" + + "\n" + + "?temporal/server/chasm/lib/tests/proto/v1/request_response.proto\x12(temporal.server.chasm.lib.tests.proto.v1\",\n" + + "\vTestRequest\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\"S\n" + + "\fTestResponse\x12\x1d\n" + + "\n" + + "request_id\x18\x01 \x01(\tR\trequestId\x12$\n" + + "\x0ehas_engine_ctx\x18\x02 \x01(\bR\fhasEngineCtxB;Z9go.temporal.io/server/chasm/lib/tests/gen/testspb;testspbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDescData +} + +var file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var 
file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_goTypes = []any{ + (*TestRequest)(nil), // 0: temporal.server.chasm.lib.tests.proto.v1.TestRequest + (*TestResponse)(nil), // 1: temporal.server.chasm.lib.tests.proto.v1.TestResponse +} +var file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_init() } +func file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_init() { + if File_temporal_server_chasm_lib_tests_proto_v1_request_response_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDesc), len(file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_tests_proto_v1_request_response_proto = out.File + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_goTypes = nil + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_depIdxs = nil +} diff --git a/chasm/lib/tests/gen/testspb/v1/service.pb.go b/chasm/lib/tests/gen/testspb/v1/service.pb.go new file mode 100644 index 
00000000000..5b33e9f9c77 --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/service.pb.go @@ -0,0 +1,70 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/tests/proto/v1/service.proto + +package testspb + +import ( + reflect "reflect" + unsafe "unsafe" + + _ "go.temporal.io/server/api/common/v1" + _ "go.temporal.io/server/api/routing/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var File_temporal_server_chasm_lib_tests_proto_v1_service_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_tests_proto_v1_service_proto_rawDesc = "" + + "\n" + + "6temporal/server/chasm/lib/tests/proto/v1/service.proto\x12(temporal.server.chasm.lib.tests.proto.v1\x1a?temporal/server/chasm/lib/tests/proto/v1/request_response.proto\x1a0temporal/server/api/common/v1/api_category.proto\x1a.temporal/server/api/routing/v1/extension.proto2\x93\x01\n" + + "\vTestService\x12\x83\x01\n" + + "\x04Test\x125.temporal.server.chasm.lib.tests.proto.v1.TestRequest\x1a6.temporal.server.chasm.lib.tests.proto.v1.TestResponse\"\f\x8a\xb5\x18\x02\b\x01\xd2\xc3\x18\x02\b\x01B;Z9go.temporal.io/server/chasm/lib/tests/gen/testspb;testspbb\x06proto3" + +var file_temporal_server_chasm_lib_tests_proto_v1_service_proto_goTypes = []any{ + (*TestRequest)(nil), // 0: temporal.server.chasm.lib.tests.proto.v1.TestRequest + (*TestResponse)(nil), // 1: temporal.server.chasm.lib.tests.proto.v1.TestResponse +} +var file_temporal_server_chasm_lib_tests_proto_v1_service_proto_depIdxs = []int32{ + 0, // 0: temporal.server.chasm.lib.tests.proto.v1.TestService.Test:input_type -> 
temporal.server.chasm.lib.tests.proto.v1.TestRequest + 1, // 1: temporal.server.chasm.lib.tests.proto.v1.TestService.Test:output_type -> temporal.server.chasm.lib.tests.proto.v1.TestResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_tests_proto_v1_service_proto_init() } +func file_temporal_server_chasm_lib_tests_proto_v1_service_proto_init() { + if File_temporal_server_chasm_lib_tests_proto_v1_service_proto != nil { + return + } + file_temporal_server_chasm_lib_tests_proto_v1_request_response_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_tests_proto_v1_service_proto_rawDesc), len(file_temporal_server_chasm_lib_tests_proto_v1_service_proto_rawDesc)), + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_temporal_server_chasm_lib_tests_proto_v1_service_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_tests_proto_v1_service_proto_depIdxs, + }.Build() + File_temporal_server_chasm_lib_tests_proto_v1_service_proto = out.File + file_temporal_server_chasm_lib_tests_proto_v1_service_proto_goTypes = nil + file_temporal_server_chasm_lib_tests_proto_v1_service_proto_depIdxs = nil +} diff --git a/chasm/lib/tests/gen/testspb/v1/service_client.pb.go b/chasm/lib/tests/gen/testspb/v1/service_client.pb.go new file mode 100644 index 00000000000..fbeff6fc99b --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/service_client.pb.go @@ -0,0 +1,104 @@ +// Code generated by protoc-gen-go-chasm. DO NOT EDIT. 
+package testspb + +import ( + "context" + "math/rand" + "time" + + "go.temporal.io/server/client/history" + "go.temporal.io/server/common" + "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/primitives" + "google.golang.org/grpc" +) + +// TestServiceLayeredClient is a client for TestService. +type TestServiceLayeredClient struct { + metricsHandler metrics.Handler + numShards int32 + redirector history.Redirector[TestServiceClient] + retryPolicy backoff.RetryPolicy +} + +// NewTestServiceLayeredClient initializes a new TestServiceLayeredClient. +func NewTestServiceLayeredClient( + dc *dynamicconfig.Collection, + rpcFactory common.RPCFactory, + monitor membership.Monitor, + config *config.Persistence, + logger log.Logger, + metricsHandler metrics.Handler, +) (TestServiceClient, error) { + resolver, err := monitor.GetResolver(primitives.HistoryService) + if err != nil { + return nil, err + } + connections := history.NewConnectionPool(resolver, rpcFactory, NewTestServiceClient) + var redirector history.Redirector[TestServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { + redirector = history.NewCachingRedirector( + connections, + resolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) + } else { + redirector = history.NewBasicRedirector(connections, resolver) + } + return &TestServiceLayeredClient{ + metricsHandler: metricsHandler, + redirector: redirector, + numShards: config.NumHistoryShards, + retryPolicy: common.CreateHistoryClientRetryPolicy(), + }, nil +} +func (c *TestServiceLayeredClient) callTestNoRetry( + ctx context.Context, + request *TestRequest, + opts ...grpc.CallOption, +) (*TestResponse, error) { + var response *TestResponse 
+ var err error + startTime := time.Now().UTC() + // the caller is a namespace, hence the tag below. + caller := headers.GetCallerInfo(ctx).CallerName + metricsHandler := c.metricsHandler.WithTags( + metrics.OperationTag("TestService.Test"), + metrics.NamespaceTag(caller), + metrics.ServiceRoleTag(metrics.HistoryRoleTagValue), + ) + metrics.ClientRequests.With(metricsHandler).Record(1) + defer func() { + if err != nil { + metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err)) + } + metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) + }() + shardID := int32(rand.Intn(int(c.numShards)) + 1) + op := func(ctx context.Context, client TestServiceClient) error { + var err error + ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout) + defer cancel() + response, err = client.Test(ctx, request, opts...) + return err + } + err = c.redirector.Execute(ctx, shardID, op) + return response, err +} +func (c *TestServiceLayeredClient) Test( + ctx context.Context, + request *TestRequest, + opts ...grpc.CallOption, +) (*TestResponse, error) { + call := func(ctx context.Context) (*TestResponse, error) { + return c.callTestNoRetry(ctx, request, opts...) + } + return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError) +} diff --git a/chasm/lib/tests/gen/testspb/v1/service_grpc.pb.go b/chasm/lib/tests/gen/testspb/v1/service_grpc.pb.go new file mode 100644 index 00000000000..1b7ce585522 --- /dev/null +++ b/chasm/lib/tests/gen/testspb/v1/service_grpc.pb.go @@ -0,0 +1,110 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// plugins: +// - protoc-gen-go-grpc +// - protoc +// source: temporal/server/chasm/lib/tests/proto/v1/service.proto + +package testspb + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + TestService_Test_FullMethodName = "/temporal.server.chasm.lib.tests.proto.v1.TestService/Test" +) + +// TestServiceClient is the client API for TestService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TestServiceClient interface { + Test(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) +} + +type testServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTestServiceClient(cc grpc.ClientConnInterface) TestServiceClient { + return &testServiceClient{cc} +} + +func (c *testServiceClient) Test(ctx context.Context, in *TestRequest, opts ...grpc.CallOption) (*TestResponse, error) { + out := new(TestResponse) + err := c.cc.Invoke(ctx, TestService_Test_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TestServiceServer is the server API for TestService service. +// All implementations must embed UnimplementedTestServiceServer +// for forward compatibility +type TestServiceServer interface { + Test(context.Context, *TestRequest) (*TestResponse, error) + mustEmbedUnimplementedTestServiceServer() +} + +// UnimplementedTestServiceServer must be embedded to have forward compatible implementations. 
+type UnimplementedTestServiceServer struct { +} + +func (UnimplementedTestServiceServer) Test(context.Context, *TestRequest) (*TestResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Test not implemented") +} +func (UnimplementedTestServiceServer) mustEmbedUnimplementedTestServiceServer() {} + +// UnsafeTestServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TestServiceServer will +// result in compilation errors. +type UnsafeTestServiceServer interface { + mustEmbedUnimplementedTestServiceServer() +} + +func RegisterTestServiceServer(s grpc.ServiceRegistrar, srv TestServiceServer) { + s.RegisterService(&TestService_ServiceDesc, srv) +} + +func _TestService_Test_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TestRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TestServiceServer).Test(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: TestService_Test_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TestServiceServer).Test(ctx, req.(*TestRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// TestService_ServiceDesc is the grpc.ServiceDesc for TestService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TestService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "temporal.server.chasm.lib.tests.proto.v1.TestService", + HandlerType: (*TestServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Test", + Handler: _TestService_Test_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "temporal/server/chasm/lib/tests/proto/v1/service.proto", +} diff --git a/chasm/lib/tests/handler.go b/chasm/lib/tests/handler.go new file mode 100644 index 00000000000..1c5d0d04600 --- /dev/null +++ b/chasm/lib/tests/handler.go @@ -0,0 +1,253 @@ +package tests + +import ( + "context" + "time" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/tests/gen/testspb/v1" + "go.temporal.io/server/common/namespace" +) + +type ( + NewPayloadStoreRequest struct { + NamespaceID namespace.ID + StoreID string + IDReusePolicy chasm.BusinessIDReusePolicy + IDConflictPolicy chasm.BusinessIDConflictPolicy + } + + NewPayloadStoreResponse struct { + RunID string + } + + DescribePayloadStoreRequest struct { + NamespaceID namespace.ID + StoreID string + } + + DescribePayloadStoreResponse struct { + State *testspb.TestPayloadStore + ApproximateStateSize int + } + + ClosePayloadStoreRequest struct { + NamespaceID namespace.ID + StoreID string + } + + ClosePayloadStoreResponse struct{} + + CancelPayloadStoreRequest struct { + NamespaceID namespace.ID + StoreID string + } + + CancelPayloadStoreResponse struct{} + + AddPayloadRequest struct { + NamespaceID namespace.ID + StoreID string + PayloadKey string + Payload *commonpb.Payload + TTL time.Duration + } + + AddPayloadResponse struct { + State *testspb.TestPayloadStore + } + + GetPayloadRequest struct { + NamespaceID namespace.ID + StoreID string + PayloadKey string + } + + GetPayloadResponse struct { + Payload *commonpb.Payload + } + + 
RemovePayloadRequest struct { + NamespaceID namespace.ID + StoreID string + PayloadKey string + } + + RemovePayloadResponse struct { + State *testspb.TestPayloadStore + } + + DeletePayloadStoreRequest struct { + NamespaceID namespace.ID + StoreID string + Reason string + Identity string + } +) + +func NewPayloadStoreHandler( + ctx context.Context, + request NewPayloadStoreRequest, +) (NewPayloadStoreResponse, error) { + result, err := chasm.StartExecution( + ctx, + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + func(mutableContext chasm.MutableContext, _ any) (*PayloadStore, error) { + store, err := NewPayloadStore(mutableContext) + return store, err + }, + nil, + chasm.WithBusinessIDPolicy(request.IDReusePolicy, request.IDConflictPolicy), + ) + if err != nil { + return NewPayloadStoreResponse{}, err + } + return NewPayloadStoreResponse{ + RunID: result.ExecutionKey.RunID, + }, nil +} + +func DescribePayloadStoreHandler( + ctx context.Context, + request DescribePayloadStoreRequest, +) (DescribePayloadStoreResponse, error) { + return chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*PayloadStore]( + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + ), + (*PayloadStore).Describe, + request, + ) +} + +func ClosePayloadStoreHandler( + ctx context.Context, + request ClosePayloadStoreRequest, +) (ClosePayloadStoreResponse, error) { + resp, _, err := chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*PayloadStore]( + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + ), + (*PayloadStore).Close, + nil, + ) + return resp, err +} + +func CancelPayloadStoreHandler( + ctx context.Context, + request CancelPayloadStoreRequest, +) (CancelPayloadStoreResponse, error) { + resp, _, err := chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*PayloadStore]( + chasm.ExecutionKey{ + NamespaceID: 
request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + ), + (*PayloadStore).Cancel, + request, + ) + return resp, err +} + +func AddPayloadHandler( + ctx context.Context, + request AddPayloadRequest, +) (AddPayloadResponse, error) { + state, _, err := chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*PayloadStore]( + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + ), + (*PayloadStore).AddPayload, + request, + ) + if err != nil { + return AddPayloadResponse{}, err + } + return AddPayloadResponse{ + State: state, + }, nil +} + +func GetPayloadHandler( + ctx context.Context, + request GetPayloadRequest, +) (GetPayloadResponse, error) { + payload, err := chasm.ReadComponent( + ctx, + chasm.NewComponentRef[*PayloadStore]( + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + ), + (*PayloadStore).GetPayload, + request.PayloadKey, + ) + if err != nil { + return GetPayloadResponse{}, err + } + return GetPayloadResponse{ + Payload: payload, + }, nil +} + +func DeletePayloadStoreHandler( + ctx context.Context, + request DeletePayloadStoreRequest, +) error { + return chasm.DeleteExecution[*PayloadStore]( + ctx, + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + chasm.DeleteExecutionRequest{ + TerminateComponentRequest: chasm.TerminateComponentRequest{ + Reason: request.Reason, + Identity: request.Identity, + }, + }, + ) +} + +func RemovePayloadHandler( + ctx context.Context, + request RemovePayloadRequest, +) (RemovePayloadResponse, error) { + state, _, err := chasm.UpdateComponent( + ctx, + chasm.NewComponentRef[*PayloadStore]( + chasm.ExecutionKey{ + NamespaceID: request.NamespaceID.String(), + BusinessID: request.StoreID, + }, + ), + (*PayloadStore).RemovePayload, + request.PayloadKey, + ) + if err != nil { + return RemovePayloadResponse{}, err + } + return RemovePayloadResponse{ + State: state, + 
}, nil +} diff --git a/chasm/lib/tests/library.go b/chasm/lib/tests/library.go new file mode 100644 index 00000000000..432dad23967 --- /dev/null +++ b/chasm/lib/tests/library.go @@ -0,0 +1,67 @@ +package tests + +import ( + "github.com/nexus-rpc/sdk-go/nexus" + "go.temporal.io/server/chasm" +) + +type ( + library struct { + chasm.UnimplementedLibrary + } +) + +const ( + libraryName = "tests" + componentName = "payloadStore" +) + +var ( + Archetype = chasm.FullyQualifiedName(libraryName, componentName) + ArchetypeID = chasm.GenerateTypeID(Archetype) +) + +var Library = &library{} + +func (l *library) Name() string { + return libraryName +} + +func (l *library) NexusServices() []*nexus.Service { + return []*nexus.Service{NewTestServiceNexusService()} +} + +func (l *library) NexusServiceProcessors() []*chasm.NexusServiceProcessor { + return []*chasm.NexusServiceProcessor{NewTestServiceNexusServiceProcessor()} +} + +func (l *library) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*PayloadStore]( + componentName, + chasm.WithBusinessIDAlias("PayloadStoreId"), + chasm.WithSearchAttributes( + PayloadTotalCountSearchAttribute, + PayloadTotalSizeSearchAttribute, + ExecutionStatusSearchAttribute, + chasm.SearchAttributeTaskQueue, + ), + chasm.WithContextValues(map[any]any{ + componentCtxKey: componentCtxVal, + }), + ), + } +} + +func (l *library) Tasks() []*chasm.RegistrableTask { + return []*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask( + "payloadTTLPureTask", + &PayloadTTLPureTaskHandler{}, + ), + chasm.NewRegistrableSideEffectTask( + "payloadTTLSideEffectTask", + &PayloadTTLSideEffectTaskHandler{}, + ), + } +} diff --git a/chasm/lib/tests/nexus_service.go b/chasm/lib/tests/nexus_service.go new file mode 100644 index 00000000000..1597d9c44a3 --- /dev/null +++ b/chasm/lib/tests/nexus_service.go @@ -0,0 +1,36 @@ +package tests + +import ( + "context" + + "github.com/nexus-rpc/sdk-go/nexus" + 
"go.temporal.io/server/chasm" +) + +var TestOperation = nexus.NewSyncOperation("TestOperation", func(ctx context.Context, input string, options nexus.StartOperationOptions) (string, error) { + return "Hello, " + input, nil +}) + +func NewTestServiceNexusService() *nexus.Service { + service := nexus.NewService("TestService") + service.MustRegister(TestOperation) + return service +} + +type testOperationProcessor struct { +} + +func (o testOperationProcessor) ProcessInput(ctx chasm.NexusOperationProcessorContext, input string) (*chasm.NexusOperationProcessorResult, error) { + return &chasm.NexusOperationProcessorResult{ + RoutingKey: chasm.NexusOperationRoutingKeyExecution{ + NamespaceID: ctx.Namespace.ID().String(), + BusinessID: input, + }, + }, nil +} + +func NewTestServiceNexusServiceProcessor() *chasm.NexusServiceProcessor { + sp := chasm.NewNexusServiceProcessor("TestService") + sp.MustRegisterOperation("TestOperation", chasm.NewRegisterableNexusOperationProcessor(testOperationProcessor{})) + return sp +} diff --git a/chasm/lib/tests/payload.go b/chasm/lib/tests/payload.go new file mode 100644 index 00000000000..4764adfb1d2 --- /dev/null +++ b/chasm/lib/tests/payload.go @@ -0,0 +1,266 @@ +package tests + +import ( + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/tests/gen/testspb/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/softassert" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + TotalCountMemoFieldName = "TotalCount" + TotalSizeMemoFieldName = "TotalSize" +) + +const ( + TestScheduleID = "TestScheduleID" + PayloadTotalCountSAAlias = "PayloadTotalCount" + PayloadTotalSizeSAAlias = "PayloadTotalSize" + ExecutionStatusSAAlias = "ExecutionStatus" + DefaultPayloadStoreTaskQueue = "payload-store-task-queue" +) + +var ( + PayloadTotalCountSearchAttribute = 
chasm.NewSearchAttributeInt(PayloadTotalCountSAAlias, chasm.SearchAttributeFieldInt01) + PayloadTotalSizeSearchAttribute = chasm.NewSearchAttributeInt(PayloadTotalSizeSAAlias, chasm.SearchAttributeFieldInt02) + ExecutionStatusSearchAttribute = chasm.NewSearchAttributeKeyword(ExecutionStatusSAAlias, chasm.SearchAttributeFieldLowCardinalityKeyword01) + + _ chasm.VisibilitySearchAttributesProvider = (*PayloadStore)(nil) + _ chasm.VisibilityMemoProvider = (*PayloadStore)(nil) +) + +type ( + PayloadStore struct { + chasm.UnimplementedComponent + + State *testspb.TestPayloadStore + + Payloads chasm.Map[string, *commonpb.Payload] + Visibility chasm.Field[*chasm.Visibility] + } + + componentContextKey string +) + +const ( + componentCtxKey componentContextKey = "key" + componentCtxVal string = "value" +) + +func NewPayloadStore( + mutableContext chasm.MutableContext, +) (*PayloadStore, error) { + if err := assertContextValue(mutableContext); err != nil { + return nil, err + } + + store := &PayloadStore{ + State: &testspb.TestPayloadStore{ + TotalCount: 0, + TotalSize: 0, + ExpirationTimes: make(map[string]*timestamppb.Timestamp), + }, + Visibility: chasm.NewComponentField( + mutableContext, + chasm.NewVisibility(mutableContext), + ), + } + return store, nil +} + +func assertContextValue(chasmContext chasm.Context) error { + if val := chasmContext.Value(componentCtxKey); val != componentCtxVal { + return softassert.UnexpectedInternalErr( + chasmContext.Logger(), + "registered component key value pair not available in context", + nil, + ) + } + + return nil +} + +func (s *PayloadStore) Describe( + chasmContext chasm.Context, + _ DescribePayloadStoreRequest, +) (DescribePayloadStoreResponse, error) { + if err := assertContextValue(chasmContext); err != nil { + return DescribePayloadStoreResponse{}, err + } + + state := common.CloneProto(s.State) + executionInfo := chasmContext.ExecutionInfo() + + return DescribePayloadStoreResponse{ + State: state, + ApproximateStateSize: 
executionInfo.ApproximateStateSize, + }, nil +} + +func (s *PayloadStore) Close( + chasmContext chasm.MutableContext, + _ chasm.NoValue, +) (ClosePayloadStoreResponse, error) { + if err := assertContextValue(chasmContext); err != nil { + return ClosePayloadStoreResponse{}, err + } + + s.State.Closed = true + return ClosePayloadStoreResponse{}, nil +} + +func (s *PayloadStore) Cancel( + _ chasm.MutableContext, + _ CancelPayloadStoreRequest, +) (CancelPayloadStoreResponse, error) { + s.State.Canceled = true + return CancelPayloadStoreResponse{}, nil +} + +func (s *PayloadStore) ContextMetadata(_ chasm.Context) map[string]string { + return map[string]string{ + string(componentCtxKey): componentCtxVal, + } +} + +func (s *PayloadStore) AddPayload( + mutableContext chasm.MutableContext, + request AddPayloadRequest, +) (*testspb.TestPayloadStore, error) { + if err := assertContextValue(mutableContext); err != nil { + return nil, err + } + + if _, ok := s.Payloads[request.PayloadKey]; ok { + return nil, serviceerror.NewAlreadyExistsf("payload already exists with key: %s", request.PayloadKey) + } + + if s.Payloads == nil { + s.Payloads = make(chasm.Map[string, *commonpb.Payload]) + } + s.Payloads[request.PayloadKey] = chasm.NewDataField(mutableContext, request.Payload) + s.State.TotalCount++ + s.State.TotalSize += int64(len(request.Payload.Data)) + + if request.TTL > 0 { + expirationTime := mutableContext.Now(s).Add(request.TTL) + if s.State.ExpirationTimes == nil { + s.State.ExpirationTimes = make(map[string]*timestamppb.Timestamp) + } + s.State.ExpirationTimes[request.PayloadKey] = timestamppb.New(expirationTime) + mutableContext.AddTask( + s, + chasm.TaskAttributes{ScheduledTime: expirationTime}, + // You can switch between TestPayloadTTLPureTask & TestPayloadTTLSideEffectTask + &testspb.TestPayloadTTLPureTask{ + PayloadKey: request.PayloadKey, + }, + ) + } + + return common.CloneProto(s.State), nil +} + +func (s *PayloadStore) GetPayload( + chasmContext chasm.Context, + 
key string, +) (*commonpb.Payload, error) { + if err := assertContextValue(chasmContext); err != nil { + return nil, err + } + + if field, ok := s.Payloads[key]; ok { + return field.Get(chasmContext), nil + } + return nil, serviceerror.NewNotFoundf("payload not found with key: %s", key) +} + +func (s *PayloadStore) RemovePayload( + mutableContext chasm.MutableContext, + key string, +) (*testspb.TestPayloadStore, error) { + if err := assertContextValue(mutableContext); err != nil { + return nil, err + } + + if _, ok := s.Payloads[key]; !ok { + return nil, serviceerror.NewNotFoundf("payload not found with key: %s", key) + } + + field := s.Payloads[key] + payload := field.Get(mutableContext) + s.State.TotalCount-- + s.State.TotalSize -= int64(len(payload.Data)) + delete(s.Payloads, key) + delete(s.State.ExpirationTimes, key) + + return common.CloneProto(s.State), nil +} + +func (s *PayloadStore) LifecycleState( + chasmContext chasm.Context, +) chasm.LifecycleState { + if err := assertContextValue(chasmContext); err != nil { + // nolint:forbidigo // Panic here for testing. 
+ panic("registered component key value pair not available in context") + } + + if s.State.Canceled { + return chasm.LifecycleStateFailed + } + if s.State.Closed { + return chasm.LifecycleStateCompleted + } + return chasm.LifecycleStateRunning +} + +func (s *PayloadStore) Terminate( + mutableContext chasm.MutableContext, + _ chasm.TerminateComponentRequest, +) (chasm.TerminateComponentResponse, error) { + if err := assertContextValue(mutableContext); err != nil { + return chasm.TerminateComponentResponse{}, err + } + + if _, err := s.Close(mutableContext, nil); err != nil { + return chasm.TerminateComponentResponse{}, err + } + return chasm.TerminateComponentResponse{}, nil +} + +// SearchAttributes implements chasm.VisibilitySearchAttributesProvider interface +func (s *PayloadStore) SearchAttributes( + chasmContext chasm.Context, +) []chasm.SearchAttributeKeyValue { + if err := assertContextValue(chasmContext); err != nil { + // nolint:forbidigo // Panic here for testing. + panic("registered component key value pair not available in context") + } + + status := s.LifecycleState(chasmContext).String() + if s.State.Canceled { + status = "Canceled" + } + + return []chasm.SearchAttributeKeyValue{ + PayloadTotalCountSearchAttribute.Value(s.State.TotalCount), + PayloadTotalSizeSearchAttribute.Value(s.State.TotalSize), + ExecutionStatusSearchAttribute.Value(status), + chasm.SearchAttributeTemporalScheduledByID.Value(TestScheduleID), + chasm.SearchAttributeTaskQueue.Value(DefaultPayloadStoreTaskQueue), + } +} + +// Memo implements chasm.VisibilityMemoProvider interface +func (s *PayloadStore) Memo(chasmContext chasm.Context) proto.Message { + if err := assertContextValue(chasmContext); err != nil { + // nolint:forbidigo // Panic here for testing. 
+ panic("registered component key value pair not available in context") + } + + return s.State +} diff --git a/chasm/lib/tests/proto/v1/message.proto b/chasm/lib/tests/proto/v1/message.proto new file mode 100644 index 00000000000..98f077d4c21 --- /dev/null +++ b/chasm/lib/tests/proto/v1/message.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.tests.proto.v1; + +import "google/protobuf/timestamp.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/tests/gen/testspb;testspb"; + +message TestPayloadStore { + int64 total_count = 1; + int64 total_size = 2; + // (-- api-linter: core::0142::time-field-type=disabled --) + map expiration_times = 3; + bool closed = 4; + bool canceled = 5; +} + +message TestPayloadTTLPureTask { + string payload_key = 1; +} + +message TestPayloadTTLSideEffectTask { + string payload_key = 1; +} diff --git a/chasm/lib/tests/proto/v1/request_response.proto b/chasm/lib/tests/proto/v1/request_response.proto new file mode 100644 index 00000000000..43b0a48659b --- /dev/null +++ b/chasm/lib/tests/proto/v1/request_response.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.tests.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/tests/gen/testspb;testspb"; + +message TestRequest { + string request_id = 1; +} + +message TestResponse { + string request_id = 1; + bool has_engine_ctx = 2; +} diff --git a/chasm/lib/tests/proto/v1/service.proto b/chasm/lib/tests/proto/v1/service.proto new file mode 100644 index 00000000000..782e4aa27e1 --- /dev/null +++ b/chasm/lib/tests/proto/v1/service.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.tests.proto.v1; + +import "chasm/lib/tests/proto/v1/request_response.proto"; +import "temporal/server/api/common/v1/api_category.proto"; +import "temporal/server/api/routing/v1/extension.proto"; + +option go_package = "go.temporal.io/server/chasm/lib/tests/gen/testspb;testspb"; + +service TestService { + rpc 
Test(TestRequest) returns (TestResponse) { + option (temporal.server.api.routing.v1.routing).random = true; + option (temporal.server.api.common.v1.api_category).category = API_CATEGORY_STANDARD; + } +} diff --git a/chasm/lib/tests/tasks.go b/chasm/lib/tests/tasks.go new file mode 100644 index 00000000000..d501a114724 --- /dev/null +++ b/chasm/lib/tests/tasks.go @@ -0,0 +1,79 @@ +package tests + +import ( + "context" + + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/tests/gen/testspb/v1" +) + +type PayloadTTLPureTaskHandler struct{ chasm.PureTaskHandlerBase } + +func (h *PayloadTTLPureTaskHandler) Execute( + mutableContext chasm.MutableContext, + store *PayloadStore, + _ chasm.TaskAttributes, + task *testspb.TestPayloadTTLPureTask, +) error { + if err := assertContextValue(mutableContext); err != nil { + return err + } + + _, err := store.RemovePayload(mutableContext, task.PayloadKey) + return err +} + +func (h *PayloadTTLPureTaskHandler) Validate( + chasmContext chasm.Context, + store *PayloadStore, + attributes chasm.TaskAttributes, + task *testspb.TestPayloadTTLPureTask, +) (bool, error) { + return validateTask(chasmContext, store, attributes, task.PayloadKey) +} + +type PayloadTTLSideEffectTaskHandler struct { + chasm.SideEffectTaskHandlerBase[*testspb.TestPayloadTTLSideEffectTask] +} + +func (h *PayloadTTLSideEffectTaskHandler) Execute( + ctx context.Context, + ref chasm.ComponentRef, + _ chasm.TaskAttributes, + task *testspb.TestPayloadTTLSideEffectTask, +) error { + _, _, err := chasm.UpdateComponent( + ctx, + ref, + (*PayloadStore).RemovePayload, + task.PayloadKey, + ) + return err +} + +func (h *PayloadTTLSideEffectTaskHandler) Validate( + chasmContext chasm.Context, + store *PayloadStore, + attributes chasm.TaskAttributes, + task *testspb.TestPayloadTTLSideEffectTask, +) (bool, error) { + return validateTask(chasmContext, store, attributes, task.PayloadKey) +} + +func validateTask( + chasmContext chasm.Context, + store *PayloadStore, + 
attributes chasm.TaskAttributes, + payloadKey string, +) (bool, error) { + if err := assertContextValue(chasmContext); err != nil { + return false, err + } + + expirationTime, ok := store.State.ExpirationTimes[payloadKey] + if !ok { + return false, nil + } + + return !expirationTime.AsTime().After(attributes.ScheduledTime), nil +} diff --git a/chasm/lib/workflow/events.go b/chasm/lib/workflow/events.go new file mode 100644 index 00000000000..7ee8382c081 --- /dev/null +++ b/chasm/lib/workflow/events.go @@ -0,0 +1,27 @@ +package workflow + +import ( + "errors" + + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/server/chasm" +) + +// ErrEventNotCherryPickable should be returned by CherryPick if an event should not be cherry picked for whatever reason. +var ErrEventNotCherryPickable = errors.New("event not cherry pickable") + +// EventDefinition is a definition for a history event for a given event type. +type EventDefinition interface { + Type() enumspb.EventType + // IsWorkflowTaskTrigger returns a boolean indicating whether this event type should trigger a workflow task. + IsWorkflowTaskTrigger() bool + // Apply a history event to the state machine. Triggered during replication and workflow reset. + Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error + // CherryPick (a.k.a "reapply") an event from a different history branch. + // Implementations should apply the event to the machine state and return nil in case the event is cherry-pickable. + // Command events should never be cherry picked as we rely on the workflow to reschedule them. + // Return [ErrEventNotCherryPickable] to skip cherry picking. Any other error is considered fatal and will abort the + // cherry pick process. 
+ CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, resetReapplyExcludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error +} diff --git a/chasm/lib/workflow/fx.go b/chasm/lib/workflow/fx.go new file mode 100644 index 00000000000..310ff1b2117 --- /dev/null +++ b/chasm/lib/workflow/fx.go @@ -0,0 +1,25 @@ +package workflow + +import ( + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/nexusoperation" + "go.uber.org/fx" +) + +var Module = fx.Module( + "chasm.lib.workflow", + fx.Provide(NewRegistry), + fx.Provide(newLibrary), + fx.Invoke(func( + chasmRegistry *chasm.Registry, + library *library, + config *nexusoperation.Config, + ) error { + if err := library.registry.Register( + newNexusLibrary(config, chasmRegistry.NexusEndpointProcessor), + ); err != nil { + return err + } + return chasmRegistry.Register(library) + }), +) diff --git a/chasm/lib/workflow/gen/workflowpb/v1/state.go-helpers.pb.go b/chasm/lib/workflow/gen/workflowpb/v1/state.go-helpers.pb.go new file mode 100644 index 00000000000..50980db658e --- /dev/null +++ b/chasm/lib/workflow/gen/workflowpb/v1/state.go-helpers.pb.go @@ -0,0 +1,117 @@ +// Code generated by protoc-gen-go-helpers. DO NOT EDIT. +package workflowpb + +import ( + "google.golang.org/protobuf/proto" +) + +// Marshal an object of type NexusOperationParentData to the protobuf v3 wire format +func (val *NexusOperationParentData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusOperationParentData from the protobuf v3 wire format +func (val *NexusOperationParentData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusOperationParentData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusOperationParentData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusOperationParentData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusOperationParentData + switch t := that.(type) { + case *NexusOperationParentData: + that1 = t + case NexusOperationParentData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type NexusCancellationParentData to the protobuf v3 wire format +func (val *NexusCancellationParentData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type NexusCancellationParentData from the protobuf v3 wire format +func (val *NexusCancellationParentData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *NexusCancellationParentData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two NexusCancellationParentData values are equivalent by recursively +// comparing the message's fields. 
+// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *NexusCancellationParentData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *NexusCancellationParentData + switch t := that.(type) { + case *NexusCancellationParentData: + that1 = t + case NexusCancellationParentData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} + +// Marshal an object of type IncomingSignalData to the protobuf v3 wire format +func (val *IncomingSignalData) Marshal() ([]byte, error) { + return proto.Marshal(val) +} + +// Unmarshal an object of type IncomingSignalData from the protobuf v3 wire format +func (val *IncomingSignalData) Unmarshal(buf []byte) error { + return proto.Unmarshal(buf, val) +} + +// Size returns the size of the object, in bytes, once serialized +func (val *IncomingSignalData) Size() int { + return proto.Size(val) +} + +// Equal returns whether two IncomingSignalData values are equivalent by recursively +// comparing the message's fields. +// For more information see the documentation for +// https://pkg.go.dev/google.golang.org/protobuf/proto#Equal +func (this *IncomingSignalData) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + var that1 *IncomingSignalData + switch t := that.(type) { + case *IncomingSignalData: + that1 = t + case IncomingSignalData: + that1 = &t + default: + return false + } + + return proto.Equal(this, that1) +} diff --git a/chasm/lib/workflow/gen/workflowpb/v1/state.pb.go b/chasm/lib/workflow/gen/workflowpb/v1/state.pb.go new file mode 100644 index 00000000000..304853e6afd --- /dev/null +++ b/chasm/lib/workflow/gen/workflowpb/v1/state.pb.go @@ -0,0 +1,235 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// plugins: +// protoc-gen-go +// protoc +// source: temporal/server/chasm/lib/workflow/proto/v1/state.proto + +package workflowpb + +import ( + reflect "reflect" + sync "sync" + unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// NexusOperationParentData contains workflow-specific data stored in a nexus operation's +// parent_data field when the operation is embedded in a workflow. +type NexusOperationParentData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Event ID of the NEXUS_OPERATION_SCHEDULED event. + ScheduledEventId int64 `protobuf:"varint,1,opt,name=scheduled_event_id,json=scheduledEventId,proto3" json:"scheduled_event_id,omitempty"` + // Token for loading the NEXUS_OPERATION_SCHEDULED event. 
+ ScheduledEventToken []byte `protobuf:"bytes,2,opt,name=scheduled_event_token,json=scheduledEventToken,proto3" json:"scheduled_event_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusOperationParentData) Reset() { + *x = NexusOperationParentData{} + mi := &file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusOperationParentData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusOperationParentData) ProtoMessage() {} + +func (x *NexusOperationParentData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusOperationParentData.ProtoReflect.Descriptor instead. +func (*NexusOperationParentData) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescGZIP(), []int{0} +} + +func (x *NexusOperationParentData) GetScheduledEventId() int64 { + if x != nil { + return x.ScheduledEventId + } + return 0 +} + +func (x *NexusOperationParentData) GetScheduledEventToken() []byte { + if x != nil { + return x.ScheduledEventToken + } + return nil +} + +// NexusCancellationParentData contains workflow-specific data stored in a nexus cancellation's +// parent_data field when the operation is embedded in a workflow. +type NexusCancellationParentData struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Event ID of the NEXUS_OPERATION_CANCEL_REQUESTED event. 
+ RequestedEventId int64 `protobuf:"varint,1,opt,name=requested_event_id,json=requestedEventId,proto3" json:"requested_event_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *NexusCancellationParentData) Reset() { + *x = NexusCancellationParentData{} + mi := &file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *NexusCancellationParentData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NexusCancellationParentData) ProtoMessage() {} + +func (x *NexusCancellationParentData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NexusCancellationParentData.ProtoReflect.Descriptor instead. +func (*NexusCancellationParentData) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescGZIP(), []int{1} +} + +func (x *NexusCancellationParentData) GetRequestedEventId() int64 { + if x != nil { + return x.RequestedEventId + } + return 0 +} + +// IncomingSignalData records the event associated with a signal's request ID, which allows +// DescribeWorkflow to resolve RequestIDRef signal backlinks. 
+type IncomingSignalData struct { + state protoimpl.MessageState `protogen:"open.v1"` + EventId int64 `protobuf:"varint,1,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *IncomingSignalData) Reset() { + *x = IncomingSignalData{} + mi := &file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *IncomingSignalData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IncomingSignalData) ProtoMessage() {} + +func (x *IncomingSignalData) ProtoReflect() protoreflect.Message { + mi := &file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IncomingSignalData.ProtoReflect.Descriptor instead. 
+func (*IncomingSignalData) Descriptor() ([]byte, []int) { + return file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescGZIP(), []int{2} +} + +func (x *IncomingSignalData) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +var File_temporal_server_chasm_lib_workflow_proto_v1_state_proto protoreflect.FileDescriptor + +const file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDesc = "" + + "\n" + + "7temporal/server/chasm/lib/workflow/proto/v1/state.proto\x12+temporal.server.chasm.lib.workflow.proto.v1\"|\n" + + "\x18NexusOperationParentData\x12,\n" + + "\x12scheduled_event_id\x18\x01 \x01(\x03R\x10scheduledEventId\x122\n" + + "\x15scheduled_event_token\x18\x02 \x01(\fR\x13scheduledEventToken\"K\n" + + "\x1bNexusCancellationParentData\x12,\n" + + "\x12requested_event_id\x18\x01 \x01(\x03R\x10requestedEventId\"/\n" + + "\x12IncomingSignalData\x12\x19\n" + + "\bevent_id\x18\x01 \x01(\x03R\aeventIdBDZBgo.temporal.io/server/chasm/lib/workflow/gen/workflowpb;workflowpbb\x06proto3" + +var ( + file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescOnce sync.Once + file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescData []byte +) + +func file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescGZIP() []byte { + file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescOnce.Do(func() { + file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDesc), len(file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDesc))) + }) + return file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDescData +} + +var file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_goTypes = []any{ + 
(*NexusOperationParentData)(nil), // 0: temporal.server.chasm.lib.workflow.proto.v1.NexusOperationParentData + (*NexusCancellationParentData)(nil), // 1: temporal.server.chasm.lib.workflow.proto.v1.NexusCancellationParentData + (*IncomingSignalData)(nil), // 2: temporal.server.chasm.lib.workflow.proto.v1.IncomingSignalData +} +var file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_init() } +func file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_init() { + if File_temporal_server_chasm_lib_workflow_proto_v1_state_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDesc), len(file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_goTypes, + DependencyIndexes: file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_depIdxs, + MessageInfos: file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_msgTypes, + }.Build() + File_temporal_server_chasm_lib_workflow_proto_v1_state_proto = out.File + file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_goTypes = nil + file_temporal_server_chasm_lib_workflow_proto_v1_state_proto_depIdxs = nil +} diff --git a/chasm/lib/workflow/library.go b/chasm/lib/workflow/library.go new file mode 100644 index 00000000000..759baf124bb --- /dev/null +++ 
b/chasm/lib/workflow/library.go @@ -0,0 +1,56 @@ +package workflow + +import ( + "go.temporal.io/server/chasm" +) + +type library struct { + chasm.UnimplementedLibrary + + registry *Registry +} + +func newLibrary(registry *Registry) *library { + return &library{ + registry: registry, + } +} + +// NewLibrary creates a new CHASM library for the workflow package. +func NewLibrary(registry *Registry) chasm.Library { + return newLibrary(registry) +} + +func (l *library) Name() string { + return chasm.WorkflowLibraryName +} + +type workflowContext struct { + registry *Registry +} + +type ctxKeyWorkflowContextType struct{} + +var ctxKeyWorkflowContext = ctxKeyWorkflowContextType{} + +func workflowContextFromChasm(ctx chasm.Context) *workflowContext { + wc, ok := ctx.Value(ctxKeyWorkflowContext).(*workflowContext) + if !ok { + return nil + } + return wc +} + +func (l *library) Components() []*chasm.RegistrableComponent { + return []*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*Workflow](chasm.WorkflowComponentName, chasm.WithContextValues(map[any]any{ + ctxKeyWorkflowContext: &workflowContext{registry: l.registry}, + })), + } +} + +// SetEventRegistryOnContext injects the event registry into a CHASM context. This is primarily +// useful for tests that construct MockMutableContext directly. 
func SetEventRegistryOnContext[C chasm.Context](ctx C, registry *Registry) C {
	return chasm.ContextWithValue(ctx, ctxKeyWorkflowContext, &workflowContext{registry: registry})
}
diff --git a/chasm/lib/workflow/nexus_commands.go b/chasm/lib/workflow/nexus_commands.go new file mode 100644 index 00000000000..32734ccf7cd --- /dev/null +++ b/chasm/lib/workflow/nexus_commands.go @@ -0,0 +1,297 @@
package workflow

import (
	"errors"
	"fmt"
	"slices"
	"strings"

	"github.com/google/uuid"
	"github.com/nexus-rpc/sdk-go/nexus"
	commandpb "go.temporal.io/api/command/v1"
	enumspb "go.temporal.io/api/enums/v1"
	historypb "go.temporal.io/api/history/v1"
	"go.temporal.io/api/serviceerror"
	"go.temporal.io/server/chasm"
	"go.temporal.io/server/chasm/lib/nexusoperation"
	commonnexus "go.temporal.io/server/common/nexus"
	"go.temporal.io/server/common/primitives/timestamp"
	"google.golang.org/protobuf/types/known/durationpb"
)

// nexusCommandHandler handles the workflow-task commands that schedule and
// cancel Nexus operations embedded in a CHASM workflow.
type nexusCommandHandler struct {
	config         *nexusoperation.Config
	nexusProcessor *chasm.NexusEndpointProcessor
}

// handleScheduleCommand validates a ScheduleNexusOperation command and, when
// all checks pass, appends and applies a NexusOperationScheduled history
// event. Validation failures are returned as FailWorkflowTaskError with cause
// BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES (with TerminateWorkflow set only
// for an oversized input payload); the pending-operation limit uses
// PENDING_NEXUS_OPERATIONS_LIMIT_EXCEEDED. The order of checks and the
// timeout-trimming logic deliberately mirror the HSM command handler.
//
//nolint:revive // cognitive-complexity: this is a direct port of the HSM command handler
func (ch *nexusCommandHandler) handleScheduleCommand(
	ctx chasm.MutableContext,
	wf *Workflow,
	validator Validator,
	cmd *commandpb.Command,
	opts CommandHandlerOptions,
) error {
	ns := ctx.NamespaceEntry()
	nsName := ns.Name().String()

	if !ch.config.EnableChasmNexusWorkflowOperations(nsName) {
		return ErrCommandNotSupported
	}

	attrs := cmd.GetScheduleNexusOperationCommandAttributes()
	if attrs == nil {
		return FailWorkflowTaskError{
			Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: "empty ScheduleNexusOperationCommandAttributes",
		}
	}

	requestID := uuid.NewString()
	var endpointID string
	// Skip endpoint registry lookup for __temporal_system endpoint
	if attrs.Endpoint == commonnexus.SystemEndpoint {
		if len(attrs.NexusHeader) > 0 {
			return FailWorkflowTaskError{
				Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
				Message: fmt.Sprintf("ScheduleNexusOperationCommandAttributes.NexusHeader must be empty when using %s endpoint", commonnexus.SystemEndpoint),
			}
		}
		// Run ProcessInput for validation.
		_, err := ch.nexusProcessor.ProcessInput(chasm.NexusOperationProcessorContext{
			Namespace: ns,
			RequestID: requestID,
			// Links are not needed for validation.
		}, attrs.Service, attrs.Operation, attrs.Input)
		if err != nil {
			var handlerErr *nexus.HandlerError
			if errors.As(err, &handlerErr) {
				// Only NotFound/BadRequest handler errors are the caller's
				// fault and fail the workflow task; other handler error types
				// fall through and are returned as-is.
				//nolint:exhaustive
				switch handlerErr.Type {
				case nexus.HandlerErrorTypeNotFound, nexus.HandlerErrorTypeBadRequest:
					return FailWorkflowTaskError{
						Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
						Message: handlerErr.Message,
					}
				}
			}
			return err
		}
	} else {
		endpoint, err := ctx.EndpointByName(attrs.Endpoint)
		if err != nil {
			if errors.As(err, new(*serviceerror.NotFound)) {
				return FailWorkflowTaskError{
					Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
					Message: fmt.Sprintf("endpoint %q not found", attrs.Endpoint),
				}
			}
			if errors.As(err, new(*serviceerror.PermissionDenied)) {
				return FailWorkflowTaskError{
					Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
					Message: fmt.Sprintf("caller namespace %q unauthorized for %q", ns.Name(), attrs.Endpoint),
				}
			}
			return err
		}
		endpointID = endpoint.Id
	}

	if len(attrs.Service) > ch.config.MaxServiceNameLength(nsName) {
		return FailWorkflowTaskError{
			Cause: enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf(
				"ScheduleNexusOperationCommandAttributes.Service exceeds length limit of %d",
				ch.config.MaxServiceNameLength(nsName),
			),
		}
	}

	if len(attrs.Operation) > ch.config.MaxOperationNameLength(nsName) {
		return FailWorkflowTaskError{
			Cause: enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf(
				"ScheduleNexusOperationCommandAttributes.Operation exceeds length limit of %d",
				ch.config.MaxOperationNameLength(nsName),
			),
		}
	}

	// The three timeout attributes are each validated (and capped) before any
	// trimming below.
	if err := timestamp.ValidateAndCapProtoDuration(attrs.ScheduleToCloseTimeout); err != nil {
		return FailWorkflowTaskError{
			Cause: enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf(
				"ScheduleNexusOperationCommandAttributes.ScheduleToCloseTimeout is invalid: %v", err),
		}
	}

	if err := timestamp.ValidateAndCapProtoDuration(attrs.ScheduleToStartTimeout); err != nil {
		return FailWorkflowTaskError{
			Cause: enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf(
				"ScheduleNexusOperationCommandAttributes.ScheduleToStartTimeout is invalid: %v", err),
		}
	}

	if err := timestamp.ValidateAndCapProtoDuration(attrs.StartToCloseTimeout); err != nil {
		return FailWorkflowTaskError{
			Cause: enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf(
				"ScheduleNexusOperationCommandAttributes.StartToCloseTimeout is invalid: %v", err),
		}
	}

	// Oversized input is the only failure that also terminates the workflow.
	if !validator.IsValidPayloadSize(attrs.Input.Size()) {
		return FailWorkflowTaskError{
			Cause:             enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message:           "ScheduleNexusOperationCommandAttributes.Input exceeds size limit",
			TerminateWorkflow: true,
		}
	}

	// Headers are lower-cased before the disallowed-key check and before being
	// recorded on the scheduled event; the size limit counts the lower-cased
	// key plus the value for every header.
	headerLength := 0
	lowerCaseHeader := make(map[string]string, len(attrs.NexusHeader))
	for k, v := range attrs.NexusHeader {
		lowerK := strings.ToLower(k)
		lowerCaseHeader[lowerK] = v
		headerLength += len(lowerK) + len(v)
		if slices.Contains(ch.config.DisallowedOperationHeaders(), lowerK) {
			return FailWorkflowTaskError{
				Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
				Message: fmt.Sprintf("ScheduleNexusOperationCommandAttributes.NexusHeader contains a disallowed header key: %q", k),
			}
		}
	}

	if headerLength > ch.config.MaxOperationHeaderSize(nsName) {
		return FailWorkflowTaskError{
			Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES,
			Message: "ScheduleNexusOperationCommandAttributes.NexusHeader exceeds size limit",
		}
	}

	maxPendingOperations := ch.config.MaxConcurrentOperationsPerWorkflow(nsName)
	if wf.pendingNexusOperationCount() >= maxPendingOperations {
		return FailWorkflowTaskError{
			Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_NEXUS_OPERATIONS_LIMIT_EXCEEDED,
			Message: fmt.Sprintf("workflow has reached the pending nexus operation limit of %d for this namespace", maxPendingOperations),
		}
	}

	// Trim timeout to workflow run timeout.
	runTimeout := wf.WorkflowRunTimeout()
	opTimeout := attrs.ScheduleToCloseTimeout.AsDuration()
	if runTimeout > 0 && (opTimeout == 0 || opTimeout > runTimeout) {
		attrs.ScheduleToCloseTimeout = durationpb.New(runTimeout)
		opTimeout = runTimeout
	}

	// Trim timeout to max allowed timeout.
	if maxTimeout := ch.config.MaxOperationScheduleToCloseTimeout(nsName); maxTimeout > 0 && opTimeout > maxTimeout {
		attrs.ScheduleToCloseTimeout = durationpb.New(maxTimeout)
	}

	// Trim secondary timeouts to the primary timeout.
	scheduleToCloseTimeout := attrs.ScheduleToCloseTimeout.AsDuration()
	scheduleToStartTimeout := attrs.ScheduleToStartTimeout.AsDuration()
	startToCloseTimeout := attrs.StartToCloseTimeout.AsDuration()

	if scheduleToCloseTimeout > 0 {
		if scheduleToStartTimeout > scheduleToCloseTimeout {
			attrs.ScheduleToStartTimeout = attrs.ScheduleToCloseTimeout
		}
		if startToCloseTimeout > scheduleToCloseTimeout {
			attrs.StartToCloseTimeout = attrs.ScheduleToCloseTimeout
		}
	}

	// Record the (possibly trimmed) attributes on the scheduled event.
	_, err := addAndApplyHistoryEvent[ScheduledEventDefinition](wf, ctx, func(he *historypb.HistoryEvent) {
		he.Attributes = &historypb.HistoryEvent_NexusOperationScheduledEventAttributes{
			NexusOperationScheduledEventAttributes: &historypb.NexusOperationScheduledEventAttributes{
				Endpoint:                     attrs.Endpoint,
				EndpointId:                   endpointID,
				Service:                      attrs.Service,
				Operation:                    attrs.Operation,
				Input:                        attrs.Input,
				ScheduleToCloseTimeout:       attrs.ScheduleToCloseTimeout,
				ScheduleToStartTimeout:       attrs.ScheduleToStartTimeout,
				StartToCloseTimeout:          attrs.StartToCloseTimeout,
				NexusHeader:                  lowerCaseHeader,
				RequestId:                    requestID,
				WorkflowTaskCompletedEventId: opts.WorkflowTaskCompletedEventID,
			},
		}
		he.UserMetadata = cmd.UserMetadata
	})
	return err
}

// handleCancelCommand validates a RequestCancelNexusOperation command and
// appends a NexusOperationCancelRequested history event. Cancelation of an
// unknown operation is accepted when a buffered terminal event for the same
// scheduled event ID exists (the operation just completed).
func (ch *nexusCommandHandler) handleCancelCommand(
	ctx chasm.MutableContext,
	wf *Workflow,
	validator Validator,
	cmd *commandpb.Command,
	opts CommandHandlerOptions,
) error {
	nsName := ctx.NamespaceEntry().Name().String()
	if !ch.config.EnableChasmNexusWorkflowOperations(nsName) {
		return ErrCommandNotSupported
	}

	attrs := cmd.GetRequestCancelNexusOperationCommandAttributes()
	if attrs == nil {
		return FailWorkflowTaskError{
			Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES,
			Message: "empty CancelNexusOperationCommandAttributes",
		}
	}

	_, operationFound := wf.Operations[attrs.ScheduledEventId]
	// Evaluated lazily: the buffered-event scan only runs when the operation
	// is not found in the pending set.
	hasBufferedEvent := func() bool {
		return wf.HasAnyBufferedEvent(makeNexusOperationTerminalEventFilter(attrs.ScheduledEventId))
	}

	if !operationFound && !hasBufferedEvent() {
		return FailWorkflowTaskError{
			Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf("requested cancelation for a non-existing or already completed operation with scheduled event ID of %d", attrs.ScheduledEventId),
		}
	}

	// Always create the event even if there's a buffered completion to avoid breaking replay in the SDK.
	// The event will be applied before the completion since buffered events are reordered and put at the end of the
	// batch, after command events from the workflow task.
	_, err := addAndApplyHistoryEvent[CancelRequestedEventDefinition](wf, ctx, func(he *historypb.HistoryEvent) {
		he.Attributes = &historypb.HistoryEvent_NexusOperationCancelRequestedEventAttributes{
			NexusOperationCancelRequestedEventAttributes: &historypb.NexusOperationCancelRequestedEventAttributes{
				ScheduledEventId:             attrs.ScheduledEventId,
				WorkflowTaskCompletedEventId: opts.WorkflowTaskCompletedEventID,
			},
		}
		he.UserMetadata = cmd.UserMetadata
	})
	if errors.Is(err, nexusoperation.ErrCancellationAlreadyRequested) {
		return FailWorkflowTaskError{
			Cause:   enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES,
			Message: fmt.Sprintf("cancelation was already requested for an operation with scheduled event ID %d", attrs.ScheduledEventId),
		}
	}
	return err
}

// makeNexusOperationTerminalEventFilter returns a predicate matching history
// events that terminate the Nexus operation scheduled at scheduledEventID:
// completed, failed, canceled, or timed out.
func makeNexusOperationTerminalEventFilter(scheduledEventID int64) func(event *historypb.HistoryEvent) bool {
	return func(event *historypb.HistoryEvent) bool {
		switch event.EventType {
		case enumspb.EVENT_TYPE_NEXUS_OPERATION_COMPLETED:
			return event.GetNexusOperationCompletedEventAttributes().GetScheduledEventId() == scheduledEventID
		case enumspb.EVENT_TYPE_NEXUS_OPERATION_FAILED:
			return event.GetNexusOperationFailedEventAttributes().GetScheduledEventId() == scheduledEventID
		case enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCELED:
			return event.GetNexusOperationCanceledEventAttributes().GetScheduledEventId() == scheduledEventID
		case enumspb.EVENT_TYPE_NEXUS_OPERATION_TIMED_OUT:
			return event.GetNexusOperationTimedOutEventAttributes().GetScheduledEventId() == scheduledEventID
		default:
			return false
		}
	}
}
diff --git a/chasm/lib/workflow/nexus_commands_test.go b/chasm/lib/workflow/nexus_commands_test.go new file mode 100644 index 00000000000..5f53f4bbad2 --- /dev/null +++ b/chasm/lib/workflow/nexus_commands_test.go @@ -0,0 +1,935 @@
package workflow

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	commandpb "go.temporal.io/api/command/v1"
	commonpb "go.temporal.io/api/common/v1"
	enumspb "go.temporal.io/api/enums/v1"
	historypb "go.temporal.io/api/history/v1"
	sdkpb "go.temporal.io/api/sdk/v1"
	"go.temporal.io/api/serviceerror"
	persistencespb "go.temporal.io/server/api/persistence/v1"
	"go.temporal.io/server/chasm"
	"go.temporal.io/server/chasm/lib/nexusoperation"
	nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1"
	workflowpb "go.temporal.io/server/chasm/lib/workflow/gen/workflowpb/v1"
	"go.temporal.io/server/common/dynamicconfig"
	"go.temporal.io/server/common/namespace"
	commonnexus "go.temporal.io/server/common/nexus"
	"go.temporal.io/server/common/nexus/nexustest"
	"go.temporal.io/server/service/history/historybuilder"
	"go.temporal.io/server/service/history/tests"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// commandValidator is a Validator stub enforcing a fixed payload-size limit.
type commandValidator struct {
	maxPayloadSize int
}

func (v commandValidator) IsValidPayloadSize(size int) bool {
	return size <= v.maxPayloadSize
}

// testContext bundles the fixtures shared by the command-handler tests.
// NOTE: the struct continues (remaining fields and closing brace) on the
// following lines of the file.
type testContext struct {
	chasmCtx *chasm.MockMutableContext
	wf       *Workflow
	backend  *chasm.MockNodeBackend
	execInfo *persistencespb.WorkflowExecutionInfo
scheduleHandler CommandHandler + cancelHandler CommandHandler + history *historypb.History + registry *Registry +} + +func (tcx *testContext) setHasAnyBufferedEvent(value bool) { + tcx.backend.HandleHasAnyBufferedEvent = func(filter historybuilder.BufferedEventFilter) bool { + return value + } +} + +var defaultConfig = &nexusoperation.Config{ + EnableChasmNexusWorkflowOperations: dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true), + MaxServiceNameLength: dynamicconfig.GetIntPropertyFnFilteredByNamespace(len("service")), + MaxOperationNameLength: dynamicconfig.GetIntPropertyFnFilteredByNamespace(len("op")), + MaxConcurrentOperationsPerWorkflow: dynamicconfig.GetIntPropertyFnFilteredByNamespace(2), + MaxOperationHeaderSize: dynamicconfig.GetIntPropertyFnFilteredByNamespace(20), + DisallowedOperationHeaders: dynamicconfig.GetTypedPropertyFn([]string{"request-timeout"}), + MaxOperationScheduleToCloseTimeout: dynamicconfig.GetDurationPropertyFnFilteredByNamespace(time.Hour * 24), +} + +func newTestContext(t *testing.T, cfg *nexusoperation.Config) testContext { + endpointReg := nexustest.FakeEndpointRegistry{ + OnGetByName: func(ctx context.Context, namespaceID namespace.ID, endpointName string) (*persistencespb.NexusEndpointEntry, error) { + if endpointName == "endpoint caller namespace unauthorized" { + return nil, serviceerror.NewPermissionDenied("caller namespace unauthorized", "") + } else if endpointName != "endpoint" { + return nil, serviceerror.NewNotFound("endpoint not found") + } + return &persistencespb.NexusEndpointEntry{Id: "endpoint-id"}, nil + }, + } + wfreg := NewRegistry() + nexusProcessor := chasm.NewNexusEndpointProcessor() + require.NoError(t, wfreg.Register(newNexusLibrary(cfg, nexusProcessor))) + + execInfo := &persistencespb.WorkflowExecutionInfo{} + backend := &chasm.MockNodeBackend{ + HandleGetExecutionInfo: func() *persistencespb.WorkflowExecutionInfo { + return execInfo + }, + HandleGetNamespaceEntry: func() *namespace.Namespace { + 
return tests.GlobalNamespaceEntry + }, + } + + lastEventID := int64(4) + history := &historypb.History{} + backend.HandleAddHistoryEvent = func(t enumspb.EventType, setAttrs func(he *historypb.HistoryEvent)) *historypb.HistoryEvent { + e := &historypb.HistoryEvent{ + Version: 1, + EventId: lastEventID, + EventTime: timestamppb.Now(), + } + lastEventID++ + setAttrs(e) + history.Events = append(history.Events, e) + return e + } + + chasmCtx := SetEventRegistryOnContext(&chasm.MockMutableContext{ + MockContext: chasm.MockContext{ + HandleNamespaceEntry: func() *namespace.Namespace { + return tests.GlobalNamespaceEntry + }, + HandleEndpointByName: func(name string) (*persistencespb.NexusEndpointEntry, error) { + return endpointReg.GetByName(context.Background(), tests.GlobalNamespaceEntry.ID(), name) + }, + HandleExecutionKey: func() chasm.ExecutionKey { + return chasm.ExecutionKey{ + NamespaceID: tests.GlobalNamespaceEntry.ID().String(), + } + }, + GoCtx: context.WithValue(context.Background(), nexusoperation.OperationContextKey, &nexusoperation.OperationContext{MetricTagConfig: dynamicconfig.GetTypedPropertyFn(nexusoperation.NexusMetricTagConfig{})}), + }, + }, wfreg) + + wf := &Workflow{ + MSPointer: chasm.NewMSPointer(backend), + } + + scheduleHandler, ok := wfreg.CommandHandler(enumspb.COMMAND_TYPE_SCHEDULE_NEXUS_OPERATION) + require.True(t, ok) + cancelHandler, ok := wfreg.CommandHandler(enumspb.COMMAND_TYPE_REQUEST_CANCEL_NEXUS_OPERATION) + require.True(t, ok) + + return testContext{ + chasmCtx: chasmCtx, + wf: wf, + backend: backend, + execInfo: execInfo, + history: history, + scheduleHandler: scheduleHandler, + cancelHandler: cancelHandler, + registry: wfreg, + } +} + +func TestHandleScheduleCommand(t *testing.T) { + t.Run("chasm nexus not enabled", func(t *testing.T) { + tcx := newTestContext(t, &nexusoperation.Config{ + EnableChasmNexusWorkflowOperations: dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), + }) + err := 
tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{}, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.ErrorIs(t, err, ErrCommandNotSupported) + require.Empty(t, tcx.history.Events) + }) + + t.Run("empty attributes", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{}, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("endpoint not found - rejected by config", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "not found", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("caller namespace unauthorized", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + 
Endpoint: "endpoint caller namespace unauthorized", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("exceeds max service length", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "too long", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("exceeds max operation length", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "too long", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, 
failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("exceeds max operation header size", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + NexusHeader: map[string]string{ + "key1234567890": "value1234567890", + }, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("invalid header keys", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + NexusHeader: map[string]string{ + "request-timeout": "1s", + }, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("exceeds max payload size", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, 
&commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + Input: &commonpb.Payload{ + Data: []byte("ab"), + }, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.True(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("exceeds max concurrent operations", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + for range 2 { + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + } + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_PENDING_NEXUS_OPERATIONS_LIMIT_EXCEEDED, failWFTErr.Cause) + require.Len(t, tcx.history.Events, 2) + }) + + 
t.Run("schedule to close timeout capped by run timeout", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + tcx.execInfo.WorkflowRunTimeout = durationpb.New(time.Hour) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(time.Hour * 2), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + require.Equal(t, time.Hour, tcx.history.Events[0].GetNexusOperationScheduledEventAttributes().ScheduleToCloseTimeout.AsDuration()) + }) + + t.Run("schedule to close timeout capped by dynamic config", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + cfg := *defaultConfig + cfg.MaxOperationScheduleToCloseTimeout = dynamicconfig.GetDurationPropertyFnFilteredByNamespace(time.Minute) + tcx := newTestContext(t, &cfg) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(time.Hour), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + require.Equal(t, time.Minute, tcx.history.Events[0].GetNexusOperationScheduledEventAttributes().ScheduleToCloseTimeout.AsDuration()) + }) + + timeoutCases := []struct { + name string + workflowRunTimeout *durationpb.Duration + commandTimeout 
*durationpb.Duration + expectedTimeout *durationpb.Duration + }{ + { + name: "operation timeout defaults to workflow run timeout", + workflowRunTimeout: durationpb.New(time.Minute), + commandTimeout: nil, + expectedTimeout: durationpb.New(time.Minute), + }, + { + name: "operation timeout trimmed to workflow run timeout", + workflowRunTimeout: durationpb.New(time.Minute), + commandTimeout: durationpb.New(time.Hour), + expectedTimeout: durationpb.New(time.Minute), + }, + { + name: "operation timeout left as is if less than workflow run timeout", + workflowRunTimeout: durationpb.New(time.Minute), + commandTimeout: durationpb.New(time.Second), + expectedTimeout: durationpb.New(time.Second), + }, + { + name: "operation timeout left as is if no workflow run timeout", + workflowRunTimeout: nil, + commandTimeout: durationpb.New(time.Second), + expectedTimeout: durationpb.New(time.Second), + }, + } + for _, tc := range timeoutCases { + t.Run(tc.name, func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + + tcx.execInfo.WorkflowRunTimeout = tc.workflowRunTimeout + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: tc.commandTimeout, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + require.Equal(t, tc.expectedTimeout.AsDuration(), tcx.history.Events[0].GetNexusOperationScheduledEventAttributes().ScheduleToCloseTimeout.AsDuration()) + }) + } + + t.Run("invalid schedule-to-start timeout", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, 
commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToStartTimeout: durationpb.New(-1 * time.Second), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("invalid start-to-close timeout", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + StartToCloseTimeout: durationpb.New(-1 * time.Second), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("schedule-to-start timeout trimmed to schedule-to-close timeout", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: 
&commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(30 * time.Minute), + ScheduleToStartTimeout: durationpb.New(time.Hour), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + eAttrs := tcx.history.Events[0].GetNexusOperationScheduledEventAttributes() + require.Equal(t, 30*time.Minute, eAttrs.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 30*time.Minute, eAttrs.ScheduleToCloseTimeout.AsDuration()) + }) + + t.Run("start-to-close timeout trimmed to schedule-to-close timeout", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(30 * time.Minute), + StartToCloseTimeout: durationpb.New(time.Hour), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + eAttrs := tcx.history.Events[0].GetNexusOperationScheduledEventAttributes() + require.Equal(t, 30*time.Minute, eAttrs.StartToCloseTimeout.AsDuration()) + require.Equal(t, 30*time.Minute, eAttrs.ScheduleToCloseTimeout.AsDuration()) + }) + + t.Run("both secondary timeouts trimmed to schedule-to-close timeout", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: 
&commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(30 * time.Minute), + ScheduleToStartTimeout: durationpb.New(time.Hour), + StartToCloseTimeout: durationpb.New(2 * time.Hour), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + eAttrs := tcx.history.Events[0].GetNexusOperationScheduledEventAttributes() + require.Equal(t, 30*time.Minute, eAttrs.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 30*time.Minute, eAttrs.StartToCloseTimeout.AsDuration()) + require.Equal(t, 30*time.Minute, eAttrs.ScheduleToCloseTimeout.AsDuration()) + }) + + t.Run("secondary timeouts not trimmed when less than schedule-to-close timeout", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(time.Hour), + ScheduleToStartTimeout: durationpb.New(20 * time.Minute), + StartToCloseTimeout: durationpb.New(30 * time.Minute), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + eAttrs := tcx.history.Events[0].GetNexusOperationScheduledEventAttributes() + require.Equal(t, 20*time.Minute, eAttrs.ScheduleToStartTimeout.AsDuration()) + require.Equal(t, 30*time.Minute, eAttrs.StartToCloseTimeout.AsDuration()) + require.Equal(t, time.Hour, 
eAttrs.ScheduleToCloseTimeout.AsDuration()) + }) + + t.Run("sets event attributes with UserMetadata and creates an operation component", func(t *testing.T) { + t.Skip("requires TransitionScheduled implementation") + tcx := newTestContext(t, defaultConfig) + cAttrs := &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + Input: &commonpb.Payload{}, + NexusHeader: map[string]string{ + "key": "value", + }, + ScheduleToCloseTimeout: durationpb.New(time.Hour), + } + userMetadata := &sdkpb.UserMetadata{ + Summary: &commonpb.Payload{ + Metadata: map[string][]byte{"test_key": []byte(`test_val`)}, + Data: []byte(`Test summary Data`), + }, + Details: &commonpb.Payload{ + Metadata: map[string][]byte{"test_key": []byte(`test_val`)}, + Data: []byte(`Test Details Data`), + }, + } + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: cAttrs, + }, + UserMetadata: userMetadata, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + + event := tcx.history.Events[0] + eAttrs := event.GetNexusOperationScheduledEventAttributes() + require.Equal(t, cAttrs.Service, eAttrs.Service) + require.Equal(t, cAttrs.Operation, eAttrs.Operation) + require.Equal(t, cAttrs.Input, eAttrs.Input) + require.Equal(t, cAttrs.ScheduleToCloseTimeout, eAttrs.ScheduleToCloseTimeout) + require.Equal(t, cAttrs.NexusHeader, eAttrs.NexusHeader) + require.Equal(t, int64(1), eAttrs.WorkflowTaskCompletedEventId) + + opField, ok := tcx.wf.Operations[event.EventId] + require.True(t, ok) + op := opField.Get(tcx.chasmCtx) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_SCHEDULED, op.Status) + + opParentData := &workflowpb.NexusOperationParentData{} + require.NoError(t, op.ParentData.UnmarshalTo(opParentData)) + 
require.EqualExportedValues(t, &workflowpb.NexusOperationParentData{ + ScheduledEventId: event.EventId, + ScheduledEventToken: []byte("test token"), + }, opParentData) + require.EqualExportedValues(t, userMetadata, event.UserMetadata) + }) + + t.Run("__temporal_system endpoint does not accept headers", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: commonnexus.SystemEndpoint, + Service: "service", + Operation: "op", + NexusHeader: map[string]string{"key": "value"}, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("__temporal_system endpoint is validated", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: commonnexus.SystemEndpoint, + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_SCHEDULE_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Contains(t, failWFTErr.Message, "not found") + require.Empty(t, 
tcx.history.Events) + }) +} + +func TestHandleCancelCommand(t *testing.T) { + t.Run("chasm nexus not enabled", func(t *testing.T) { + tcx := newTestContext(t, &nexusoperation.Config{ + EnableChasmNexusWorkflowOperations: dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), + }) + err := tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{}, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.ErrorIs(t, err, ErrCommandNotSupported) + require.Empty(t, tcx.history.Events) + }) + + t.Run("empty attributes", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{}, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("operation not found", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + + err := tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_RequestCancelNexusOperationCommandAttributes{ + RequestCancelNexusOperationCommandAttributes: &commandpb.RequestCancelNexusOperationCommandAttributes{ + ScheduledEventId: 5, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Empty(t, tcx.history.Events) + }) + + t.Run("operation already completed", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + + err := 
tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + event := tcx.history.Events[0] + + // TODO: Complete the operation using CHASM equivalent of CompletedEventDefinition. + tcx.wf.removeNexusOperation(event.EventId) + + // Try to cancel - should fail since operation is completed/deleted. + err = tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_RequestCancelNexusOperationCommandAttributes{ + RequestCancelNexusOperationCommandAttributes: &commandpb.RequestCancelNexusOperationCommandAttributes{ + ScheduledEventId: event.EventId, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.False(t, failWFTErr.TerminateWorkflow) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Len(t, tcx.history.Events, 1) // Only scheduled event should be recorded. 
+ }) + + t.Run("operation already completed - completion buffered", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + tcx.setHasAnyBufferedEvent(true) // simulate buffered terminal event + + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + event := tcx.history.Events[0] + + // TODO: Complete the operation using CHASM equivalent of CompletedEventDefinition. + tcx.wf.removeNexusOperation(event.EventId) + + // Try to cancel - should succeed because there's a buffered completion. + err = tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_RequestCancelNexusOperationCommandAttributes{ + RequestCancelNexusOperationCommandAttributes: &commandpb.RequestCancelNexusOperationCommandAttributes{ + ScheduledEventId: event.EventId, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 2) // Both scheduled and cancel requested events should be recorded. 
+ crAttrs := tcx.history.Events[1].GetNexusOperationCancelRequestedEventAttributes() + require.Equal(t, event.EventId, crAttrs.ScheduledEventId) + }) + + t.Run("sets event attributes with UserMetadata and spawns cancelation child", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + + // TODO: Replace with CHASM equivalent of ScheduledEventDefinition.Apply(). + event := tcx.history.Events[0] + op := tcx.wf.Operations[event.EventId].Get(tcx.chasmCtx) + op.SetStateMachineState(nexusoperationpb.OPERATION_STATUS_SCHEDULED) + + userMetadata := &sdkpb.UserMetadata{ + Summary: &commonpb.Payload{ + Metadata: map[string][]byte{"test_key": []byte(`test_val`)}, + Data: []byte(`Test summary Data`), + }, + Details: &commonpb.Payload{ + Metadata: map[string][]byte{"test_key": []byte(`test_val`)}, + Data: []byte(`Test Details Data`), + }, + } + require.Len(t, tcx.history.Events, 1) + + err = tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_RequestCancelNexusOperationCommandAttributes{ + RequestCancelNexusOperationCommandAttributes: &commandpb.RequestCancelNexusOperationCommandAttributes{ + ScheduledEventId: event.EventId, + }, + }, + UserMetadata: userMetadata, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + + opField, operationFound := tcx.wf.Operations[event.EventId] + require.True(t, operationFound) + + require.Len(t, tcx.history.Events, 2) + crAttrs := 
tcx.history.Events[1].GetNexusOperationCancelRequestedEventAttributes() + require.Equal(t, event.EventId, crAttrs.ScheduledEventId) + require.Equal(t, int64(1), crAttrs.WorkflowTaskCompletedEventId) + savedUserMetadata := tcx.history.Events[1].GetUserMetadata() + require.EqualExportedValues(t, userMetadata, savedUserMetadata) + + // Verify cancelation child component exists and has correct parent info. + op = opField.Get(tcx.chasmCtx) + cancellation, hasCancellation := op.Cancellation.TryGet(tcx.chasmCtx) + require.True(t, hasCancellation) + cancelParentData := &workflowpb.NexusCancellationParentData{} + require.NoError(t, cancellation.ParentData.UnmarshalTo(cancelParentData)) + require.EqualExportedValues(t, &workflowpb.NexusCancellationParentData{ + RequestedEventId: tcx.history.Events[1].EventId, + }, cancelParentData) + }) +} + +func TestOperationNodeDeletionOnTerminalEvents(t *testing.T) { + t.Skip("requires CHASM operation lifecycle implementation") + + scheduleOperation := func(t *testing.T, tcx testContext) (scheduledEvent *historypb.HistoryEvent, nodeID int64) { + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 100}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.Len(t, tcx.history.Events, 1) + scheduledEvent = tcx.history.Events[0] + + nodeID = scheduledEvent.EventId + _, ok := tcx.wf.Operations[nodeID] + require.True(t, ok) + return + } + + applyTerminalEventAndAssertDeletion := func( + t *testing.T, + tcx testContext, + scheduledEventID int64, + eventType enumspb.EventType, + eventAttr any, + ) { + nodeID := scheduledEventID + + event := &historypb.HistoryEvent{ + Version: 1, + EventId: scheduledEventID + 1, + 
EventType: eventType, + EventTime: timestamppb.Now(), + } + + switch eventType { //nolint:exhaustive + case enumspb.EVENT_TYPE_NEXUS_OPERATION_COMPLETED: + event.Attributes = &historypb.HistoryEvent_NexusOperationCompletedEventAttributes{ + NexusOperationCompletedEventAttributes: eventAttr.(*historypb.NexusOperationCompletedEventAttributes), + } + case enumspb.EVENT_TYPE_NEXUS_OPERATION_FAILED: + event.Attributes = &historypb.HistoryEvent_NexusOperationFailedEventAttributes{ + NexusOperationFailedEventAttributes: eventAttr.(*historypb.NexusOperationFailedEventAttributes), + } + case enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCELED: + event.Attributes = &historypb.HistoryEvent_NexusOperationCanceledEventAttributes{ + NexusOperationCanceledEventAttributes: eventAttr.(*historypb.NexusOperationCanceledEventAttributes), + } + case enumspb.EVENT_TYPE_NEXUS_OPERATION_TIMED_OUT: + event.Attributes = &historypb.HistoryEvent_NexusOperationTimedOutEventAttributes{ + NexusOperationTimedOutEventAttributes: eventAttr.(*historypb.NexusOperationTimedOutEventAttributes), + } + default: + panic(fmt.Sprintf("unexpected event type in test: %v", eventType)) + } + + // TODO: Apply the terminal event using CHASM equivalent of HSM EventDefinition. 
+ _ = event + + _, ok := tcx.wf.Operations[nodeID] + require.False(t, ok, "operation should be deleted after terminal event") + + err := tcx.cancelHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_RequestCancelNexusOperationCommandAttributes{ + RequestCancelNexusOperationCommandAttributes: &commandpb.RequestCancelNexusOperationCommandAttributes{ + ScheduledEventId: scheduledEventID, + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 2}) + var failWFTErr FailWorkflowTaskError + require.ErrorAs(t, err, &failWFTErr) + require.Equal(t, enumspb.WORKFLOW_TASK_FAILED_CAUSE_BAD_REQUEST_CANCEL_NEXUS_OPERATION_ATTRIBUTES, failWFTErr.Cause) + require.Len(t, tcx.history.Events, 1, "no new events after attempting to cancel a terminated operation") + } + + cases := []struct { + name string + eventType enumspb.EventType + eventAttr any + }{ + { + name: "completed event deletes node", + eventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_COMPLETED, + eventAttr: &historypb.NexusOperationCompletedEventAttributes{}, + }, + { + name: "failed event deletes node", + eventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_FAILED, + eventAttr: &historypb.NexusOperationFailedEventAttributes{}, + }, + { + name: "canceled event deletes node", + eventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCELED, + eventAttr: &historypb.NexusOperationCanceledEventAttributes{}, + }, + { + name: "timed out event deletes node", + eventType: enumspb.EVENT_TYPE_NEXUS_OPERATION_TIMED_OUT, + eventAttr: &historypb.NexusOperationTimedOutEventAttributes{}, + }, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, _ := scheduleOperation(t, tcx) + + switch a := tc.eventAttr.(type) { + case *historypb.NexusOperationCompletedEventAttributes: + a.ScheduledEventId = scheduledEvent.EventId + case *historypb.NexusOperationFailedEventAttributes: + a.ScheduledEventId = 
scheduledEvent.EventId + case *historypb.NexusOperationCanceledEventAttributes: + a.ScheduledEventId = scheduledEvent.EventId + case *historypb.NexusOperationTimedOutEventAttributes: + a.ScheduledEventId = scheduledEvent.EventId + default: + t.Fatalf("unexpected event attribute type: %T", tc.eventAttr) + } + + applyTerminalEventAndAssertDeletion(t, tcx, scheduledEvent.EventId, tc.eventType, tc.eventAttr) + }) + } +} diff --git a/chasm/lib/workflow/nexus_events.go b/chasm/lib/workflow/nexus_events.go new file mode 100644 index 00000000000..8e6296fc71b --- /dev/null +++ b/chasm/lib/workflow/nexus_events.go @@ -0,0 +1,351 @@ +package workflow + +import ( + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/nexusoperation" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + workflowpb "go.temporal.io/server/chasm/lib/workflow/gen/workflowpb/v1" + "google.golang.org/protobuf/types/known/anypb" +) + +// ScheduledEventDefinition handles the NexusOperationScheduled history event. 
+type ScheduledEventDefinition struct{} + +func (d ScheduledEventDefinition) IsWorkflowTaskTrigger() bool { + return false +} + +func (d ScheduledEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_SCHEDULED +} + +func (d ScheduledEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationScheduledEventAttributes() + + token, err := wf.GenerateEventLoadToken(event) + if err != nil { + return serviceerror.NewInternalf("failed to generate event load token: %v", err) + } + + parentData, err := anypb.New(&workflowpb.NexusOperationParentData{ + ScheduledEventId: event.GetEventId(), + ScheduledEventToken: token, + }) + if err != nil { + return serviceerror.NewInternalf("failed to marshal parent data: %v", err) + } + + op := nexusoperation.NewOperation(&nexusoperationpb.OperationState{ + EndpointId: attrs.GetEndpointId(), + Endpoint: attrs.GetEndpoint(), + Service: attrs.GetService(), + Operation: attrs.GetOperation(), + ScheduledTime: event.GetEventTime(), + ScheduleToStartTimeout: attrs.GetScheduleToStartTimeout(), + StartToCloseTimeout: attrs.GetStartToCloseTimeout(), + ScheduleToCloseTimeout: attrs.GetScheduleToCloseTimeout(), + RequestId: attrs.GetRequestId(), + ParentData: parentData, + Attempt: 0, + }) + + if err := nexusoperation.TransitionScheduled.Apply(op, ctx, nexusoperation.EventScheduled{}); err != nil { + return err + } + + wf.addNexusOperation(ctx, event.GetEventId(), op) + + return nil +} + +func (d ScheduledEventDefinition) CherryPick(_ chasm.MutableContext, _ *Workflow, _ *historypb.HistoryEvent, _ map[enumspb.ResetReapplyExcludeType]struct{}) error { + // We never cherry pick command events, and instead allow user logic to reschedule those commands. + return ErrEventNotCherryPickable +} + +// CancelRequestedEventDefinition handles the NexusOperationCancelRequested history event. 
+type CancelRequestedEventDefinition struct { +} + +func (d CancelRequestedEventDefinition) IsWorkflowTaskTrigger() bool { + return false +} + +func (d CancelRequestedEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCEL_REQUESTED +} + +func (d CancelRequestedEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationCancelRequestedEventAttributes() + field, ok := wf.Operations[attrs.GetScheduledEventId()] + if !ok { + // Operation may have already completed (buffered terminal event). Ignore. + return nil + } + + op := field.Get(ctx) + cancelParentData, err := anypb.New(&workflowpb.NexusCancellationParentData{ + RequestedEventId: event.GetEventId(), + }) + if err != nil { + return serviceerror.NewInternalf("failed to marshal cancellation parent data: %v", err) + } + + return op.RequestCancel(ctx, &nexusoperationpb.CancellationState{ + ParentData: cancelParentData, + }) +} + +func (d CancelRequestedEventDefinition) CherryPick(_ chasm.MutableContext, _ *Workflow, _ *historypb.HistoryEvent, _ map[enumspb.ResetReapplyExcludeType]struct{}) error { + // We never cherry pick command events, and instead allow user logic to reschedule those commands. + return ErrEventNotCherryPickable +} + +// CancelRequestCompletedEventDefinition handles the NexusOperationCancelRequestCompleted history event. 
+type CancelRequestCompletedEventDefinition struct { +} + +func (d CancelRequestCompletedEventDefinition) IsWorkflowTaskTrigger() bool { + return true +} + +func (d CancelRequestCompletedEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCEL_REQUEST_COMPLETED +} + +func (d CancelRequestCompletedEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationCancelRequestCompletedEventAttributes() + field, ok := wf.Operations[attrs.GetScheduledEventId()] + if !ok { + return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId()) + } + // Cancellation must be present to deliver a cancel request. + cancellation := field.Get(ctx).Cancellation.Get(ctx) + return nexusoperation.TransitionCancellationSucceeded.Apply(cancellation, ctx, nexusoperation.EventCancellationSucceeded{}) +} + +func (d CancelRequestCompletedEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error { + if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok { + return ErrEventNotCherryPickable + } + return d.Apply(ctx, wf, event) +} + +// CancelRequestFailedEventDefinition handles the NexusOperationCancelRequestFailed history event. 
+type CancelRequestFailedEventDefinition struct { +} + +func (d CancelRequestFailedEventDefinition) IsWorkflowTaskTrigger() bool { + return true +} + +func (d CancelRequestFailedEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCEL_REQUEST_FAILED +} + +func (d CancelRequestFailedEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationCancelRequestFailedEventAttributes() + field, ok := wf.Operations[attrs.GetScheduledEventId()] + if !ok { + return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId()) + } + // Cancellation must be present to deliver a cancel request. + cancellation := field.Get(ctx).Cancellation.Get(ctx) + return nexusoperation.TransitionCancellationFailed.Apply(cancellation, ctx, nexusoperation.EventCancellationFailed{ + Failure: attrs.GetFailure(), + }) +} + +func (d CancelRequestFailedEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error { + if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok { + return ErrEventNotCherryPickable + } + return d.Apply(ctx, wf, event) +} + +// StartedEventDefinition handles the NexusOperationStarted history event. 
+type StartedEventDefinition struct { +} + +func (d StartedEventDefinition) IsWorkflowTaskTrigger() bool { + return true +} + +func (d StartedEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_STARTED +} + +func (d StartedEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationStartedEventAttributes() + field, ok := wf.Operations[attrs.GetScheduledEventId()] + if !ok { + return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId()) + } + op := field.Get(ctx) + + startTime := event.GetEventTime().AsTime() + return nexusoperation.TransitionStarted.Apply(op, ctx, nexusoperation.EventStarted{ + OperationToken: attrs.GetOperationToken(), + StartTime: &startTime, + }) +} + +func (d StartedEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error { + if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok { + return ErrEventNotCherryPickable + } + return d.Apply(ctx, wf, event) +} + +// CompletedEventDefinition handles the NexusOperationCompleted history event. 
+type CompletedEventDefinition struct { +} + +func (d CompletedEventDefinition) IsWorkflowTaskTrigger() bool { + return true +} + +func (d CompletedEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_COMPLETED +} + +func (d CompletedEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationCompletedEventAttributes() + field, ok := wf.Operations[attrs.GetScheduledEventId()] + if !ok { + return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId()) + } + op := field.Get(ctx) + + completeTime := event.GetEventTime().AsTime() + if err := nexusoperation.TransitionSucceeded.Apply(op, ctx, nexusoperation.EventSucceeded{ + CompleteTime: &completeTime, + Result: attrs.GetResult(), + }); err != nil { + return err + } + wf.removeNexusOperation(attrs.GetScheduledEventId()) + return nil +} + +func (d CompletedEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error { + if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok { + return ErrEventNotCherryPickable + } + return d.Apply(ctx, wf, event) +} + +// FailedEventDefinition handles the NexusOperationFailed history event. 
type FailedEventDefinition struct {
}

// IsWorkflowTaskTrigger reports whether applying this event should trigger a
// new workflow task; always true for operation-failed events.
func (d FailedEventDefinition) IsWorkflowTaskTrigger() bool {
	return true
}

// Type returns the history event type this definition handles.
func (d FailedEventDefinition) Type() enumspb.EventType {
	return enumspb.EVENT_TYPE_NEXUS_OPERATION_FAILED
}

// Apply marks the tracked operation as failed, recording the event time as
// its completion time and the failure's cause (the wrapper failure is
// unwrapped via GetCause) as the terminal failure, then removes the operation
// from the workflow's pending set. Returns NotFound when no operation is
// tracked for the event's scheduled event ID.
func (d FailedEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error {
	attrs := event.GetNexusOperationFailedEventAttributes()
	field, ok := wf.Operations[attrs.GetScheduledEventId()]
	if !ok {
		return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId())
	}
	op := field.Get(ctx)

	completeTime := event.GetEventTime().AsTime()
	if err := nexusoperation.TransitionFailed.Apply(op, ctx, nexusoperation.EventFailed{
		CompleteTime: &completeTime,
		Failure:      attrs.GetFailure().GetCause(),
	}); err != nil {
		return err
	}
	wf.removeNexusOperation(attrs.GetScheduledEventId())
	return nil
}

// CherryPick re-applies the event during reset reapplication, unless nexus
// events are excluded via RESET_REAPPLY_EXCLUDE_TYPE_NEXUS.
func (d FailedEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error {
	if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok {
		return ErrEventNotCherryPickable
	}
	return d.Apply(ctx, wf, event)
}

// CanceledEventDefinition handles the NexusOperationCanceled history event.
+type CanceledEventDefinition struct { +} + +func (d CanceledEventDefinition) IsWorkflowTaskTrigger() bool { + return true +} + +func (d CanceledEventDefinition) Type() enumspb.EventType { + return enumspb.EVENT_TYPE_NEXUS_OPERATION_CANCELED +} + +func (d CanceledEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error { + attrs := event.GetNexusOperationCanceledEventAttributes() + field, ok := wf.Operations[attrs.GetScheduledEventId()] + if !ok { + return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId()) + } + op := field.Get(ctx) + + completeTime := event.GetEventTime().AsTime() + if err := nexusoperation.TransitionCanceled.Apply(op, ctx, nexusoperation.EventCanceled{ + CompleteTime: &completeTime, + Failure: attrs.GetFailure().GetCause(), + }); err != nil { + return err + } + wf.removeNexusOperation(attrs.GetScheduledEventId()) + return nil +} + +func (d CanceledEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error { + if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok { + return ErrEventNotCherryPickable + } + return d.Apply(ctx, wf, event) +} + +// TimedOutEventDefinition handles the NexusOperationTimedOut history event. 
type TimedOutEventDefinition struct {
}

// IsWorkflowTaskTrigger reports whether applying this event should trigger a
// new workflow task; always true for operation-timed-out events.
func (d TimedOutEventDefinition) IsWorkflowTaskTrigger() bool {
	return true
}

// Type returns the history event type this definition handles.
func (d TimedOutEventDefinition) Type() enumspb.EventType {
	return enumspb.EVENT_TYPE_NEXUS_OPERATION_TIMED_OUT
}

// Apply marks the tracked operation as timed out, recording the failure's
// cause as the terminal failure, then removes the operation from the
// workflow's pending set. Note: unlike the other terminal transitions here,
// EventTimedOut carries no completion time. Returns NotFound when no
// operation is tracked for the event's scheduled event ID.
func (d TimedOutEventDefinition) Apply(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent) error {
	attrs := event.GetNexusOperationTimedOutEventAttributes()
	field, ok := wf.Operations[attrs.GetScheduledEventId()]
	if !ok {
		return serviceerror.NewNotFoundf("nexus operation not found for scheduled event ID %d", attrs.GetScheduledEventId())
	}
	op := field.Get(ctx)

	if err := nexusoperation.TransitionTimedOut.Apply(op, ctx, nexusoperation.EventTimedOut{
		Failure: attrs.GetFailure().GetCause(),
	}); err != nil {
		return err
	}
	wf.removeNexusOperation(attrs.GetScheduledEventId())
	return nil
}

// CherryPick re-applies the event during reset reapplication, unless nexus
// events are excluded via RESET_REAPPLY_EXCLUDE_TYPE_NEXUS.
func (d TimedOutEventDefinition) CherryPick(ctx chasm.MutableContext, wf *Workflow, event *historypb.HistoryEvent, excludeTypes map[enumspb.ResetReapplyExcludeType]struct{}) error {
	if _, ok := excludeTypes[enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS]; ok {
		return ErrEventNotCherryPickable
	}
	return d.Apply(ctx, wf, event)
}
diff --git a/chasm/lib/workflow/nexus_events_test.go b/chasm/lib/workflow/nexus_events_test.go
new file mode 100644
index 00000000000..b16e55c164b
--- /dev/null
+++ b/chasm/lib/workflow/nexus_events_test.go
@@ -0,0 +1,453 @@
package workflow

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	commandpb "go.temporal.io/api/command/v1"
	enumspb "go.temporal.io/api/enums/v1"
	failurepb "go.temporal.io/api/failure/v1"
	historypb "go.temporal.io/api/history/v1"
	nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// scheduleOperation is a helper that schedules a nexus operation via the command
handler +// and returns the scheduled event and its scheduled event ID (used as the operation key). +func scheduleOperation(t *testing.T, tcx testContext) (*historypb.HistoryEvent, int64) { + t.Helper() + err := tcx.scheduleHandler(tcx.chasmCtx, tcx.wf, commandValidator{maxPayloadSize: 1}, &commandpb.Command{ + Attributes: &commandpb.Command_ScheduleNexusOperationCommandAttributes{ + ScheduleNexusOperationCommandAttributes: &commandpb.ScheduleNexusOperationCommandAttributes{ + Endpoint: "endpoint", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(time.Hour), + }, + }, + }, CommandHandlerOptions{WorkflowTaskCompletedEventID: 1}) + require.NoError(t, err) + require.NotEmpty(t, tcx.history.Events) + event := tcx.history.Events[len(tcx.history.Events)-1] + return event, event.EventId +} + +func applyStartedEvent(t *testing.T, tcx testContext, scheduledEventID int64, eventTime time.Time) { + t.Helper() + applyEventDefinition[StartedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventTime: timestamppb.New(eventTime), + Attributes: &historypb.HistoryEvent_NexusOperationStartedEventAttributes{ + NexusOperationStartedEventAttributes: &historypb.NexusOperationStartedEventAttributes{ + ScheduledEventId: scheduledEventID, + OperationToken: "token", + }, + }, + }) +} + +func applyEventDefinition[D EventDefinition]( + t *testing.T, + tcx testContext, + event *historypb.HistoryEvent, +) { + t.Helper() + def, ok := eventDefinitionByGoType[D](tcx.registry) + require.True(t, ok) + err := def.Apply(tcx.chasmCtx, tcx.wf, event) + require.NoError(t, err) +} + +func assertTerminalEventApplied( + t *testing.T, + tcx testContext, + key int64, + op *nexusoperationpb.OperationState, + expectedStatus nexusoperationpb.OperationStatus, +) { + t.Helper() + require.Equal(t, expectedStatus, op.GetStatus()) + _, ok := tcx.wf.Operations[key] + require.False(t, ok, "operation should be removed after terminal event") +} + +func TestCherryPick(t *testing.T) { + 
t.Run("should exclude nexus events", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, _ := scheduleOperation(t, tcx) + + nexusEventDefs := []EventDefinition{ + ScheduledEventDefinition{}, + StartedEventDefinition{}, + CompletedEventDefinition{}, + CancelRequestedEventDefinition{}, + CancelRequestCompletedEventDefinition{}, + CancelRequestFailedEventDefinition{}, + CanceledEventDefinition{}, + FailedEventDefinition{}, + TimedOutEventDefinition{}, + } + + excludeNexus := map[enumspb.ResetReapplyExcludeType]struct{}{ + enumspb.RESET_REAPPLY_EXCLUDE_TYPE_NEXUS: {}, + } + for _, def := range nexusEventDefs { + err := def.CherryPick(tcx.chasmCtx, tcx.wf, event, excludeNexus) + require.ErrorIs(t, err, ErrEventNotCherryPickable, + "%T should not be cherry-pickable when RESET_REAPPLY_EXCLUDE_TYPE_NEXUS is set", def) + } + }) + + t.Run("scheduled is never cherry-pickable", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, _ := scheduleOperation(t, tcx) + + def := ScheduledEventDefinition{} + err := def.CherryPick(tcx.chasmCtx, tcx.wf, event, nil) + require.ErrorIs(t, err, ErrEventNotCherryPickable) + }) + + t.Run("cancel requested is never cherry-pickable", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, _ := scheduleOperation(t, tcx) + + def := CancelRequestedEventDefinition{} + err := def.CherryPick(tcx.chasmCtx, tcx.wf, event, nil) + require.ErrorIs(t, err, ErrEventNotCherryPickable) + }) + + t.Run("started cherry-pick applies", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, _ := scheduleOperation(t, tcx) + + def := StartedEventDefinition{} + err := def.CherryPick(tcx.chasmCtx, tcx.wf, &historypb.HistoryEvent{ + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationStartedEventAttributes{ + NexusOperationStartedEventAttributes: &historypb.NexusOperationStartedEventAttributes{ + ScheduledEventId: event.EventId, + }, + }, + }, nil) + require.NoError(t, 
err) + }) + + t.Run("started double apply fails", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, _ := scheduleOperation(t, tcx) + + def := StartedEventDefinition{} + startedEvent := &historypb.HistoryEvent{ + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationStartedEventAttributes{ + NexusOperationStartedEventAttributes: &historypb.NexusOperationStartedEventAttributes{ + ScheduledEventId: event.EventId, + }, + }, + } + err := def.CherryPick(tcx.chasmCtx, tcx.wf, startedEvent, nil) + require.NoError(t, err) + // Second apply should fail — operation is already in STARTED state. + err = def.CherryPick(tcx.chasmCtx, tcx.wf, startedEvent, nil) + require.Error(t, err) + }) +} + +func TestCompletedEventDefinitionApply(t *testing.T) { + eventTime := time.Now().UTC() + buildEvent := func(scheduledEventID int64) *historypb.HistoryEvent { + return &historypb.HistoryEvent{ + EventTime: timestamppb.New(eventTime), + Attributes: &historypb.HistoryEvent_NexusOperationCompletedEventAttributes{ + NexusOperationCompletedEventAttributes: &historypb.NexusOperationCompletedEventAttributes{ + ScheduledEventId: scheduledEventID, + }, + }, + } + } + + t.Run("without started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + // no start event + applyEventDefinition[CompletedEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_SUCCEEDED) + require.Equal(t, eventTime, op.GetClosedTime().AsTime()) + }) + + t.Run("with started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + applyStartedEvent(t, tcx, 
scheduledEvent.EventId, eventTime) // add start event firsts + applyEventDefinition[CompletedEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_SUCCEEDED) + require.Equal(t, eventTime, op.GetClosedTime().AsTime()) + }) +} + +func TestFailedEventDefinitionApply(t *testing.T) { + eventTime := time.Now().UTC() + buildEvent := func(scheduledEventID int64) *historypb.HistoryEvent { + return &historypb.HistoryEvent{ + EventTime: timestamppb.New(eventTime), + Attributes: &historypb.HistoryEvent_NexusOperationFailedEventAttributes{ + NexusOperationFailedEventAttributes: &historypb.NexusOperationFailedEventAttributes{ + ScheduledEventId: scheduledEventID, + Failure: &failurepb.Failure{ + Message: "nexus operation failed", + Cause: &failurepb.Failure{Message: "operation failed"}, + }, + }, + }, + } + } + + t.Run("without started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + // no start event + applyEventDefinition[FailedEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_FAILED) + require.Equal(t, eventTime, op.GetClosedTime().AsTime()) + }) + + t.Run("with started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + applyStartedEvent(t, tcx, scheduledEvent.EventId, eventTime) // add start event first + applyEventDefinition[FailedEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_FAILED) + require.Equal(t, eventTime, 
op.GetClosedTime().AsTime()) + }) +} + +func TestCanceledEventDefinitionApply(t *testing.T) { + eventTime := time.Now().UTC() + buildEvent := func(scheduledEventID int64) *historypb.HistoryEvent { + return &historypb.HistoryEvent{ + EventTime: timestamppb.New(eventTime), + Attributes: &historypb.HistoryEvent_NexusOperationCanceledEventAttributes{ + NexusOperationCanceledEventAttributes: &historypb.NexusOperationCanceledEventAttributes{ + ScheduledEventId: scheduledEventID, + Failure: &failurepb.Failure{ + Message: "nexus operation canceled", + Cause: &failurepb.Failure{Message: "operation canceled"}, + }, + }, + }, + } + } + + t.Run("without started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + // no start event + applyEventDefinition[CanceledEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_CANCELED) + require.Equal(t, eventTime, op.GetClosedTime().AsTime()) + }) + + t.Run("with started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + applyStartedEvent(t, tcx, scheduledEvent.EventId, eventTime) // add start event first + applyEventDefinition[CanceledEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_CANCELED) + require.Equal(t, eventTime, op.GetClosedTime().AsTime()) + }) +} + +func TestTimedOutEventDefinitionApply(t *testing.T) { + eventTime := time.Now().UTC() + buildEvent := func(scheduledEventID int64) *historypb.HistoryEvent { + return &historypb.HistoryEvent{ + EventTime: timestamppb.New(eventTime), + Attributes: 
&historypb.HistoryEvent_NexusOperationTimedOutEventAttributes{ + NexusOperationTimedOutEventAttributes: &historypb.NexusOperationTimedOutEventAttributes{ + ScheduledEventId: scheduledEventID, + Failure: &failurepb.Failure{ + Message: "nexus operation timed out", + Cause: &failurepb.Failure{Message: "operation timed out"}, + }, + }, + }, + } + } + + t.Run("without started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + // no start event + applyEventDefinition[TimedOutEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_TIMED_OUT) + }) + + t.Run("with started event", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + scheduledEvent, key := scheduleOperation(t, tcx) + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + applyStartedEvent(t, tcx, scheduledEvent.EventId, eventTime) // add start event first + applyEventDefinition[TimedOutEventDefinition](t, tcx, buildEvent(scheduledEvent.EventId)) + assertTerminalEventApplied(t, tcx, key, op.OperationState, nexusoperationpb.OPERATION_STATUS_TIMED_OUT) + }) +} + +func TestScheduledEventDefinitionApply(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + + event := &historypb.HistoryEvent{ + EventId: int64(10), + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationScheduledEventAttributes{ + NexusOperationScheduledEventAttributes: &historypb.NexusOperationScheduledEventAttributes{ + Endpoint: "endpoint", + EndpointId: "endpoint-id", + Service: "service", + Operation: "op", + ScheduleToCloseTimeout: durationpb.New(time.Hour), + RequestId: "request-id", + WorkflowTaskCompletedEventId: 1, + }, + }, + } + + applyEventDefinition[ScheduledEventDefinition](t, tcx, event) + + 
field, ok := tcx.wf.Operations[event.EventId] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + require.Equal(t, "endpoint", op.GetEndpoint()) + require.Equal(t, "endpoint-id", op.GetEndpointId()) + require.Equal(t, "service", op.GetService()) + require.Equal(t, "op", op.GetOperation()) + require.Equal(t, "request-id", op.GetRequestId()) + require.Equal(t, int32(1), op.GetAttempt()) +} + +func TestStartedEventDefinitionApply(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, key := scheduleOperation(t, tcx) + startTime := time.Now().UTC() + + applyEventDefinition[StartedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventTime: timestamppb.New(startTime), + Attributes: &historypb.HistoryEvent_NexusOperationStartedEventAttributes{ + NexusOperationStartedEventAttributes: &historypb.NexusOperationStartedEventAttributes{ + ScheduledEventId: event.EventId, + OperationToken: "test-token", + }, + }, + }) + + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + require.Equal(t, nexusoperationpb.OPERATION_STATUS_STARTED, op.Status) + require.Equal(t, "test-token", op.GetOperationToken()) + require.Equal(t, startTime, op.GetStartedTime().AsTime()) +} + +func TestCancelRequestedEventDefinitionApply(t *testing.T) { + t.Run("creates cancellation child", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, key := scheduleOperation(t, tcx) + + applyEventDefinition[CancelRequestedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventId: int64(20), + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationCancelRequestedEventAttributes{ + NexusOperationCancelRequestedEventAttributes: &historypb.NexusOperationCancelRequestedEventAttributes{ + ScheduledEventId: event.EventId, + }, + }, + }) + + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + _, hasCancellation := op.Cancellation.TryGet(tcx.chasmCtx) + require.True(t, 
hasCancellation) + }) + + t.Run("tolerates missing operation", func(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + + applyEventDefinition[CancelRequestedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventId: int64(20), + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationCancelRequestedEventAttributes{ + NexusOperationCancelRequestedEventAttributes: &historypb.NexusOperationCancelRequestedEventAttributes{ + ScheduledEventId: 999, // non-existent + }, + }, + }) + }) +} + +func TestCancelRequestCompletedEventDefinitionApply(t *testing.T) { + tcx := newTestContext(t, defaultConfig) + event, key := scheduleOperation(t, tcx) + + // First, request cancellation. + applyEventDefinition[CancelRequestedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventId: int64(20), + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationCancelRequestedEventAttributes{ + NexusOperationCancelRequestedEventAttributes: &historypb.NexusOperationCancelRequestedEventAttributes{ + ScheduledEventId: event.EventId, + }, + }, + }) + + // Transition the operation to STARTED so the cancellation gets scheduled. + applyEventDefinition[StartedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationStartedEventAttributes{ + NexusOperationStartedEventAttributes: &historypb.NexusOperationStartedEventAttributes{ + ScheduledEventId: event.EventId, + OperationToken: "token", + }, + }, + }) + + // Now complete the cancel request. 
+ applyEventDefinition[CancelRequestCompletedEventDefinition](t, tcx, &historypb.HistoryEvent{ + EventTime: timestamppb.Now(), + Attributes: &historypb.HistoryEvent_NexusOperationCancelRequestCompletedEventAttributes{ + NexusOperationCancelRequestCompletedEventAttributes: &historypb.NexusOperationCancelRequestCompletedEventAttributes{ + ScheduledEventId: event.EventId, + }, + }, + }) + + field, ok := tcx.wf.Operations[key] + require.True(t, ok) + op := field.Get(tcx.chasmCtx) + cancellation, hasCancellation := op.Cancellation.TryGet(tcx.chasmCtx) + require.True(t, hasCancellation) + require.Equal(t, nexusoperationpb.CANCELLATION_STATUS_SUCCEEDED, cancellation.StateMachineState()) +} diff --git a/chasm/lib/workflow/nexus_library.go b/chasm/lib/workflow/nexus_library.go new file mode 100644 index 00000000000..6f179729f96 --- /dev/null +++ b/chasm/lib/workflow/nexus_library.go @@ -0,0 +1,38 @@ +package workflow + +import ( + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/nexusoperation" +) + +type nexusLibrary struct { + config *nexusoperation.Config + nexusProcessor *chasm.NexusEndpointProcessor +} + +func newNexusLibrary(config *nexusoperation.Config, nexusProcessor *chasm.NexusEndpointProcessor) *nexusLibrary { + return &nexusLibrary{config: config, nexusProcessor: nexusProcessor} +} + +func (l *nexusLibrary) CommandHandlers() map[enumspb.CommandType]CommandHandler { + h := &nexusCommandHandler{config: l.config, nexusProcessor: l.nexusProcessor} + return map[enumspb.CommandType]CommandHandler{ + enumspb.COMMAND_TYPE_SCHEDULE_NEXUS_OPERATION: h.handleScheduleCommand, + enumspb.COMMAND_TYPE_REQUEST_CANCEL_NEXUS_OPERATION: h.handleCancelCommand, + } +} + +func (l *nexusLibrary) EventDefinitions() []EventDefinition { + return []EventDefinition{ + ScheduledEventDefinition{}, + CancelRequestedEventDefinition{}, + CancelRequestCompletedEventDefinition{}, + CancelRequestFailedEventDefinition{}, + 
StartedEventDefinition{}, + CompletedEventDefinition{}, + FailedEventDefinition{}, + CanceledEventDefinition{}, + TimedOutEventDefinition{}, + } +} diff --git a/chasm/lib/workflow/nexus_methods.go b/chasm/lib/workflow/nexus_methods.go new file mode 100644 index 00000000000..e20ca619d21 --- /dev/null +++ b/chasm/lib/workflow/nexus_methods.go @@ -0,0 +1,360 @@ +package workflow + +import ( + "time" + + "github.com/nexus-rpc/sdk-go/nexus" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + failurepb "go.temporal.io/api/failure/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + workflowpb "go.temporal.io/api/workflow/v1" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/nexusoperation" + nexusoperationpb "go.temporal.io/server/chasm/lib/nexusoperation/gen/nexusoperationpb/v1" + chasmworkflowpb "go.temporal.io/server/chasm/lib/workflow/gen/workflowpb/v1" + commonnexus "go.temporal.io/server/common/nexus" + "go.temporal.io/server/common/nexus/nexusrpc" + "google.golang.org/protobuf/types/known/timestamppb" +) + +var _ nexusoperation.OperationStore = (*Workflow)(nil) + +// addNexusOperation adds a Nexus operation component to the workflow. +func (w *Workflow) addNexusOperation( + ctx chasm.MutableContext, + key int64, + op *nexusoperation.Operation, +) { + if w.Operations == nil { + w.Operations = make(chasm.Map[int64, *nexusoperation.Operation]) + } + w.Operations[key] = chasm.NewComponentField(ctx, op) +} + +// removeNexusOperation removes a Nexus operation from the workflow. +func (w *Workflow) removeNexusOperation(key int64) { + delete(w.Operations, key) +} + +// pendingNexusOperationCount returns the number of pending Nexus operations in the workflow. +func (w *Workflow) pendingNexusOperationCount() int { + return len(w.Operations) +} + +// OnNexusOperationStarted adds a NexusOperationStarted history event to the workflow and applies +// the corresponding event definition. 
+func (w *Workflow) OnNexusOperationStarted( + ctx chasm.MutableContext, + op *nexusoperation.Operation, + operationToken string, + startTime *time.Time, + links []*commonpb.Link, +) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + _, err := addAndApplyHistoryEvent[StartedEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationStartedEventAttributes{ + NexusOperationStartedEventAttributes: &historypb.NexusOperationStartedEventAttributes{ + ScheduledEventId: parentData.GetScheduledEventId(), + OperationToken: operationToken, + RequestId: op.GetRequestId(), + }, + } + e.Links = links + if startTime != nil { + // For completion-before-start, use the callback-provided start time for the synthetic started event. + e.EventTime = timestamppb.New(*startTime) + } + }) + return err +} + +// OnNexusOperationCanceled adds a NexusOperationCanceled history event to the workflow and applies +// the corresponding event definition. 
+func (w *Workflow) OnNexusOperationCanceled( + ctx chasm.MutableContext, + op *nexusoperation.Operation, + cause *failurepb.Failure, +) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + scheduledEventID := parentData.GetScheduledEventId() + _, err := addAndApplyHistoryEvent[CanceledEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationCanceledEventAttributes{ + NexusOperationCanceledEventAttributes: &historypb.NexusOperationCanceledEventAttributes{ + ScheduledEventId: scheduledEventID, + RequestId: op.GetRequestId(), + Failure: createNexusOperationFailure(op, scheduledEventID, cause), + }, + } + }) + return err +} + +// OnNexusOperationFailed adds a NexusOperationFailed history event to the workflow and applies +// the corresponding event definition. 
+func (w *Workflow) OnNexusOperationFailed( + ctx chasm.MutableContext, + op *nexusoperation.Operation, + cause *failurepb.Failure, +) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + scheduledEventID := parentData.GetScheduledEventId() + _, err := addAndApplyHistoryEvent[FailedEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationFailedEventAttributes{ + NexusOperationFailedEventAttributes: &historypb.NexusOperationFailedEventAttributes{ + ScheduledEventId: scheduledEventID, + RequestId: op.GetRequestId(), + Failure: createNexusOperationFailure(op, scheduledEventID, cause), + }, + } + }) + return err +} + +// OnNexusOperationCompleted adds a NexusOperationCompleted history event to the workflow and applies +// the corresponding event definition. +func (w *Workflow) OnNexusOperationCompleted( + ctx chasm.MutableContext, + op *nexusoperation.Operation, + result *commonpb.Payload, + links []*commonpb.Link, +) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + _, err := addAndApplyHistoryEvent[CompletedEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationCompletedEventAttributes{ + NexusOperationCompletedEventAttributes: &historypb.NexusOperationCompletedEventAttributes{ + ScheduledEventId: parentData.GetScheduledEventId(), + RequestId: op.GetRequestId(), + Result: result, + }, + } + e.Links = links + }) + return err +} + +// OnNexusOperationTimedOut adds a NexusOperationTimedOut history event to the workflow and applies +// the corresponding event definition. 
+func (w *Workflow) OnNexusOperationTimedOut( + ctx chasm.MutableContext, + op *nexusoperation.Operation, + cause *failurepb.Failure, + _ bool, +) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + scheduledEventID := parentData.GetScheduledEventId() + _, err := addAndApplyHistoryEvent[TimedOutEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationTimedOutEventAttributes{ + NexusOperationTimedOutEventAttributes: &historypb.NexusOperationTimedOutEventAttributes{ + ScheduledEventId: scheduledEventID, + RequestId: op.GetRequestId(), + Failure: createNexusOperationFailure(op, scheduledEventID, cause), + }, + } + }) + return err +} + +func (w *Workflow) OnNexusOperationCancellationCompleted(ctx chasm.MutableContext, op *nexusoperation.Operation) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + cancelParentData := &chasmworkflowpb.NexusCancellationParentData{} + if err := op.Cancellation.Get(ctx).GetParentData().UnmarshalTo(cancelParentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus cancellation parent data: %v", err) + } + + _, err := addAndApplyHistoryEvent[CancelRequestCompletedEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationCancelRequestCompletedEventAttributes{ + NexusOperationCancelRequestCompletedEventAttributes: &historypb.NexusOperationCancelRequestCompletedEventAttributes{ + ScheduledEventId: parentData.GetScheduledEventId(), + RequestedEventId: cancelParentData.GetRequestedEventId(), + }, + } + // nolint:revive // We must mutate 
here even if the linter doesn't like it. + e.WorkerMayIgnore = true // For compatibility with older SDKs. + }) + return err +} + +func (w *Workflow) OnNexusOperationCancellationFailed(ctx chasm.MutableContext, op *nexusoperation.Operation, failure *failurepb.Failure) error { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus operation parent data: %v", err) + } + + cancelParentData := &chasmworkflowpb.NexusCancellationParentData{} + if err := op.Cancellation.Get(ctx).GetParentData().UnmarshalTo(cancelParentData); err != nil { + return serviceerror.NewInternalf("failed to unmarshal nexus cancellation parent data: %v", err) + } + + _, err := addAndApplyHistoryEvent[CancelRequestFailedEventDefinition](w, ctx, func(e *historypb.HistoryEvent) { + e.Attributes = &historypb.HistoryEvent_NexusOperationCancelRequestFailedEventAttributes{ + NexusOperationCancelRequestFailedEventAttributes: &historypb.NexusOperationCancelRequestFailedEventAttributes{ + ScheduledEventId: parentData.GetScheduledEventId(), + RequestedEventId: cancelParentData.GetRequestedEventId(), + Failure: failure, + }, + } + // nolint:revive // We must mutate here even if the linter doesn't like it. + e.WorkerMayIgnore = true // For compatibility with older SDKs. + }) + return err +} + +// NexusOperationInvocationData loads invocation data from the scheduled history event. 
+func (w *Workflow) NexusOperationInvocationData( + ctx chasm.Context, + op *nexusoperation.Operation, +) (nexusoperation.InvocationData, error) { + parentData := &chasmworkflowpb.NexusOperationParentData{} + if err := op.GetParentData().UnmarshalTo(parentData); err != nil { + return nexusoperation.InvocationData{}, serviceerror.NewInternalf( + "failed to unmarshal nexus operation parent data: %v", err, + ) + } + + event, err := w.LoadHistoryEvent(ctx, parentData.GetScheduledEventToken()) + if err != nil { + return nexusoperation.InvocationData{}, err + } + + attrs := event.GetNexusOperationScheduledEventAttributes() + execKey := ctx.ExecutionKey() + nsEntry := ctx.NamespaceEntry() + + nexusLink := commonnexus.ConvertLinkWorkflowEventToNexusLink(&commonpb.Link_WorkflowEvent{ + Namespace: nsEntry.Name().String(), + WorkflowId: execKey.BusinessID, + RunId: execKey.RunID, + Reference: &commonpb.Link_WorkflowEvent_EventRef{ + EventRef: &commonpb.Link_WorkflowEvent_EventReference{ + EventId: event.GetEventId(), + EventType: event.GetEventType(), + }, + }, + }) + + return nexusoperation.InvocationData{ + Input: attrs.GetInput(), + Header: attrs.GetNexusHeader(), + NexusLinks: []nexus.Link{nexusLink}, + }, nil +} + +func (w *Workflow) GetNexusCompletion( + ctx chasm.Context, + requestID string, +) (nexusrpc.CompleteOperationOptions, error) { + // Retrieve the completion data from the underlying mutable state via MSPointer + return w.MSPointer.GetNexusCompletion(ctx, requestID) +} + +// BuildPendingNexusOperationInfos reads nexus operations from the workflow and converts them to API format. 
+func (w *Workflow) BuildPendingNexusOperationInfos( + ctx chasm.Context, + circuitBreaker func(endpoint string) bool, +) ([]*workflowpb.PendingNexusOperationInfo, error) { + var result []*workflowpb.PendingNexusOperationInfo + for key, field := range w.Operations { + op := field.Get(ctx) + + if op.GetStatus() == nexusoperationpb.OPERATION_STATUS_UNSPECIFIED { + return nil, serviceerror.NewInternal("Nexus operation with UNSPECIFIED state") + } + + state := nexusoperation.PendingOperationState(op.GetStatus()) + if state == enumspb.PENDING_NEXUS_OPERATION_STATE_UNSPECIFIED { + // Operation is not pending. + continue + } + + blockedReason := "" + if state == enumspb.PENDING_NEXUS_OPERATION_STATE_SCHEDULED && circuitBreaker(op.GetEndpoint()) { + state = enumspb.PENDING_NEXUS_OPERATION_STATE_BLOCKED + blockedReason = "The circuit breaker is open." + } + + info := &workflowpb.PendingNexusOperationInfo{ + Endpoint: op.GetEndpoint(), + Service: op.GetService(), + Operation: op.GetOperation(), + OperationId: op.GetOperationToken(), + OperationToken: op.GetOperationToken(), + ScheduledEventId: key, + ScheduleToCloseTimeout: op.GetScheduleToCloseTimeout(), + ScheduleToStartTimeout: op.GetScheduleToStartTimeout(), + StartToCloseTimeout: op.GetStartToCloseTimeout(), + ScheduledTime: op.GetScheduledTime(), + State: state, + Attempt: op.GetAttempt(), + LastAttemptCompleteTime: op.GetLastAttemptCompleteTime(), + LastAttemptFailure: op.GetLastAttemptFailure(), + NextAttemptScheduleTime: op.GetNextAttemptScheduleTime(), + BlockedReason: blockedReason, + } + + if cancel, ok := op.Cancellation.TryGet(ctx); ok { + state := nexusoperation.CancellationAPIState(cancel.Status) + blockedReason := "" + + if state == enumspb.NEXUS_OPERATION_CANCELLATION_STATE_SCHEDULED && circuitBreaker(info.Endpoint) { + state = enumspb.NEXUS_OPERATION_CANCELLATION_STATE_BLOCKED + blockedReason = "The circuit breaker is open." 
+ } + + info.CancellationInfo = &workflowpb.NexusOperationCancellationInfo{ + RequestedTime: cancel.RequestedTime, + State: state, + Attempt: cancel.Attempt, + LastAttemptCompleteTime: cancel.LastAttemptCompleteTime, + LastAttemptFailure: cancel.LastAttemptFailure, + NextAttemptScheduleTime: cancel.NextAttemptScheduleTime, + BlockedReason: blockedReason, + } + } + + result = append(result, info) + } + return result, nil +} + +// createNexusOperationFailure creates a NexusOperationExecutionFailure wrapping the given cause. +func createNexusOperationFailure(op *nexusoperation.Operation, scheduledEventID int64, cause *failurepb.Failure) *failurepb.Failure { + return &failurepb.Failure{ + Message: "nexus operation completed unsuccessfully", + FailureInfo: &failurepb.Failure_NexusOperationExecutionFailureInfo{ + NexusOperationExecutionFailureInfo: &failurepb.NexusOperationFailureInfo{ + Endpoint: op.GetEndpoint(), + Service: op.GetService(), + Operation: op.GetOperation(), + OperationToken: op.GetOperationToken(), + ScheduledEventId: scheduledEventID, + }, + }, + Cause: cause, + } +} diff --git a/chasm/lib/workflow/proto/v1/state.proto b/chasm/lib/workflow/proto/v1/state.proto new file mode 100644 index 00000000000..0ff01d116b5 --- /dev/null +++ b/chasm/lib/workflow/proto/v1/state.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package temporal.server.chasm.lib.workflow.proto.v1; + +option go_package = "go.temporal.io/server/chasm/lib/workflow/gen/workflowpb;workflowpb"; + +// NexusOperationParentData contains workflow-specific data stored in a nexus operation's +// parent_data field when the operation is embedded in a workflow. +message NexusOperationParentData { + // Event ID of the NEXUS_OPERATION_SCHEDULED event. + int64 scheduled_event_id = 1; + // Token for loading the NEXUS_OPERATION_SCHEDULED event. 
+ bytes scheduled_event_token = 2; +} + +// NexusCancellationParentData contains workflow-specific data stored in a nexus cancellation's +// parent_data field when the operation is embedded in a workflow. +message NexusCancellationParentData { + // Event ID of the NEXUS_OPERATION_CANCEL_REQUESTED event. + int64 requested_event_id = 1; +} + +// IncomingSignalData records the event associated with a signal's request ID, which allows +// DescribeWorkflow to resolve RequestIDRef signal backlinks. +message IncomingSignalData { + int64 event_id = 1; +} diff --git a/chasm/lib/workflow/registry.go b/chasm/lib/workflow/registry.go new file mode 100644 index 00000000000..ebe1c3c5aaa --- /dev/null +++ b/chasm/lib/workflow/registry.go @@ -0,0 +1,131 @@ +package workflow + +import ( + "errors" + "fmt" + "reflect" + + commandpb "go.temporal.io/api/command/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/chasm" +) + +// ErrDuplicateRegistration is returned by a [Registry] when it detects duplicate registration. +var ErrDuplicateRegistration = errors.New("duplicate registration") + +// Library is an interface for registering command handlers and event definitions with a [Registry]. +type Library interface { + CommandHandlers() map[enumspb.CommandType]CommandHandler + EventDefinitions() []EventDefinition +} + +// Registry maintains the following mappings for a workflow: +// CommandType -> Handler +// EventType -> EventDefinition +type Registry struct { + commandHandlers map[enumspb.CommandType]CommandHandler + eventDefinitions map[enumspb.EventType]EventDefinition + eventDefinitionsByGoType map[reflect.Type]EventDefinition +} + +// NewRegistry creates a new [Registry].
+func NewRegistry() *Registry { + return &Registry{ + commandHandlers: make(map[enumspb.CommandType]CommandHandler), + eventDefinitions: make(map[enumspb.EventType]EventDefinition), + eventDefinitionsByGoType: make(map[reflect.Type]EventDefinition), + } +} + +// Register registers all command handlers and event definitions from a [Library]. +// Returns an [ErrDuplicateRegistration] if a handler or definition is already registered. +// All registration is expected to happen in a single thread on process initialization. +func (r *Registry) Register(lib Library) error { + for t, handler := range lib.CommandHandlers() { + if existing, ok := r.commandHandlers[t]; ok { + return fmt.Errorf("%w: command handler for %v: %v", ErrDuplicateRegistration, t, existing) + } + r.commandHandlers[t] = handler + } + for _, def := range lib.EventDefinitions() { + if existing, ok := r.eventDefinitions[def.Type()]; ok { + return fmt.Errorf("%w: event handler for %v: %v", ErrDuplicateRegistration, def.Type(), existing) + } + goType := reflect.TypeOf(def) + for goType.Kind() == reflect.Pointer { + goType = goType.Elem() + } + if existing, ok := r.eventDefinitionsByGoType[goType]; ok { + return fmt.Errorf("%w: event definition for Go type %v: %v", ErrDuplicateRegistration, goType, existing) + } + r.eventDefinitions[def.Type()] = def + r.eventDefinitionsByGoType[goType] = def + } + return nil +} + +// CommandHandler returns a [CommandHandler] for a given command type and a boolean indicating whether it was found. +func (r *Registry) CommandHandler(t enumspb.CommandType) (handler CommandHandler, ok bool) { + handler, ok = r.commandHandlers[t] + return +} + +// EventDefinitionByEventType returns an [EventDefinition] for a given event type and a boolean indicating whether it was found. 
+func (r *Registry) EventDefinitionByEventType(t enumspb.EventType) (EventDefinition, bool) { + def, ok := r.eventDefinitions[t] + return def, ok +} + +// eventDefinitionByGoType returns an [EventDefinition] for a given Go type and a boolean indicating whether it was found. +// Registration by Go type allows easy go-to-definition navigation in call sites. +func eventDefinitionByGoType[D EventDefinition](r *Registry) (D, bool) { + var zero D + goType := reflect.TypeFor[D]() + for goType.Kind() == reflect.Pointer { + goType = goType.Elem() + } + def, ok := r.eventDefinitionsByGoType[goType] + if !ok { + return zero, false + } + d, ok := def.(D) + if !ok { + // D is a struct but def was registered as a pointer; dereference. + d, ok = reflect.ValueOf(def).Elem().Interface().(D) + } + return d, ok +} + +// ErrCommandNotSupported is returned by a [CommandHandler] when the command type is registered but not supported; +// for example, because of a disabled feature flag. +var ErrCommandNotSupported = errors.New("command not supported") + +type CommandHandlerOptions struct { + WorkflowTaskCompletedEventID int64 +} + +// CommandHandler is a function for handling a workflow command as part of processing a RespondWorkflowTaskCompleted +// worker request. +type CommandHandler func( + chasmCtx chasm.MutableContext, + wf *Workflow, + validator Validator, + command *commandpb.Command, + opts CommandHandlerOptions, +) error + +// Validator is a helper for validating workflow commands. +type Validator interface { + // IsValidPayloadSize validates that a payload size is within the configured limits. + IsValidPayloadSize(size int) bool +} + +// FailWorkflowTaskError is an error that can be returned from a [CommandHandler] to fail the current workflow task and +// optionally terminate the entire workflow. 
+type FailWorkflowTaskError struct { + Cause enumspb.WorkflowTaskFailedCause + Message string + TerminateWorkflow bool +} + +func (e FailWorkflowTaskError) Error() string { return e.Message } diff --git a/chasm/lib/workflow/workflow.go b/chasm/lib/workflow/workflow.go new file mode 100644 index 00000000000..8e3148cc03a --- /dev/null +++ b/chasm/lib/workflow/workflow.go @@ -0,0 +1,193 @@ +package workflow + +import ( + "fmt" + + commonpb "go.temporal.io/api/common/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/chasm" + "go.temporal.io/server/chasm/lib/callback" + callbackspb "go.temporal.io/server/chasm/lib/callback/gen/callbackpb/v1" + "go.temporal.io/server/chasm/lib/nexusoperation" + chasmworkflowpb "go.temporal.io/server/chasm/lib/workflow/gen/workflowpb/v1" + "go.temporal.io/server/service/history/historybuilder" + "google.golang.org/protobuf/types/known/emptypb" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type Workflow struct { + chasm.UnimplementedComponent + + // For now, workflow state is managed by mutable_state_impl, not CHASM engine, leaving it empty as CHASM expects a + // state object. + *emptypb.Empty + + // MSPointer is a special in-memory field for accessing the underlying mutable state. + chasm.MSPointer + + // Callbacks map is used to store the callbacks for the workflow. + Callbacks chasm.Map[string, *callback.Callback] + + // Operations map is used to store the Nexus operations for the workflow, keyed by scheduled event ID. + Operations chasm.Map[int64, *nexusoperation.Operation] + + // IncomingSignals map is used to track incoming signals, keyed by request ID, + // to allow DescribeWorkflow to resolve RequestIDRef signal backlinks. 
+ IncomingSignals chasm.Map[string, *chasmworkflowpb.IncomingSignalData] +} + +func NewWorkflow( + _ chasm.MutableContext, + msPointer chasm.MSPointer, +) *Workflow { + return &Workflow{ + MSPointer: msPointer, + } +} + +func (w *Workflow) LifecycleState( + _ chasm.Context, +) chasm.LifecycleState { + // NOTE: closeTransactionHandleRootLifecycleChange() is bypassed in tree.go + // + // NOTE: detached mode is not implemented yet, so always return Running here. + // Otherwise, tasks for callback component can't be executed after workflow is closed. + return chasm.LifecycleStateRunning +} + +func (w *Workflow) ContextMetadata(_ chasm.Context) map[string]string { + // TODO: Export workflow metadata from the CHASM workflow root instead of CloseTransaction(). + return nil +} + +func (w *Workflow) Terminate( + _ chasm.MutableContext, + _ chasm.TerminateComponentRequest, +) (chasm.TerminateComponentResponse, error) { + return chasm.TerminateComponentResponse{}, serviceerror.NewInternal("workflow root Terminate should not be called") +} + +// AddCompletionCallbacks creates completion callbacks using the CHASM implementation. +// maxCallbacksPerWorkflow is the configured maximum number of callbacks allowed per workflow. 
+func (w *Workflow) AddCompletionCallbacks( + ctx chasm.MutableContext, + eventTime *timestamppb.Timestamp, + requestID string, + completionCallbacks []*commonpb.Callback, + maxCallbacksPerWorkflow int, +) error { + // Check CHASM max callbacks limit + currentCallbackCount := len(w.Callbacks) + if len(completionCallbacks)+currentCallbackCount > maxCallbacksPerWorkflow { + return serviceerror.NewFailedPreconditionf( + "cannot attach more than %d callbacks to a workflow (%d callbacks already attached)", + maxCallbacksPerWorkflow, + currentCallbackCount, + ) + } + + // Initialize map if needed + if w.Callbacks == nil { + w.Callbacks = make(chasm.Map[string, *callback.Callback], len(completionCallbacks)) + } + + // Add each callback + for idx, cb := range completionCallbacks { + chasmCB := &callbackspb.Callback{ + Links: cb.GetLinks(), + } + switch variant := cb.Variant.(type) { + case *commonpb.Callback_Nexus_: + chasmCB.Variant = &callbackspb.Callback_Nexus_{ + Nexus: &callbackspb.Callback_Nexus{ + Url: variant.Nexus.GetUrl(), + Header: variant.Nexus.GetHeader(), + }, + } + default: + return serviceerror.NewInvalidArgumentf("unsupported callback variant: %T", variant) + } + + // requestID (unique per API call) + idx (position within the request) ensures unique, idempotent callback IDs. + // Unlike HSM callbacks, CHASM replicates entire trees rather than replaying events, so deterministic + // cross-cluster IDs based on event version are not needed. + id := fmt.Sprintf("%s-%d", requestID, idx) + + // Create and add callback + callbackObj := callback.NewCallback(requestID, eventTime, &callbackspb.CallbackState{}, chasmCB) + w.Callbacks[id] = chasm.NewComponentField(ctx, callbackObj) + } + return nil +} + +// addAndApplyHistoryEvent adds a history event to the workflow and applies the corresponding event definition, +// looked up by Go type. This is the preferred way to add and apply events as it provides go-to-definition navigation. 
+func addAndApplyHistoryEvent[D EventDefinition]( + w *Workflow, + ctx chasm.MutableContext, + setAttributes func(*historypb.HistoryEvent), +) (*historypb.HistoryEvent, error) { + def, ok := eventDefinitionByGoType[D](workflowContextFromChasm(ctx).registry) + if !ok { + return nil, serviceerror.NewInternalf("no event definition registered for Go type %T", (*D)(nil)) + } + event := w.AddHistoryEvent(def.Type(), setAttributes) + return event, def.Apply(ctx, w, event) +} + +// AddIncomingSignalEvent adds an entry for the signal requestID -> eventID mapping to +// track all signals that have been received by the workflow. +// Note that since signals are buffered, the eventID may be the common.BufferedEventID, which +// will be updated to a concrete eventID once this signal is flushed to the DB. +// If caller tries to add an already-existing requestID, this function will ignore and silently return +// instead of overwriting -- use UpdateIncomingSignalEvent to update existing entries. +func (w *Workflow) AddIncomingSignalEvent( + ctx chasm.MutableContext, + requestID string, + eventID int64, +) error { + if w.IncomingSignals == nil { + w.IncomingSignals = make(chasm.Map[string, *chasmworkflowpb.IncomingSignalData]) + } + if w.HasIncomingSignalEvent(ctx, requestID) { + return nil + } + w.IncomingSignals[requestID] = chasm.NewDataField(ctx, &chasmworkflowpb.IncomingSignalData{ + // This might be common.BufferedEventID, which will be updated via UpdateIncomingSignalEvent + // once this signal is flushed to DB. + EventId: eventID, + }) + return nil +} + +// UpdateIncomingSignalEvent updates the eventID for an existing signal requestID in the map. +// If the requestID is not in the map, this is a no-op (e.g. when called for non-signal request IDs +// during buffer flush).
+func (w *Workflow) UpdateIncomingSignalEvent( + ctx chasm.MutableContext, + requestID string, + eventID int64, +) error { + if w.HasIncomingSignalEvent(ctx, requestID) { + w.IncomingSignals[requestID].Get(ctx).EventId = eventID + } + + return nil +} + +// HasIncomingSignalEvent returns true if a signal with this requestID is already persisted +// in this CHASM tree. +func (w *Workflow) HasIncomingSignalEvent(_ chasm.Context, requestID string) bool { + _, exists := w.IncomingSignals[requestID] + return exists +} + +// HasAnyBufferedEvent returns true if the workflow has any buffered event matching the given filter. +func (w *Workflow) HasAnyBufferedEvent(filter historybuilder.BufferedEventFilter) bool { + return w.MSPointer.HasAnyBufferedEvent(filter) +} + +func (w *Workflow) WorkflowTypeName() string { + return w.GetWorkflowTypeName() +} diff --git a/chasm/library.go b/chasm/library.go new file mode 100644 index 00000000000..af994a7e13a --- /dev/null +++ b/chasm/library.go @@ -0,0 +1,60 @@ +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination library_mock.go + +package chasm + +import ( + "github.com/nexus-rpc/sdk-go/nexus" + "google.golang.org/grpc" +) + +type ( + Library interface { + Name() string + Components() []*RegistrableComponent + Tasks() []*RegistrableTask + RegisterServices(server *grpc.Server) + // NexusServices returns a list of nexus.Service instances to register with the __temporal_system Nexus endpoint. + NexusServices() []*nexus.Service + // NexusServiceProcessors returns a list of NexusServiceProcessor instances to register with the __temporal_system + // Nexus endpoint. 
+ NexusServiceProcessors() []*NexusServiceProcessor + + mustEmbedUnimplementedLibrary() + } + + UnimplementedLibrary struct{} + + namer interface { + Name() string + } +) + +func (UnimplementedLibrary) Components() []*RegistrableComponent { + return nil +} + +func (UnimplementedLibrary) Tasks() []*RegistrableTask { + return nil +} + +// RegisterServices Registers the gRPC calls to the handlers of the library. +func (UnimplementedLibrary) RegisterServices(_ *grpc.Server) { +} + +func (UnimplementedLibrary) NexusServices() []*nexus.Service { + return nil +} + +func (UnimplementedLibrary) NexusServiceProcessors() []*NexusServiceProcessor { + return nil +} + +func (UnimplementedLibrary) mustEmbedUnimplementedLibrary() {} + +// FullyQualifiedName creates a fully qualified name (FQN) by combining a library name +// and a component or task name. The FQN is used to uniquely identify components and +// tasks within the CHASM framework. +// The format of the returned FQN is: "libName.name" +func FullyQualifiedName(libName, name string) string { + return libName + "." + name +} diff --git a/chasm/library_core.go b/chasm/library_core.go new file mode 100644 index 00000000000..a0b1725cfad --- /dev/null +++ b/chasm/library_core.go @@ -0,0 +1,25 @@ +package chasm + +// CoreLibrary contains built-in components maintained as part of the CHASM framework. 
+type CoreLibrary struct { + UnimplementedLibrary +} + +func (b *CoreLibrary) Name() string { + return "core" +} + +func (b *CoreLibrary) Components() []*RegistrableComponent { + return []*RegistrableComponent{ + NewRegistrableComponent[*Visibility]("vis", WithDetached()), + } +} + +func (b *CoreLibrary) Tasks() []*RegistrableTask { + return []*RegistrableTask{ + NewRegistrableSideEffectTask( + "visTask", + defaultVisibilityTaskHandler, + ), + } +} diff --git a/chasm/library_mock.go b/chasm/library_mock.go new file mode 100644 index 00000000000..dc014e901e1 --- /dev/null +++ b/chasm/library_mock.go @@ -0,0 +1,174 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: library.go +// +// Generated by this command: +// +// mockgen -package chasm -source library.go -destination library_mock.go +// + +// Package chasm is a generated GoMock package. +package chasm + +import ( + reflect "reflect" + + nexus "github.com/nexus-rpc/sdk-go/nexus" + gomock "go.uber.org/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockLibrary is a mock of Library interface. +type MockLibrary struct { + ctrl *gomock.Controller + recorder *MockLibraryMockRecorder + isgomock struct{} +} + +// MockLibraryMockRecorder is the mock recorder for MockLibrary. +type MockLibraryMockRecorder struct { + mock *MockLibrary +} + +// NewMockLibrary creates a new mock instance. +func NewMockLibrary(ctrl *gomock.Controller) *MockLibrary { + mock := &MockLibrary{ctrl: ctrl} + mock.recorder = &MockLibraryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLibrary) EXPECT() *MockLibraryMockRecorder { + return m.recorder +} + +// Components mocks base method. +func (m *MockLibrary) Components() []*RegistrableComponent { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Components") + ret0, _ := ret[0].([]*RegistrableComponent) + return ret0 +} + +// Components indicates an expected call of Components. 
+func (mr *MockLibraryMockRecorder) Components() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Components", reflect.TypeOf((*MockLibrary)(nil).Components)) +} + +// Name mocks base method. +func (m *MockLibrary) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MockLibraryMockRecorder) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*MockLibrary)(nil).Name)) +} + +// NexusServiceProcessors mocks base method. +func (m *MockLibrary) NexusServiceProcessors() []*NexusServiceProcessor { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NexusServiceProcessors") + ret0, _ := ret[0].([]*NexusServiceProcessor) + return ret0 +} + +// NexusServiceProcessors indicates an expected call of NexusServiceProcessors. +func (mr *MockLibraryMockRecorder) NexusServiceProcessors() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NexusServiceProcessors", reflect.TypeOf((*MockLibrary)(nil).NexusServiceProcessors)) +} + +// NexusServices mocks base method. +func (m *MockLibrary) NexusServices() []*nexus.Service { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NexusServices") + ret0, _ := ret[0].([]*nexus.Service) + return ret0 +} + +// NexusServices indicates an expected call of NexusServices. +func (mr *MockLibraryMockRecorder) NexusServices() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NexusServices", reflect.TypeOf((*MockLibrary)(nil).NexusServices)) +} + +// RegisterServices mocks base method. +func (m *MockLibrary) RegisterServices(server *grpc.Server) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterServices", server) +} + +// RegisterServices indicates an expected call of RegisterServices. 
+func (mr *MockLibraryMockRecorder) RegisterServices(server any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterServices", reflect.TypeOf((*MockLibrary)(nil).RegisterServices), server) +} + +// Tasks mocks base method. +func (m *MockLibrary) Tasks() []*RegistrableTask { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Tasks") + ret0, _ := ret[0].([]*RegistrableTask) + return ret0 +} + +// Tasks indicates an expected call of Tasks. +func (mr *MockLibraryMockRecorder) Tasks() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tasks", reflect.TypeOf((*MockLibrary)(nil).Tasks)) +} + +// mustEmbedUnimplementedLibrary mocks base method. +func (m *MockLibrary) mustEmbedUnimplementedLibrary() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "mustEmbedUnimplementedLibrary") +} + +// mustEmbedUnimplementedLibrary indicates an expected call of mustEmbedUnimplementedLibrary. +func (mr *MockLibraryMockRecorder) mustEmbedUnimplementedLibrary() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "mustEmbedUnimplementedLibrary", reflect.TypeOf((*MockLibrary)(nil).mustEmbedUnimplementedLibrary)) +} + +// Mocknamer is a mock of namer interface. +type Mocknamer struct { + ctrl *gomock.Controller + recorder *MocknamerMockRecorder + isgomock struct{} +} + +// MocknamerMockRecorder is the mock recorder for Mocknamer. +type MocknamerMockRecorder struct { + mock *Mocknamer +} + +// NewMocknamer creates a new mock instance. +func NewMocknamer(ctrl *gomock.Controller) *Mocknamer { + mock := &Mocknamer{ctrl: ctrl} + mock.recorder = &MocknamerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *Mocknamer) EXPECT() *MocknamerMockRecorder { + return m.recorder +} + +// Name mocks base method. 
+func (m *Mocknamer) Name() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Name") + ret0, _ := ret[0].(string) + return ret0 +} + +// Name indicates an expected call of Name. +func (mr *MocknamerMockRecorder) Name() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Name", reflect.TypeOf((*Mocknamer)(nil).Name)) +} diff --git a/chasm/map.go b/chasm/map.go new file mode 100644 index 00000000000..652afd75444 --- /dev/null +++ b/chasm/map.go @@ -0,0 +1,6 @@ +package chasm + +// mapKeyTypes must match actual Map key type definition. +const mapKeyTypes = "string | int | int8 | int32 | int64 | uint | uint8 | uint32 | uint64 | bool" + +type Map[K string | int | int8 | int32 | int64 | uint | uint8 | uint32 | uint64 | bool, T any] map[K]Field[T] diff --git a/chasm/map_test.go b/chasm/map_test.go new file mode 100644 index 00000000000..dc1323350f5 --- /dev/null +++ b/chasm/map_test.go @@ -0,0 +1,53 @@ +package chasm + +import ( + "go/ast" + "go/parser" + "go/printer" + "go/token" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// Another approach would be to code generate string const. +func TestMapKeyTypesMatchConst(t *testing.T) { + _, currentFile, _, ok := runtime.Caller(0) + require.True(t, ok, "failed to get current file path") + srcFile := filepath.Join(filepath.Dir(currentFile), "map.go") + + fset := token.NewFileSet() + file, err := parser.ParseFile(fset, srcFile, nil, parser.AllErrors) + require.NoError(t, err) + + var found string + // Walk the top‐level declarations looking for: + // type Map[K ... 
, T any] map[K]T + for _, decl := range file.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.TYPE { + continue + } + for _, spec := range gd.Specs { + ts, ok := spec.(*ast.TypeSpec) + if !ok || ts.Name.Name != "Map" { + continue + } + // ts.TypeParams.List[0] is the field for K + if ts.TypeParams != nil && len(ts.TypeParams.List) > 0 { + field := ts.TypeParams.List[0] + var buf strings.Builder + // pretty‐print the AST node for the constraint + err = printer.Fprint(&buf, fset, field.Type) + require.NoError(t, err) + found = buf.String() + } + } + } + + require.NotEmpty(t, found, "could not locate Map[K …] in AST") + require.Equal(t, mapKeyTypes, found) +} diff --git a/chasm/ms_pointer.go b/chasm/ms_pointer.go new file mode 100644 index 00000000000..301da259c9e --- /dev/null +++ b/chasm/ms_pointer.go @@ -0,0 +1,58 @@ +package chasm + +import ( + "time" + + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/server/common/nexus/nexusrpc" +) + +// MSPointer is a special CHASM type which components can use to access their Node's underlying backend (i.e. mutable +// state). It is used to expose methods needed from the mutable state without polluting the chasm.Context interface. +// When deserializing components with fields of this type, the CHASM engine will set the value to its NodeBackend. +// This should only be used by the Workflow component. +type MSPointer struct { + backend NodeBackend +} + +// NewMSPointer creates a new MSPointer instance. +func NewMSPointer(backend NodeBackend) MSPointer { + return MSPointer{ + backend: backend, + } +} + +// WorkflowRunTimeout returns the workflow run timeout duration. Returns 0 if no timeout is set. +func (m MSPointer) WorkflowRunTimeout() time.Duration { + return m.backend.GetExecutionInfo().GetWorkflowRunTimeout().AsDuration() +} + +// AddHistoryEvent adds a history event via the underlying mutable state. 
+func (m MSPointer) AddHistoryEvent(t enumspb.EventType, setAttributes func(*historypb.HistoryEvent)) *historypb.HistoryEvent { + return m.backend.AddHistoryEvent(t, setAttributes) +} + +// HasAnyBufferedEvent returns true if there is at least one buffered event that matches the provided filter. +func (m MSPointer) HasAnyBufferedEvent(filter func(*historypb.HistoryEvent) bool) bool { + return m.backend.HasAnyBufferedEvent(filter) +} + +func (m MSPointer) GenerateEventLoadToken(event *historypb.HistoryEvent) ([]byte, error) { + return m.backend.GenerateEventLoadToken(event) +} + +// LoadHistoryEvent loads a history event from the underlying mutable state using the given token. +func (m MSPointer) LoadHistoryEvent(ctx Context, token []byte) (*historypb.HistoryEvent, error) { + return m.backend.LoadHistoryEvent(ctx.goContext(), token) +} + +// GetNexusCompletion retrieves the Nexus operation completion data for the given request ID from the underlying mutable state. +func (m MSPointer) GetNexusCompletion(ctx Context, requestID string) (nexusrpc.CompleteOperationOptions, error) { + return m.backend.GetNexusCompletion(ctx.goContext(), requestID) +} + +// GetWorkflowTypeName retrieves the workflow type name from the underlying mutable state. +func (m MSPointer) GetWorkflowTypeName() string { + return m.backend.GetExecutionInfo().GetWorkflowTypeName() +} diff --git a/chasm/nexus_completion.go b/chasm/nexus_completion.go new file mode 100644 index 00000000000..da26eef3544 --- /dev/null +++ b/chasm/nexus_completion.go @@ -0,0 +1,47 @@ +package chasm + +import ( + "encoding/base64" + + commonpb "go.temporal.io/api/common/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" +) + +// NexusCompletionHandlerURL is the user-visible URL for Nexus->CHASM callbacks. +const NexusCompletionHandlerURL = "temporal://internal" + +// NexusCompletionHandler is implemented by CHASM components that want to handle Nexus operation completion callbacks. 
+type NexusCompletionHandler interface { + HandleNexusCompletion(ctx MutableContext, completion *persistencespb.ChasmNexusCompletion) error +} + +// NexusCompletionHandlerComponent is a CHASM [Component] that also implements [NexusCompletionHandler]. +type NexusCompletionHandlerComponent interface { + Component + NexusCompletionHandler +} + +// GenerateNexusCallback generates a Callback message indicating a CHASM component to receive Nexus operation completion +// callbacks. Particularly useful for components that want to track a workflow start with StartWorkflowExecution. +func GenerateNexusCallback(ctx Context, component NexusCompletionHandlerComponent) (*commonpb.Callback, error) { + ref, err := ctx.Ref(component) + if err != nil { + return nil, err + } + + encodedRef := base64.RawURLEncoding.EncodeToString(ref) + headers := map[string]string{ + // NOTE: There's a constant defined for this in common/nexus but to avoid circular dependencies we redefine it here. + // This is acceptable since we are going to eventually have a strongly typed field for passing tokens around. + "temporal-callback-token": encodedRef, + } + + return &commonpb.Callback{ + Variant: &commonpb.Callback_Nexus_{ + Nexus: &commonpb.Callback_Nexus{ + Url: NexusCompletionHandlerURL, + Header: headers, + }, + }, + }, nil +} diff --git a/chasm/nexus_operation_processor.go b/chasm/nexus_operation_processor.go new file mode 100644 index 00000000000..91610726726 --- /dev/null +++ b/chasm/nexus_operation_processor.go @@ -0,0 +1,201 @@ +package chasm + +import ( + "fmt" + "math/rand/v2" + + "github.com/nexus-rpc/sdk-go/nexus" + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" +) + +// NexusOperationProcessorContext contains context for processing a Nexus operation's input, including the target +// namespace and a request identifier used for tracing and idempotency. 
+type NexusOperationProcessorContext struct { + // Namespace is the target namespace used for routing and validation of Nexus operations. + Namespace *namespace.Namespace + // RequestID is a unique identifier for the incoming operation request, used for correlation and idempotency + // across retries. + RequestID string + Links []nexus.Link + // A boolean indicating whether the operation processor framework should re-serialize the input and store it in the + // [NexusOperationProcessorResult.ReserializedInputPayload] field. Processor implementations may mutate the input regardless. + ReserializeInputPayload bool +} + +// NexusOperationRoutingKey determines which history shard should process a Nexus operation. +// Different implementations provide different routing strategies (e.g., by execution, random). +type NexusOperationRoutingKey interface { + // ShardID returns the target shard ID for this routing key given the total number of shards. + ShardID(numShards int32) int32 +} + +// NexusOperationRoutingKeyExecution routes operations to a specific shard based on an execution key. +// This ensures that operations related to the same execution are processed on the same shard. +type NexusOperationRoutingKeyExecution struct { + // NamespaceID is the ID of the namespace containing the execution whose shard should be targeted. + NamespaceID string + // BusinessID is the business identifier (e.g., workflow ID) of the execution. + BusinessID string +} + +// ShardID returns the shard that owns the execution identified by the namespace and business IDs. +func (r NexusOperationRoutingKeyExecution) ShardID(numShards int32) int32 { + return common.WorkflowIDToHistoryShard(r.NamespaceID, r.BusinessID, numShards) +} + +// NexusOperationRoutingKeyRandom routes operations to a random shard. +// The ShardID method returns a randomly selected valid shard ID. +type NexusOperationRoutingKeyRandom struct { +} + +// ShardID returns a randomly selected shard ID in the range [1, numShards]. 
+func (NexusOperationRoutingKeyRandom) ShardID(numShards int32) int32 { + return rand.Int32N(numShards) + 1 +} + +// NexusOperationProcessorResult contains the result of processing a Nexus operation input, +// including the routing key that determines which shard should handle the operation. +type NexusOperationProcessorResult struct { + // RoutingKey determines which history shard should process the operation. + RoutingKey NexusOperationRoutingKey + // A field set by the framework to contain the re-serialized input payload if requested in the given context. + ReserializedInputPayload *commonpb.Payload +} + +// NexusOperationProcessor is an interface that can be implemented per operation to validate and determine routing for the operation. +type NexusOperationProcessor[I any] interface { + // ProcessInput validates the input, and returns routing information for processing this operation. The + // method may mutate the input to set default values. + ProcessInput(ctx NexusOperationProcessorContext, input I) (*NexusOperationProcessorResult, error) +} + +// RegisterableNexusOperationProcessor adapts a typed Nexus operation processor for dynamic registration +// and invocation within a service processor. 
+type RegisterableNexusOperationProcessor struct { + processInput func(ctx NexusOperationProcessorContext, input *commonpb.Payload) (*NexusOperationProcessorResult, error) +} + +func nexusOperationProcessorAdapter[I any](processor NexusOperationProcessor[I]) func(ctx NexusOperationProcessorContext, input *commonpb.Payload) (*NexusOperationProcessorResult, error) { + return func(ctx NexusOperationProcessorContext, input *commonpb.Payload) (*NexusOperationProcessorResult, error) { + var i I + if err := payloads.Decode(&commonpb.Payloads{Payloads: []*commonpb.Payload{input}}, &i); err != nil { + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "failed to decode input payload: %v", err) + } + result, err := processor.ProcessInput(ctx, i) + if err != nil { + return nil, err + } + if ctx.ReserializeInputPayload { + pls, err := payloads.Encode(i) + if err != nil { + herr := nexus.NewHandlerErrorf(nexus.HandlerErrorTypeInternal, "failed to re-encode input payload: %v", err) + herr.RetryBehavior = nexus.HandlerErrorRetryBehaviorNonRetryable + return nil, herr + } + if len(pls.Payloads) == 1 { + result.ReserializedInputPayload = pls.Payloads[0] + } + } + return result, nil + } +} + +// NewRegisterableNexusOperationProcessor wraps a typed NexusOperationProcessor and returns a registerable adapter. +func NewRegisterableNexusOperationProcessor[I any](op NexusOperationProcessor[I]) RegisterableNexusOperationProcessor { + return RegisterableNexusOperationProcessor{ + processInput: nexusOperationProcessorAdapter(op), + } +} + +// NexusServiceProcessor handles input processing for operations within a specific Nexus service. +// It uses reflection to dynamically invoke the ProcessInput method on operations that implement it. 
+type NexusServiceProcessor struct { + name string + operations map[string]RegisterableNexusOperationProcessor +} + +// NewNexusServiceProcessor constructs a processor for a single Nexus service that can register and invoke operation +// processors by name. +func NewNexusServiceProcessor(name string) *NexusServiceProcessor { + return &NexusServiceProcessor{ + name: name, + operations: make(map[string]RegisterableNexusOperationProcessor), + } +} + +// RegisterOperation registers a named operation with this service processor. +// Returns an error if an operation with the same name is already registered. +func (p *NexusServiceProcessor) RegisterOperation(name string, op RegisterableNexusOperationProcessor) error { + if _, exists := p.operations[name]; exists { + return fmt.Errorf("operation %q already registered", name) + } + p.operations[name] = op + return nil +} + +// MustRegisterOperation registers a named operation and panics if registration fails. +func (p *NexusServiceProcessor) MustRegisterOperation(name string, op RegisterableNexusOperationProcessor) { + if err := p.RegisterOperation(name, op); err != nil { + // nolint:forbidigo // Panic is acceptable here for Must-style method. + panic(err) + } +} + +// ProcessInput routes the input processing request to the appropriate operation processor and returns routing information +// for the operation. +// +// Returns a nexus.HandlerError if the operation is not found or if input processing fails. +func (p *NexusServiceProcessor) ProcessInput(ctx NexusOperationProcessorContext, opName string, input *commonpb.Payload) (*NexusOperationProcessorResult, error) { + op, ok := p.operations[opName] + if !ok { + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeNotFound, "operation %q not found", opName) + } + + return op.processInput(ctx, input) +} + +// NexusEndpointProcessor handles input processing for Nexus operations across multiple services within a Nexus +// endpoint. 
It routes requests to the appropriate service processor based on the service name. +type NexusEndpointProcessor struct { + serviceProcessors map[string]*NexusServiceProcessor +} + +// NewNexusEndpointProcessor creates a new NexusEndpointProcessor. +func NewNexusEndpointProcessor() *NexusEndpointProcessor { + return &NexusEndpointProcessor{ + serviceProcessors: make(map[string]*NexusServiceProcessor), + } +} + +// RegisterServiceProcessor adds a service-level processor to the endpoint keyed by its name. +// Returns an error if a processor with the same name is already registered. +func (p *NexusEndpointProcessor) RegisterServiceProcessor(processor *NexusServiceProcessor) error { + if _, exists := p.serviceProcessors[processor.name]; exists { + return fmt.Errorf("service processor %q already registered", processor.name) + } + p.serviceProcessors[processor.name] = processor + return nil +} + +// MustRegisterServiceProcessor registers the service processor and panics if registration fails. +func (p *NexusEndpointProcessor) MustRegisterServiceProcessor(processor *NexusServiceProcessor) { + if err := p.RegisterServiceProcessor(processor); err != nil { + // nolint:forbidigo // Panic is acceptable here for Must-style method. + panic(err) + } +} + +// ProcessInput routes the input processing request to the appropriate service processor and returns routing information +// for the operation. +// +// Returns a nexus.HandlerError if the service is not found or if input processing fails. 
+func (p *NexusEndpointProcessor) ProcessInput(ctx NexusOperationProcessorContext, service, operation string, input *commonpb.Payload) (*NexusOperationProcessorResult, error) { + serviceProcessor, ok := p.serviceProcessors[service] + if !ok { + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeNotFound, "service %q not found", service) + } + return serviceProcessor.ProcessInput(ctx, operation, input) +} diff --git a/chasm/nexus_operation_processor_test.go b/chasm/nexus_operation_processor_test.go new file mode 100644 index 00000000000..93cd6f6e1df --- /dev/null +++ b/chasm/nexus_operation_processor_test.go @@ -0,0 +1,232 @@ +package chasm + +import ( + "testing" + + "github.com/nexus-rpc/sdk-go/nexus" + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/payloads" + "go.temporal.io/server/common/primitives/timestamp" +) + +type processableInput struct { + Value int +} + +type processableOperation struct { +} + +func (o *processableOperation) Name() string { + return "processable-operation" +} + +func (o *processableOperation) ProcessInput(ctx NexusOperationProcessorContext, input *processableInput) (*NexusOperationProcessorResult, error) { + if input.Value < 0 { + return nil, nexus.NewHandlerErrorf(nexus.HandlerErrorTypeBadRequest, "value must be non-negative") + } + // Mutate to test overwrite behavior. 
+ input.Value += 1 + return &NexusOperationProcessorResult{ + RoutingKey: NexusOperationRoutingKeyRandom{}, + }, nil +} + +func newTestContext() NexusOperationProcessorContext { + ns := namespace.NewLocalNamespaceForTest( + &persistencespb.NamespaceInfo{ + Id: "test-namespace-id", + Name: "test-namespace", + }, + &persistencespb.NamespaceConfig{ + Retention: timestamp.DurationFromDays(1), + }, + "active-cluster", + ) + return NexusOperationProcessorContext{ + Namespace: ns, + RequestID: "test-request-id", + } +} + +func mustToPayload(t *testing.T, value any) *commonpb.Payload { + t.Helper() + ps, err := payloads.Encode(value) + require.NoError(t, err) + return ps.Payloads[0] +} + +func TestNexusOperationProcessor_ProcessInput(t *testing.T) { + t.Parallel() + + processableOp := &processableOperation{} + + processor := NewRegisterableNexusOperationProcessor(processableOp) + + tests := []struct { + name string + ctx NexusOperationProcessorContext + input *commonpb.Payload + checkResult func(*testing.T, *NexusOperationProcessorResult, error) + }{ + { + name: "valid input returns routing key, no overwrite", + ctx: newTestContext(), + input: mustToPayload(t, processableInput{Value: 23}), + checkResult: func(t *testing.T, result *NexusOperationProcessorResult, err error) { + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.RoutingKey) + require.Nil(t, result.ReserializedInputPayload) + }, + }, + { + name: "overwrite payload with valid input", + ctx: func() NexusOperationProcessorContext { + ctx := newTestContext() + ctx.ReserializeInputPayload = true + return ctx + }(), + input: mustToPayload(t, processableInput{Value: 23}), + checkResult: func(t *testing.T, result *NexusOperationProcessorResult, err error) { + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.RoutingKey) + mutatedInput := processableInput{} + require.NoError(t, payloads.Decode(&commonpb.Payloads{Payloads: 
[]*commonpb.Payload{result.ReserializedInputPayload}}, &mutatedInput)) + require.Equal(t, 24, mutatedInput.Value) + }, + }, + { + name: "invalid input", + ctx: newTestContext(), + input: mustToPayload(t, processableInput{Value: -1}), + checkResult: func(t *testing.T, nopr *NexusOperationProcessorResult, err error) { + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeBadRequest, handlerErr.Type) + require.Contains(t, handlerErr.Error(), "value must be non-negative") + }, + }, + { + name: "decode error", + ctx: newTestContext(), + input: mustToPayload(t, "wrong type"), + checkResult: func(t *testing.T, nopr *NexusOperationProcessorResult, err error) { + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeBadRequest, handlerErr.Type) + require.Contains(t, handlerErr.Error(), "failed to decode input payload") + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := processor.processInput(tt.ctx, tt.input) + tt.checkResult(t, result, err) + }) + } +} + +func TestNexusServiceProcessor_ProcessInput(t *testing.T) { + t.Parallel() + + processableOp := &processableOperation{} + + processor := NewNexusServiceProcessor("test-service") + processor.MustRegisterOperation(processableOp.Name(), NewRegisterableNexusOperationProcessor(processableOp)) + + ctx := newTestContext() + + tests := []struct { + name string + opName string + input *commonpb.Payload + checkResult func(*testing.T, *NexusOperationProcessorResult, error) + }{ + { + name: "operation not found", + opName: "nonexistent-operation", + input: mustToPayload(t, processableInput{Value: 50}), + checkResult: func(t *testing.T, result *NexusOperationProcessorResult, err error) { + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeNotFound, handlerErr.Type) + require.Contains(t, 
handlerErr.Error(), `operation "nonexistent-operation" not found`) + }, + }, + { + name: "valid input", + opName: "processable-operation", + input: mustToPayload(t, processableInput{Value: 50}), + checkResult: func(t *testing.T, result *NexusOperationProcessorResult, err error) { + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.RoutingKey) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := processor.ProcessInput(ctx, tt.opName, tt.input) + tt.checkResult(t, result, err) + }) + } +} + +func TestNexusEndpointProcessor_ProcessInput(t *testing.T) { + t.Parallel() + + processableOp := &processableOperation{} + + serviceProcessor := NewNexusServiceProcessor("test-service") + serviceProcessor.MustRegisterOperation(processableOp.Name(), NewRegisterableNexusOperationProcessor(processableOp)) + + processor := NewNexusEndpointProcessor() + processor.MustRegisterServiceProcessor(serviceProcessor) + + ctx := newTestContext() + + tests := []struct { + name string + service string + operation string + input *commonpb.Payload + checkResult func(*testing.T, *NexusOperationProcessorResult, error) + }{ + { + name: "service not found", + service: "nonexistent-service", + operation: "processable-operation", + input: mustToPayload(t, processableInput{Value: 50}), + checkResult: func(t *testing.T, result *NexusOperationProcessorResult, err error) { + var handlerErr *nexus.HandlerError + require.ErrorAs(t, err, &handlerErr) + require.Equal(t, nexus.HandlerErrorTypeNotFound, handlerErr.Type) + require.Contains(t, handlerErr.Error(), `service "nonexistent-service" not found`) + }, + }, + { + name: "valid request", + service: "test-service", + operation: "processable-operation", + input: mustToPayload(t, processableInput{Value: 47}), + checkResult: func(t *testing.T, result *NexusOperationProcessorResult, err error) { + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.RoutingKey) + 
}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := processor.ProcessInput(ctx, tt.service, tt.operation, tt.input) + tt.checkResult(t, result, err) + }) + } +} diff --git a/chasm/node_backend_mock.go b/chasm/node_backend_mock.go new file mode 100644 index 00000000000..6ddc1a815c1 --- /dev/null +++ b/chasm/node_backend_mock.go @@ -0,0 +1,242 @@ +package chasm + +import ( + "context" + "sync" + "time" + + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/nexus/nexusrpc" + "go.temporal.io/server/service/history/tasks" +) + +var _ NodeBackend = (*MockNodeBackend)(nil) + +// MockNodeBackend is a lightweight manual mock for the NodeBackend interface. +// Methods may be stubbed by assigning the corresponding Handle fields. Update call history is recorded in the struct +// fields (thread-safe). +type MockNodeBackend struct { + // Optional function overrides. If nil, methods return zero-values. 
+ HandleGetExecutionState func() *persistencespb.WorkflowExecutionState + HandleGetExecutionInfo func() *persistencespb.WorkflowExecutionInfo + HandleGetCurrentVersion func() int64 + HandleNextTransitionCount func() int64 + HandleGetApproximatePersistedSize func() int + HandleCurrentVersionedTransition func() *persistencespb.VersionedTransition + HandleGetWorkflowKey func() definition.WorkflowKey + HandleUpdateWorkflowStateStatus func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) + HandleIsWorkflow func() bool + HandleGetNexusCompletion func(ctx context.Context, requestID string) (nexusrpc.CompleteOperationOptions, error) + HandleAddHistoryEvent func(t enumspb.EventType, setAttributes func(*historypb.HistoryEvent)) *historypb.HistoryEvent + HandleLoadHistoryEvent func(ctx context.Context, token []byte) (*historypb.HistoryEvent, error) + HandleGenerateEventLoadToken func(event *historypb.HistoryEvent) ([]byte, error) + HandleHasAnyBufferedEvent func(filter func(*historypb.HistoryEvent) bool) bool + HandleGetNamespaceEntry func() *namespace.Namespace + HandleEndpointRegistry func() EndpointRegistry + + // Recorded calls (protected by mu). 
+ mu sync.Mutex + TasksByCategory map[tasks.Category][]tasks.Task + DeletePureTaskCalls []time.Time + UpdateCalls []struct { + State enumsspb.WorkflowExecutionState + Status enumspb.WorkflowExecutionStatus + } +} + +func (m *MockNodeBackend) GetExecutionState() *persistencespb.WorkflowExecutionState { + if m.HandleGetExecutionState != nil { + return m.HandleGetExecutionState() + } + return &persistencespb.WorkflowExecutionState{} +} + +func (m *MockNodeBackend) GetExecutionInfo() *persistencespb.WorkflowExecutionInfo { + if m.HandleGetExecutionInfo != nil { + return m.HandleGetExecutionInfo() + } + return &persistencespb.WorkflowExecutionInfo{} +} + +func (m *MockNodeBackend) GetApproximatePersistedSize() int { + if m.HandleGetApproximatePersistedSize != nil { + return m.HandleGetApproximatePersistedSize() + } + return 0 +} + +func (m *MockNodeBackend) GetCurrentVersion() int64 { + if m.HandleGetCurrentVersion != nil { + return m.HandleGetCurrentVersion() + } + return 0 +} + +func (m *MockNodeBackend) NextTransitionCount() int64 { + if m.HandleNextTransitionCount != nil { + return m.HandleNextTransitionCount() + } + return 0 +} + +func (m *MockNodeBackend) CurrentVersionedTransition() *persistencespb.VersionedTransition { + if m.HandleCurrentVersionedTransition != nil { + return m.HandleCurrentVersionedTransition() + } + return nil +} + +func (m *MockNodeBackend) GetWorkflowKey() definition.WorkflowKey { + if m.HandleGetWorkflowKey != nil { + return m.HandleGetWorkflowKey() + } + return definition.WorkflowKey{} +} + +func (m *MockNodeBackend) AddTasks(ts ...tasks.Task) { + m.mu.Lock() + defer m.mu.Unlock() + if m.TasksByCategory == nil { + m.TasksByCategory = make(map[tasks.Category][]tasks.Task, 1) + } + for _, task := range ts { + category := task.GetCategory() + m.TasksByCategory[category] = append(m.TasksByCategory[category], task) + } +} + +func (m *MockNodeBackend) DeleteCHASMPureTasks(maxScheduledTime time.Time) { + m.mu.Lock() + defer m.mu.Unlock() + + 
m.DeletePureTaskCalls = append(m.DeletePureTaskCalls, maxScheduledTime) +} + +func (m *MockNodeBackend) LastDeletePureTaskCall() time.Time { + m.mu.Lock() + defer m.mu.Unlock() + + if len(m.DeletePureTaskCalls) == 0 { + return time.Time{} + } + return m.DeletePureTaskCalls[len(m.DeletePureTaskCalls)-1] +} + +func (m *MockNodeBackend) UpdateWorkflowStateStatus( + state enumsspb.WorkflowExecutionState, + status enumspb.WorkflowExecutionStatus, +) (bool, error) { + if m.HandleUpdateWorkflowStateStatus != nil { + ok, err := m.HandleUpdateWorkflowStateStatus(state, status) + + m.mu.Lock() + m.UpdateCalls = append(m.UpdateCalls, struct { + State enumsspb.WorkflowExecutionState + Status enumspb.WorkflowExecutionStatus + }{State: state, Status: status}) + m.mu.Unlock() + + return ok, err + } + + m.mu.Lock() + m.UpdateCalls = append(m.UpdateCalls, struct { + State enumsspb.WorkflowExecutionState + Status enumspb.WorkflowExecutionStatus + }{State: state, Status: status}) + m.mu.Unlock() + + return false, nil +} + +func (m *MockNodeBackend) LastUpdateWorkflowState() enumsspb.WorkflowExecutionState { + m.mu.Lock() + defer m.mu.Unlock() + if len(m.UpdateCalls) == 0 { + return enumsspb.WORKFLOW_EXECUTION_STATE_UNSPECIFIED + } + return m.UpdateCalls[len(m.UpdateCalls)-1].State +} + +func (m *MockNodeBackend) LastUpdateWorkflowStatus() enumspb.WorkflowExecutionStatus { + m.mu.Lock() + defer m.mu.Unlock() + if len(m.UpdateCalls) == 0 { + return enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED + } + return m.UpdateCalls[len(m.UpdateCalls)-1].Status +} + +func (m *MockNodeBackend) IsWorkflow() bool { + if m.HandleIsWorkflow != nil { + return m.HandleIsWorkflow() + } + return false +} + +func (m *MockNodeBackend) GetNexusCompletion( + ctx context.Context, + requestID string, +) (nexusrpc.CompleteOperationOptions, error) { + if m.HandleGetNexusCompletion != nil { + return m.HandleGetNexusCompletion(ctx, requestID) + } + return nexusrpc.CompleteOperationOptions{}, nil +} + +func (m 
*MockNodeBackend) AddHistoryEvent(t enumspb.EventType, setAttributes func(*historypb.HistoryEvent)) *historypb.HistoryEvent { + if m.HandleAddHistoryEvent != nil { + return m.HandleAddHistoryEvent(t, setAttributes) + } + return nil +} + +func (m *MockNodeBackend) GenerateEventLoadToken(event *historypb.HistoryEvent) ([]byte, error) { + if m.HandleGenerateEventLoadToken != nil { + return m.HandleGenerateEventLoadToken(event) + } + return []byte("test token"), nil +} + +func (m *MockNodeBackend) LoadHistoryEvent(ctx context.Context, token []byte) (*historypb.HistoryEvent, error) { + if m.HandleLoadHistoryEvent != nil { + return m.HandleLoadHistoryEvent(ctx, token) + } + return nil, nil +} + +func (m *MockNodeBackend) HasAnyBufferedEvent(filter func(*historypb.HistoryEvent) bool) bool { + if m.HandleHasAnyBufferedEvent != nil { + return m.HandleHasAnyBufferedEvent(filter) + } + return false +} + +func (m *MockNodeBackend) GetNamespaceEntry() *namespace.Namespace { + if m.HandleGetNamespaceEntry != nil { + return m.HandleGetNamespaceEntry() + } + return nil +} + +func (m *MockNodeBackend) EndpointRegistry() EndpointRegistry { + if m.HandleEndpointRegistry != nil { + return m.HandleEndpointRegistry() + } + return nil +} + +func (m *MockNodeBackend) NumTasksAdded() int { + m.mu.Lock() + defer m.mu.Unlock() + count := 0 + for _, ts := range m.TasksByCategory { + count += len(ts) + } + return count +} diff --git a/chasm/node_pure_task_mock.go b/chasm/node_pure_task_mock.go new file mode 100644 index 00000000000..07763793f25 --- /dev/null +++ b/chasm/node_pure_task_mock.go @@ -0,0 +1,86 @@ +package chasm + +import ( + "context" + "sync" +) + +// MockNodePureTask is a lightweight manual mock for the NodePureTask interface. +// Methods may be stubbed by assigning the corresponding Handle fields. Call history +// is recorded in the struct fields (thread-safe). 
+type MockNodePureTask struct { + HandleExecutePureTask func(baseCtx context.Context, taskAttributes TaskAttributes, taskInstance any) (bool, error) + HandleValidatePureTask func(baseCtx context.Context, taskAttributes TaskAttributes, taskInstance any) (bool, error) + + mu sync.Mutex + ExecuteCalls []struct { + BaseCtx context.Context + Attributes TaskAttributes + Task any + } + ValidateCalls []struct { + BaseCtx context.Context + Attributes TaskAttributes + Task any + } +} + +func (m *MockNodePureTask) ExecutePureTask( + baseCtx context.Context, + taskAttributes TaskAttributes, + taskInstance any, +) (bool, error) { + if m.HandleExecutePureTask != nil { + ok, err := m.HandleExecutePureTask(baseCtx, taskAttributes, taskInstance) + + m.mu.Lock() + m.ExecuteCalls = append(m.ExecuteCalls, struct { + BaseCtx context.Context + Attributes TaskAttributes + Task any + }{BaseCtx: baseCtx, Attributes: taskAttributes, Task: taskInstance}) + m.mu.Unlock() + + return ok, err + } + + m.mu.Lock() + m.ExecuteCalls = append(m.ExecuteCalls, struct { + BaseCtx context.Context + Attributes TaskAttributes + Task any + }{BaseCtx: baseCtx, Attributes: taskAttributes, Task: taskInstance}) + m.mu.Unlock() + + return false, nil +} + +func (m *MockNodePureTask) ValidatePureTask( + baseCtx context.Context, + taskAttributes TaskAttributes, + taskInstance any, +) (bool, error) { + if m.HandleValidatePureTask != nil { + ok, err := m.HandleValidatePureTask(baseCtx, taskAttributes, taskInstance) + + m.mu.Lock() + m.ValidateCalls = append(m.ValidateCalls, struct { + BaseCtx context.Context + Attributes TaskAttributes + Task any + }{BaseCtx: baseCtx, Attributes: taskAttributes, Task: taskInstance}) + m.mu.Unlock() + + return ok, err + } + + m.mu.Lock() + m.ValidateCalls = append(m.ValidateCalls, struct { + BaseCtx context.Context + Attributes TaskAttributes + Task any + }{BaseCtx: baseCtx, Attributes: taskAttributes, Task: taskInstance}) + m.mu.Unlock() + + return false, nil +} diff --git 
a/chasm/parent_pointer.go b/chasm/parent_pointer.go new file mode 100644 index 00000000000..b2717a0d8ce --- /dev/null +++ b/chasm/parent_pointer.go @@ -0,0 +1,102 @@ +package chasm + +import ( + "fmt" + "reflect" + + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/softassert" +) + +const ( + parentPtrInternalFieldName = "Internal" +) + +// ParentPtr is an in-memory pointer to the parent component of a CHASM component. +// +// CHASM map is not a component, so if a component is inside a map, its ParentPtr +// will point to the nearest ancestor component that is not a map. +// +// ParentPtr is only initialized and available for use **after** the transition that +// creates the component using ParentPtr is completed. +type ParentPtr[T any] struct { + // Exporting this field as this generic struct needs to be created via reflection, + // and reflection can't set private fields. + Internal parentPtrInternal +} + +type parentPtrInternal struct { + // Storing currentNode instead of parent component Node here so that + // we can differentiate between root node and non-initialized ParentPtr. + currentNode *Node +} + +// Get returns the parent component, deserializing it if necessary. +// Panics rather than returning an error, as errors are supposed to be handled by the framework as opposed to the +// application. +func (p ParentPtr[T]) Get(chasmContext Context) T { + vT, ok := p.TryGet(chasmContext) + if !ok { + // nolint:forbidigo // Panic is intended here for framework error handling. + panic(serviceerror.NewInternal("expect parent component value but got nil")) + } + return vT +} + +// TryGet returns the parent component and a boolean indicating if the value was found, +// deserializing if necessary. +// Panics rather than returning an error, as errors are supposed to be handled by the framework as opposed to the +// application. 
+func (p ParentPtr[T]) TryGet(chasmContext Context) (T, bool) { + var nilT T + if p.Internal.currentNode == nil { + // ParentPtr not initialized + return nilT, false + } + + parent := p.Internal.currentNode.parent + if parent == nil { + return nilT, false + } + + for parent.isMap() { + parent = parent.parent + if parent == nil { + encodedPath, _ := p.Internal.currentNode.getEncodedPath() + // nolint:forbidigo // Panic is intended here for framework error handling. + panic(softassert.UnexpectedInternalErr( + p.Internal.currentNode.logger, + "unable to find parent component for CHASM component inside a map", + fmt.Errorf("child node name: %s", encodedPath), + )) + } + } + + if !parent.isComponent() { + // nolint:forbidigo // Panic is intended here for framework error handling. + panic(softassert.UnexpectedInternalErr( + parent.logger, + "unexpected CHASM node that has a child component", + fmt.Errorf("node %s, node metadata: %s", + parent.nodeName, + parent.serializedNode.GetMetadata().String(), + ), + )) + } + + if err := parent.prepareComponentValue(chasmContext); err != nil { + // nolint:forbidigo // Panic is intended here for framework error handling. + panic(err) + } + + if parent.value == nil { + return nilT, false + } + + vT, isT := parent.value.(T) + if !isT { + // nolint:forbidigo // Panic is intended here for framework error handling. + panic(serviceerror.NewInternalf("parent component value doesn't implement %s", reflect.TypeFor[T]().Name())) + } + return vT, true +} diff --git a/chasm/parent_pointer_mock.go b/chasm/parent_pointer_mock.go new file mode 100644 index 00000000000..93d32a95161 --- /dev/null +++ b/chasm/parent_pointer_mock.go @@ -0,0 +1,34 @@ +package chasm + +import ( + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" +) + +// NewMockParentPtr creates a ParentPtr[T] backed by real Node instances +// that returns the given parent value when Get or TryGet is called. 
+// This is intended for use in unit tests where a full CHASM tree is not needed. +func NewMockParentPtr[T any](parent T) ParentPtr[T] { + base := &nodeBase{ + logger: log.NewNoopLogger(), + } + + parentNode := newNode(base, nil, "") + parentNode.serializedNode = &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{}, + }, + }, + } + parentNode.value = parent + parentNode.valueState = valueStateSynced + + childNode := newNode(base, parentNode, "mock_child") + + return ParentPtr[T]{ + Internal: parentPtrInternal{ + currentNode: childNode, + }, + } +} diff --git a/chasm/path_encoder.go b/chasm/path_encoder.go new file mode 100644 index 00000000000..4513cf31ae0 --- /dev/null +++ b/chasm/path_encoder.go @@ -0,0 +1,132 @@ +package chasm + +import ( + "strings" + "unicode/utf8" + + "go.temporal.io/api/serviceerror" +) + +var _ NodePathEncoder = (*defaultPathEncoder)(nil) + +var DefaultPathEncoder NodePathEncoder = &defaultPathEncoder{} + +type defaultPathEncoder struct{} + +const ( + nameSeparator = '$' + collectionSeparator = '#' + escapeChar = '\\' +) + +var ( + rootPath = []string{} +) + +// The Encode method encodes node path in a way that the following uses cases can be +// achieved by doing a simple a range query in DB based on prefixes of the encoded path: +// 1. Getting all nodes for a chasm tree. +// 2. Getting all nodes for a sub-tree. +// 3. Getting all immediate children of a Collection node. +// Additionally, it allows getting all ancestor nodes of a given node. +// +// It does so by using a different separator for a node which is a direct child of a Collection node. +// The two separators used ('$' and '#') are next to each other in terms of values, which ensures all +// children for a node are grouped together. Additionally, all immediate children of a Collection node +// are grouped together as well. 
+// +// To get a sub-tree (say a node with name "foo"), we want to use a query look like the following: +// +// path >= "foo" AND path < "foo[something]" +// +// and we need to know the minimal value of [something] that can possibly be in the encoded path. +// This is achieved by escaping '\', '$', '#', and also every code point less than '#'. +// Since the escaped character itself is larger than '#', the minimal value is '%' and our query becomes: +// +// path >= "foo" AND path < "foo%" +func (e *defaultPathEncoder) Encode( + node *Node, + path []string, +) (string, error) { + if path == nil { + path = node.path() + } + + if len(path) == 0 { + return "", nil + } + + var b strings.Builder + lastIdx := len(path) - 1 + for i, nodeName := range path { + if i > 0 { + if i == lastIdx && + node.parent != nil && + node.parent.serializedNode.GetMetadata().GetCollectionAttributes() != nil { + _, _ = b.WriteRune(collectionSeparator) + } else { + _, _ = b.WriteRune(nameSeparator) + } + } + + if nodeName == "" { + return "", serviceerror.NewInternalf("path contains empty node name: %v", path) + } + + for _, r := range nodeName { + if r == utf8.RuneError { + return "", serviceerror.NewInvalidArgumentf("node name contains invalid UTF-8 code point: %v", nodeName) + } + + if r == escapeChar || + r == nameSeparator || + r <= collectionSeparator { + _, _ = b.WriteRune(escapeChar) + } + _, _ = b.WriteRune(r) + } + } + return b.String(), nil +} + +func (e *defaultPathEncoder) Decode( + encodedPath string, +) ([]string, error) { + if encodedPath == "" { + return rootPath, nil + } + + path := make([]string, 0, 3) + var b strings.Builder + escaped := false + for _, r := range encodedPath { + if r == utf8.RuneError { + return nil, serviceerror.NewInvalidArgumentf("encodedPath contains invalid UTF-8 code point: %v", encodedPath) + } + + if escaped { + _, _ = b.WriteRune(r) + escaped = false + continue + } + + if r == '\\' { + escaped = true + continue + } + + if r == '$' || r == '#' { + 
path = append(path, b.String()) + b.Reset() + continue + } + + _, _ = b.WriteRune(r) + } + if escaped { + return nil, serviceerror.NewInternalf("encoded path ends with escape character: %v", encodedPath) + } + + path = append(path, b.String()) + return path, nil +} diff --git a/chasm/path_encoder_test.go b/chasm/path_encoder_test.go new file mode 100644 index 00000000000..53f134ee5a5 --- /dev/null +++ b/chasm/path_encoder_test.go @@ -0,0 +1,88 @@ +package chasm + +import ( + "testing" + + "github.com/stretchr/testify/require" + persistencespb "go.temporal.io/server/api/persistence/v1" +) + +func TestDefaultPathEncoder_EncodeDecode(t *testing.T) { + e := &defaultPathEncoder{} + + root := &Node{ + nodeName: "", + serializedNode: &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ComponentAttributes: &persistencespb.ChasmComponentAttributes{}}, + }, + }, + } + child := &Node{ + parent: root, + nodeName: "child", + serializedNode: &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ComponentAttributes: &persistencespb.ChasmComponentAttributes{}}, + }, + }, + } + collection := &Node{ + parent: root, + nodeName: "collection", + serializedNode: &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + Attributes: &persistencespb.ChasmNodeMetadata_CollectionAttributes{CollectionAttributes: &persistencespb.ChasmCollectionAttributes{}}, + }, + }, + } + collectionItem := &Node{ + parent: collection, + nodeName: "item", + serializedNode: &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ComponentAttributes: &persistencespb.ChasmComponentAttributes{}}, + }, + }, + } + collectionItemData := &Node{ + parent: collectionItem, + nodeName: "data", + serializedNode: &persistencespb.ChasmNode{ + 
Metadata: &persistencespb.ChasmNodeMetadata{ + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{}, + }, + }, + } + + tests := []struct { + node *Node + path []string + encoded string + }{ + {root, []string{}, ""}, + + {child, []string{"child"}, "child"}, + {child, []string{"special\\#"}, "special\\\\\\#"}, + {child, []string{" !"}, "\\ \\!"}, + {child, []string{"你好"}, "你好"}, + + {collection, []string{"collection"}, "collection"}, + + {collectionItem, []string{"collection", "item"}, "collection#item"}, + {collectionItem, []string{"collection", "⌘"}, "collection#⌘"}, + + {collectionItemData, []string{"collection", "item", "data"}, "collection$item$data"}, + {collectionItemData, []string{"collection", "item", "世界"}, "collection$item$世界"}, + } + + for _, tt := range tests { + encoded, err := e.Encode(tt.node, tt.path) + require.NoError(t, err) + require.Equal(t, tt.encoded, encoded) + + decodedPath, err := e.Decode(encoded) + require.NoError(t, err) + require.Equal(t, tt.path, decodedPath) + } +} diff --git a/chasm/ref.go b/chasm/ref.go new file mode 100644 index 00000000000..11adc1b0ba2 --- /dev/null +++ b/chasm/ref.go @@ -0,0 +1,156 @@ +package chasm + +import ( + "reflect" + + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" +) + +// ErrMalformedComponentRef is returned when component ref bytes cannot be deserialized. +var ErrMalformedComponentRef = serviceerror.NewInvalidArgument("malformed component ref") + +// ErrInvalidComponentRef is returned when component ref bytes deserialize to an invalid component ref. +var ErrInvalidComponentRef = serviceerror.NewInvalidArgument("invalid component ref") + +// ExecutionKey uniquely identifies a CHASM execution in the system. +type ExecutionKey struct { + NamespaceID string + BusinessID string + RunID string +} + +type ComponentRef struct { + ExecutionKey + + // archetypeID is CHASM framework's internal ID for the type of the root component of the CHASM execution. 
+ // + // It is used to find and validate the loaded execution has the right archetype, especially when runID + // is not specified in the ExecutionKey. + archetypeID ArchetypeID + // executionGoType is used for determining the ComponetRef's archetype. + // When CHASM deverloper needs to create a ComponentRef, they will only provide the component type, + // and leave the work of determining archetypeID to the CHASM framework. + executionGoType reflect.Type + + // executionLastUpdateVT is the consistency token for the entire execution. + executionLastUpdateVT *persistencespb.VersionedTransition + + // componentType is the fully qualified component type name. + // It is for performing partial loading more efficiently in future versions of CHASM. + // + // From the componentType, we can find the registered component struct definition, + // then use reflection to find sub-components and understand if those sub-components + // need to be loaded or not. + // We only need to do this for sub-components, path for parent/ancenstor components + // can be inferred from the current component path and they always needs to be loaded. + // + // componentType string + + // componentPath and componentInitialVT are used to identify a component. + componentPath []string + componentInitialVT *persistencespb.VersionedTransition + + validationFn func(NodeBackend, Context, Component, *Registry) error +} + +// NewComponentRef creates a new ComponentRef with a registered root component go type. +// +// In V1, if you don't have a ref, +// then you can only interact with the (top level) execution. +func NewComponentRef[C Component]( + executionKey ExecutionKey, +) ComponentRef { + return ComponentRef{ + ExecutionKey: executionKey, + executionGoType: reflect.TypeFor[C](), + } +} + +// NewComponentRefByArchetypeID creates a new ComponentRef with a known archetype ID. +// This should only be used by CHASM framework internals. +// CHASM library developers should use [NewComponentRef] instead. 
+func NewComponentRefByArchetypeID( + executionKey ExecutionKey, + archetypeID ArchetypeID, +) ComponentRef { + return ComponentRef{ + ExecutionKey: executionKey, + archetypeID: archetypeID, + } +} + +func (r *ComponentRef) ArchetypeID( + registry *Registry, +) (ArchetypeID, error) { + if r.archetypeID != UnspecifiedArchetypeID { + return r.archetypeID, nil + } + + rc, ok := registry.componentOf(r.executionGoType) + if !ok { + return 0, serviceerror.NewInternal("unknown chasm component type: " + r.executionGoType.String()) + } + r.archetypeID = rc.componentID + + return r.archetypeID, nil +} + +func (r *ComponentRef) Serialize( + registry *Registry, +) ([]byte, error) { + if r == nil { + return nil, nil + } + + archetypeID, err := r.ArchetypeID(registry) + if err != nil { + return nil, err + } + + pRef := persistencespb.ChasmComponentRef{ + NamespaceId: r.NamespaceID, + BusinessId: r.BusinessID, + RunId: r.RunID, + ArchetypeId: archetypeID, + ExecutionVersionedTransition: r.executionLastUpdateVT, + ComponentPath: r.componentPath, + ComponentInitialVersionedTransition: r.componentInitialVT, + } + return pRef.Marshal() +} + +// DeserializeComponentRef deserializes a byte slice into a ComponentRef. +// Provides caller the access to information including ExecutionKey, Archetype, and ShardingKey. +func DeserializeComponentRef(data []byte) (ComponentRef, error) { + if len(data) == 0 { + return ComponentRef{}, ErrInvalidComponentRef + } + var pRef persistencespb.ChasmComponentRef + if err := pRef.Unmarshal(data); err != nil { + return ComponentRef{}, ErrMalformedComponentRef + } + + ref := ProtoRefToComponentRef(&pRef) + if ref.BusinessID == "" || ref.NamespaceID == "" { + return ComponentRef{}, ErrInvalidComponentRef + } + return ref, nil +} + +// ProtoRefToComponentRef converts a persistence ChasmComponentRef reference to a +// ComponentRef. This is useful for situations where the protobuf ComponentRef has +// already been deserialized as part of an enclosing message. 
+func ProtoRefToComponentRef(pRef *persistencespb.ChasmComponentRef) ComponentRef { + return ComponentRef{ + ExecutionKey: ExecutionKey{ + NamespaceID: pRef.NamespaceId, + BusinessID: pRef.BusinessId, + RunID: pRef.RunId, + }, + archetypeID: pRef.ArchetypeId, + executionLastUpdateVT: pRef.ExecutionVersionedTransition, + componentPath: pRef.ComponentPath, + componentInitialVT: pRef.ComponentInitialVersionedTransition, + } +} diff --git a/chasm/ref_test.go b/chasm/ref_test.go new file mode 100644 index 00000000000..0d1242e5bfa --- /dev/null +++ b/chasm/ref_test.go @@ -0,0 +1,116 @@ +package chasm + +import ( + "math/rand" + "reflect" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/testing/protorequire" + "go.uber.org/mock/gomock" +) + +type componentRefSuite struct { + suite.Suite + *require.Assertions + protorequire.ProtoAssertions + + controller *gomock.Controller + + registry *Registry +} + +func TestComponentRefSuite(t *testing.T) { + suite.Run(t, new(componentRefSuite)) +} + +func (s *componentRefSuite) SetupTest() { + // Do this in SetupSubTest() as well, if we have sub tests in this suite. 
+ s.Assertions = require.New(s.T()) + s.ProtoAssertions = protorequire.New(s.T()) + + s.controller = gomock.NewController(s.T()) + + s.registry = NewRegistry(log.NewTestLogger()) + err := s.registry.Register(newTestLibrary(s.controller)) + s.NoError(err) +} + +func (s *componentRefSuite) TestArchetypeID() { + executionKey := ExecutionKey{ + NamespaceID: primitives.NewUUID().String(), + BusinessID: primitives.NewUUID().String(), + RunID: primitives.NewUUID().String(), + } + ref := NewComponentRef[*TestComponent](executionKey) + + archetypeID, err := ref.ArchetypeID(s.registry) + s.NoError(err) + + rc, ok := s.registry.componentOf(reflect.TypeFor[*TestComponent]()) + s.True(ok) + + s.Equal(rc.componentID, archetypeID) +} + +func (s *componentRefSuite) TestNewComponentRefByArchetypeID() { + executionKey := ExecutionKey{ + NamespaceID: primitives.NewUUID().String(), + BusinessID: primitives.NewUUID().String(), + RunID: primitives.NewUUID().String(), + } + expectArchetypeID := WorkflowArchetypeID + ref := NewComponentRefByArchetypeID(executionKey, expectArchetypeID) + + s.Equal(executionKey, ref.ExecutionKey) + + archetypeID, err := ref.ArchetypeID(s.registry) + s.NoError(err) + s.Equal(expectArchetypeID, archetypeID) +} + +func (s *componentRefSuite) TestSerializeDeserialize() { + _, err := DeserializeComponentRef(nil) + s.ErrorIs(err, ErrInvalidComponentRef) + _, err = DeserializeComponentRef([]byte{}) + s.ErrorIs(err, ErrInvalidComponentRef) + + executionKey := ExecutionKey{ + NamespaceID: primitives.NewUUID().String(), + BusinessID: primitives.NewUUID().String(), + RunID: primitives.NewUUID().String(), + } + ref := ComponentRef{ + ExecutionKey: executionKey, + executionGoType: reflect.TypeFor[*TestComponent](), + executionLastUpdateVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: rand.Int63(), + TransitionCount: rand.Int63(), + }, + componentPath: []string{primitives.NewUUID().String(), primitives.NewUUID().String()}, + componentInitialVT: 
&persistencespb.VersionedTransition{ + NamespaceFailoverVersion: rand.Int63(), + TransitionCount: rand.Int63(), + }, + } + + serializedRef, err := ref.Serialize(s.registry) + s.NoError(err) + + deserializedRef, err := DeserializeComponentRef(serializedRef) + s.NoError(err) + + s.ProtoEqual(ref.executionLastUpdateVT, deserializedRef.executionLastUpdateVT) + s.ProtoEqual(ref.componentInitialVT, deserializedRef.componentInitialVT) + + rootRc, ok := s.registry.ComponentFor(&TestComponent{}) + s.True(ok) + s.Equal(rootRc.componentID, deserializedRef.archetypeID) + + s.Equal(ref.ExecutionKey, deserializedRef.ExecutionKey) + s.Equal(ref.componentPath, deserializedRef.componentPath) +} diff --git a/chasm/registrable_component.go b/chasm/registrable_component.go new file mode 100644 index 00000000000..15b7f659e41 --- /dev/null +++ b/chasm/registrable_component.go @@ -0,0 +1,231 @@ +package chasm + +import ( + "fmt" + "reflect" + + "github.com/dgryski/go-farm" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/searchattribute/sadefs" +) + +type ( + RegistrableComponent struct { + componentType string + goType reflect.Type + + // Following three fields are initialized when the component is registered to a library. 
+ library namer + componentID uint32 + fqn string + + ephemeral bool + singleCluster bool + detached bool + + searchAttributesMapper *VisibilitySearchAttributesMapper + + contextValues map[any]any + } + + RegistrableComponentOption func(*RegistrableComponent) +) + +func NewRegistrableComponent[C Component]( + componentType string, + opts ...RegistrableComponentOption, +) *RegistrableComponent { + rc := &RegistrableComponent{ + componentType: componentType, + goType: reflect.TypeFor[C](), + } + for _, opt := range opts { + opt(rc) + } + return rc +} + +func WithEphemeral() RegistrableComponentOption { + return func(rc *RegistrableComponent) { + rc.ephemeral = true + } +} + +// Is there any use case where we don't want to replicate certain instances of a archetype? +func WithSingleCluster() RegistrableComponentOption { + return func(rc *RegistrableComponent) { + rc.singleCluster = true + } +} + +// WithDetached marks the registrable component as detached. Detached components ignore +// parent lifecycle validation, allowing them to continue operating when their +// parent is closed/terminated. +// If a registrable component is not detached by default, a component definition +// can specify its child as detached via ComponentFieldDetached() option. +func WithDetached() RegistrableComponentOption { + return func(rc *RegistrableComponent) { + rc.detached = true + } +} + +// IsDetached returns true if the component type is registered as detached. +func (rc *RegistrableComponent) IsDetached() bool { + return rc.detached +} + +// WithBusinessIDAlias allows specifying the business ID alias of the component. +// This option must be specified if the archetype uses the Visibility component. 
+func WithBusinessIDAlias( + alias string, +) RegistrableComponentOption { + return func(rc *RegistrableComponent) { + if rc.searchAttributesMapper == nil { + rc.searchAttributesMapper = &VisibilitySearchAttributesMapper{ + aliasToField: make(map[string]string), + fieldToAlias: make(map[string]string), + saTypeMap: make(map[string]enumspb.IndexedValueType), + systemAliasToField: make(map[string]string), + } + } + if rc.searchAttributesMapper.systemAliasToField == nil { + rc.searchAttributesMapper.systemAliasToField = make(map[string]string) + } + if _, ok := rc.searchAttributesMapper.aliasToField[alias]; ok { + //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: business ID alias %q is already defined as a search attribute", alias)) + } + if _, ok := rc.searchAttributesMapper.systemAliasToField[alias]; ok { + //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: business ID alias %q is already defined as a system search attribute", alias)) + } + rc.searchAttributesMapper.systemAliasToField[alias] = sadefs.WorkflowID + rc.searchAttributesMapper.fieldToAlias[sadefs.WorkflowID] = alias + rc.searchAttributesMapper.saTypeMap[sadefs.WorkflowID] = enumspb.INDEXED_VALUE_TYPE_KEYWORD + } +} + +func WithSearchAttributes( + searchAttributes ...SearchAttribute, +) RegistrableComponentOption { + return func(rc *RegistrableComponent) { + if len(searchAttributes) == 0 { + return + } + + if rc.searchAttributesMapper == nil { + rc.searchAttributesMapper = &VisibilitySearchAttributesMapper{ + aliasToField: make(map[string]string, len(searchAttributes)), + fieldToAlias: make(map[string]string, len(searchAttributes)), + saTypeMap: make(map[string]enumspb.IndexedValueType, len(searchAttributes)), + systemAliasToField: make(map[string]string), + } + } + + for _, sa := range searchAttributes { + alias := sa.definition().alias + field := sa.definition().field + valueType := sa.definition().valueType + + if sadefs.IsChasmSystem(alias) { 
+ //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: CHASM search attribute alias %q is a CHASM system search attribute", alias)) + } + if !sadefs.IsSystem(alias) && sadefs.IsReserved(alias) { + //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: CHASM search attribute alias %q is a reserved search attribute", alias)) + } + + if _, ok := rc.searchAttributesMapper.systemAliasToField[alias]; ok { + //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: CHASM search attribute alias %q is already defined as a system search attribute alias", alias)) + } + if _, ok := rc.searchAttributesMapper.aliasToField[alias]; ok { + //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: search attribute alias %q is already defined", alias)) + } + if _, ok := rc.searchAttributesMapper.fieldToAlias[field]; ok { + //nolint:forbidigo + panic(fmt.Sprintf("registrable component validation error: search attribute field %q is already defined", field)) + } + + rc.searchAttributesMapper.aliasToField[alias] = field + rc.searchAttributesMapper.fieldToAlias[field] = alias + rc.searchAttributesMapper.saTypeMap[field] = valueType + } + } +} + +// WithContextValues allows specifying key-value pairs that will be available in the Context +// via the Value() method whenever the chasm framework starts, updates, reads, polls, executes or +// validates tasks on a component. +// +// This is useful for propagating values needed for those processing logic but are not avaiable via the +// component's struct definition, such as configurations. +// +// Keys need to be globally unique across components. Conflicting keys across will cause component registration to fail. +// +// Manually added key-value pairs via ContextWithValue() will take precedence over registered context values. 
+func WithContextValues( + keyVals map[any]any, +) RegistrableComponentOption { + return func(rc *RegistrableComponent) { + if rc.contextValues == nil { + rc.contextValues = make(map[any]any, len(keyVals)) + } + for k, v := range keyVals { + rc.contextValues[k] = v + } + } +} + +func (rc *RegistrableComponent) registerToLibrary( + library namer, +) (string, uint32, error) { + if rc.library != nil { + return "", 0, fmt.Errorf("component %s is already registered in library %s", rc.componentType, rc.library.Name()) + } + + rc.library = library + rc.fqn = FullyQualifiedName(rc.library.Name(), rc.componentType) + rc.componentID = GenerateTypeID(rc.fqn) + return rc.fqn, rc.componentID, nil +} + +// SearchAttributesMapper returns the search attributes mapper for this component. +func (rc *RegistrableComponent) SearchAttributesMapper() *VisibilitySearchAttributesMapper { + return rc.searchAttributesMapper +} + +// GenerateTypeID generates a unique 32-bit identifier from a fully qualified name (FQN). +// The generated ID is used to uniquely identify components and tasks within the CHASM framework. The same FQN will +// always produce the same ID. +func GenerateTypeID(fqn string) uint32 { + return farm.Fingerprint32([]byte(fqn)) +} + +// hasBusinessIDAlias returns true if the component has a businessID alias configured +// via WithBusinessIDAlias option. +func (rc *RegistrableComponent) hasBusinessIDAlias() bool { + if rc.searchAttributesMapper == nil { + return false + } + _, ok := rc.searchAttributesMapper.fieldToAlias[sadefs.WorkflowID] + return ok +} + +// GoType returns the reflect.Type of the component's Go struct. +func (rc *RegistrableComponent) GoType() reflect.Type { + return rc.goType +} + +// fqType returns the fully qualified name of the component, which is a combination of +// the library name and the component type. This is used to uniquely identify +// the component in the registry. 
+func (rc *RegistrableComponent) fqType() string { + if rc.fqn == "" { + // this should never happen because the component is only accessible from the library. + panic("component is not registered to a library") + } + return rc.fqn +} diff --git a/chasm/registrable_task.go b/chasm/registrable_task.go new file mode 100644 index 00000000000..3d99aeb624b --- /dev/null +++ b/chasm/registrable_task.go @@ -0,0 +1,196 @@ +package chasm + +import ( + "context" + "fmt" + "reflect" +) + +type ( + RegistrableTask struct { + taskType string + goType reflect.Type + componentGoType reflect.Type // It is not clear how this one is used. + validateFn validateFn + pureTaskExecuteFn pureTaskExecuteFn + sideEffectTaskExecuteFn sideEffectTaskExecuteFn + sideEffectTaskDiscardFn sideEffectTaskDiscardFn + isPureTask bool + outboundTaskGroup string // For grouping on the outbound queue. See [WithTaskGroup] for details. + + // Those two fields are initialized when the component is registered to a library. + library namer + taskTypeID uint32 + } + + RegistrableTaskOption func(*RegistrableTask) + + validateFn func(Context, any, TaskAttributes, any, *Registry) (bool, error) + pureTaskExecuteFn func(MutableContext, any, TaskAttributes, any, *Registry) error + sideEffectTaskExecuteFn func(context.Context, ComponentRef, TaskAttributes, any) error + sideEffectTaskDiscardFn func(context.Context, ComponentRef, TaskAttributes, any) error +) + +// NewRegistrableSideEffectTask creates a new registrable side-effect task. NOTE: C is not Component but any. +// The handler's Discard method is called on standby clusters when a task has been pending past the discard delay. 
+func NewRegistrableSideEffectTask[C any, T any]( + taskType string, + handler SideEffectTaskHandler[C, T], + opts ...RegistrableTaskOption, +) *RegistrableTask { + return newRegistrableTask( + taskType, + reflect.TypeFor[T](), + reflect.TypeFor[C](), + func( + ctx Context, + component any, + taskAttrs TaskAttributes, + taskData any, + registry *Registry, + ) (bool, error) { + return handler.Validate( + ctx, + component.(C), + taskAttrs, + taskData.(T), + ) + }, + nil, // pureTaskExecuteFn is not used for side effect tasks + func( + ctx context.Context, + componentRef ComponentRef, + taskAttrs TaskAttributes, + taskData any, + ) error { + return handler.Execute(ctx, componentRef, taskAttrs, taskData.(T)) + }, + false, + func(ctx context.Context, ref ComponentRef, attrs TaskAttributes, task any) error { + return handler.Discard(ctx, ref, attrs, task.(T)) + }, + opts..., + ) +} + +func NewRegistrablePureTask[C any, T any]( + taskType string, + handler PureTaskHandler[C, T], + opts ...RegistrableTaskOption, +) *RegistrableTask { + return newRegistrableTask( + taskType, + reflect.TypeFor[T](), + reflect.TypeFor[C](), + func( + ctx Context, + component any, + taskAttrs TaskAttributes, + taskData any, + registry *Registry, + ) (bool, error) { + return handler.Validate( + ctx, + component.(C), + taskAttrs, + taskData.(T), + ) + }, + func( + ctx MutableContext, + component any, + taskAttrs TaskAttributes, + taskData any, + registry *Registry, + ) error { + return handler.Execute( + ctx, + component.(C), + taskAttrs, + taskData.(T), + ) + }, + nil, // sideEffectTaskExecuteFn is not used for pure tasks + true, + nil, // sideEffectTaskDiscardFn is not used for pure tasks + opts..., + ) +} + +func newRegistrableTask( + taskType string, + goType, componentGoType reflect.Type, + validateFn validateFn, + pureTaskExecuteFn pureTaskExecuteFn, + sideEffectTaskExecuteFn sideEffectTaskExecuteFn, + isPureTask bool, + sideEffectTaskDiscardFn sideEffectTaskDiscardFn, + opts 
...RegistrableTaskOption, +) *RegistrableTask { + rt := &RegistrableTask{ + taskType: taskType, + goType: goType, + componentGoType: componentGoType, + validateFn: validateFn, + pureTaskExecuteFn: pureTaskExecuteFn, + sideEffectTaskExecuteFn: sideEffectTaskExecuteFn, + sideEffectTaskDiscardFn: sideEffectTaskDiscardFn, + isPureTask: isPureTask, + } + + for _, opt := range opts { + opt(rt) + } + + return rt +} + +func (rt *RegistrableTask) registerToLibrary( + library namer, +) (string, uint32, error) { + if rt.library != nil { + return "", 0, fmt.Errorf("task %s is already registered in library %s", rt.taskType, rt.library.Name()) + } + + rt.library = library + + fqn := rt.fqType() + rt.taskTypeID = GenerateTypeID(fqn) + // If outboundTaskGroup wasn't set on creation default it here, + // since this is the first place we will have the fqn. + if rt.outboundTaskGroup == "" { + rt.outboundTaskGroup = fqn + } + return fqn, rt.taskTypeID, nil +} + +// TaskGroup returns the side-effect task group for the task. +func (rt *RegistrableTask) TaskGroup() string { + return rt.outboundTaskGroup +} + +// GoType returns the reflect.Type of the task's Go struct. +func (rt *RegistrableTask) GoType() reflect.Type { + return rt.goType +} + +// fqType returns the fully qualified name of the task, which is a combination of +// the library name and the task type. This is used to uniquely identify +// the task in the registry. +func (rt *RegistrableTask) fqType() string { + if rt.library == nil { + // this should never happen because the task is only accessible from the library. + panic("task is not registered to a library") + } + return FullyQualifiedName(rt.library.Name(), rt.taskType) +} + +// WithTaskGroup sets the task group for the task. The task group is used when +// the side effect's destination is specified for grouping semantics on the outbound queue, +// affects multi-cursor and the circuit breaker. 
+// If task group isn't provided, the task group will default to the fully qualified name at library registration. +func WithTaskGroup(taskgroup string) RegistrableTaskOption { + return func(rt *RegistrableTask) { + rt.outboundTaskGroup = taskgroup + } +} diff --git a/chasm/registry.go b/chasm/registry.go new file mode 100644 index 00000000000..910cdcf4991 --- /dev/null +++ b/chasm/registry.go @@ -0,0 +1,389 @@ +package chasm + +import ( + "errors" + "fmt" + "maps" + "reflect" + "regexp" + "strings" + + "github.com/nexus-rpc/sdk-go/nexus" + "go.temporal.io/server/common/log" + "google.golang.org/grpc" +) + +var ( + // This is golang type identifier regex. + nameValidator = regexp.MustCompile(`^[A-Za-z_][A-Za-z0-9_]*$`) +) + +type ( + Registry struct { + libraries map[string]Library // library name -> library + + // rc stands for RegistrableComponent. + rcByFqn map[string]*RegistrableComponent // fully qualified type name -> component + rcByID map[uint32]*RegistrableComponent // component type ID -> component + rcByGoType map[reflect.Type]*RegistrableComponent // component go type -> component + // rcContextValues is aggregated context values from all components, + // used for easy lookup when Context.Value(key) is called. + // Registration process will check for key conflicts and return error if same key is registered by multiple components. + rcContextValues map[any]valueWithFqn + + // rt stands for RegistrableTask. + rtByFqn map[string]*RegistrableTask // fully qualified type name -> task + rtByID map[uint32]*RegistrableTask // task type ID -> task + rtByGoType map[reflect.Type]*RegistrableTask // task go type -> task + + nexusServices map[string]*nexus.Service // service name -> nexus service + NexusEndpointProcessor *NexusEndpointProcessor + + logger log.Logger + } +) + +// valueWithFqn is a wrapper struct that associates a value with +// the fully qualified name (FQN) of the component that registered it. 
+type valueWithFqn struct { + v any + fqn string +} + +func NewRegistry(logger log.Logger) *Registry { + return &Registry{ + libraries: make(map[string]Library), + rcByFqn: make(map[string]*RegistrableComponent), + rcByID: make(map[uint32]*RegistrableComponent), + rcByGoType: make(map[reflect.Type]*RegistrableComponent), + rtByFqn: make(map[string]*RegistrableTask), + rtByID: make(map[uint32]*RegistrableTask), + rtByGoType: make(map[reflect.Type]*RegistrableTask), + rcContextValues: make(map[any]valueWithFqn), + nexusServices: make(map[string]*nexus.Service), + NexusEndpointProcessor: NewNexusEndpointProcessor(), + logger: logger, + } +} + +func (r *Registry) Register(lib Library) error { + if err := r.validateName(lib.Name()); err != nil { + return err + } + if _, ok := r.libraries[lib.Name()]; ok { + return fmt.Errorf("library %s is already registered", lib.Name()) + } + r.libraries[lib.Name()] = lib + + for _, c := range lib.Components() { + if err := r.registerComponent(lib, c); err != nil { + return err + } + } + for _, t := range lib.Tasks() { + if err := r.registerTask(lib, t); err != nil { + return err + } + } + + for _, svc := range lib.NexusServices() { + if err := r.registerNexusService(svc); err != nil { + return err + } + } + + for _, svc := range lib.NexusServiceProcessors() { + if err := r.NexusEndpointProcessor.RegisterServiceProcessor(svc); err != nil { + return err + } + } + + return nil +} + +// RegisterServices registers all gRPC services from all registered libraries. +func (r *Registry) RegisterServices(server *grpc.Server) { + for _, lib := range r.libraries { + lib.RegisterServices(server) + } +} + +// ComponentFqnByID converts component type ID to fully qualified component type name. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. 
+func (r *Registry) ComponentFqnByID(id uint32) (string, bool) { + rc, ok := r.rcByID[id] + if !ok { + return "", false + } + return rc.fqType(), true +} + +// ComponentIDByFqn converts fully qualified component type name to component type ID. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) ComponentIDByFqn(fqn string) (uint32, bool) { + rc, ok := r.rcByFqn[fqn] + if !ok { + return 0, false + } + return rc.componentID, true +} + +// ComponentByID returns the registrable component for a given archetype ID. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) ComponentByID(id uint32) (*RegistrableComponent, bool) { + rc, ok := r.rcByID[id] + return rc, ok +} + +// ComponentIDFor converts registered component instance to component type ID. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) ComponentIDFor(componentInstance any) (uint32, bool) { + rc, ok := r.componentFor(componentInstance) + if !ok { + return 0, false + } + return rc.componentID, true +} + +// TaskByID returns the registrable task for a given task type ID. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) TaskByID(id uint32) (*RegistrableTask, bool) { + rt, ok := r.rtByID[id] + return rt, ok +} + +// TaskFqnByID converts task type ID to fully qualified task type name. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) TaskFqnByID(id uint32) (string, bool) { + rt, ok := r.rtByID[id] + if !ok { + return "", false + } + return rt.fqType(), true +} + +// TaskIDFor converts registered task instance to task type ID. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. 
+func (r *Registry) TaskIDFor(taskInstance any) (uint32, bool) { + rt, ok := r.taskFor(taskInstance) + if !ok { + return 0, false + } + return rt.taskTypeID, true +} + +// ArchetypeDisplayName returns the human-readable name for a given archetype ID. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) ArchetypeDisplayName(id ArchetypeID) (string, bool) { + rc, ok := r.ComponentByID(id) + if !ok { + return "", false + } + return rc.componentType, true +} + +// ArchetypeIDOf returns the ArchetypeID for the given component Go type. +// This method should only be used by CHASM framework internal code, +// NOT CHASM library developers. +func (r *Registry) ArchetypeIDOf(componentGoType reflect.Type) (ArchetypeID, bool) { + rc, ok := r.rcByGoType[componentGoType] + if !ok { + return UnspecifiedArchetypeID, false + } + return rc.componentID, true +} + +func (r *Registry) component(fqn string) (*RegistrableComponent, bool) { + rc, ok := r.rcByFqn[fqn] + return rc, ok +} + +func (r *Registry) task(fqn string) (*RegistrableTask, bool) { + rt, ok := r.rtByFqn[fqn] + return rt, ok +} + +func (r *Registry) componentFor(componentInstance any) (*RegistrableComponent, bool) { + rc, ok := r.rcByGoType[reflect.TypeOf(componentInstance)] + return rc, ok +} + +func (r *Registry) taskFor(taskInstance any) (*RegistrableTask, bool) { + rt, ok := r.rtByGoType[reflect.TypeOf(taskInstance)] + return rt, ok +} + +func (r *Registry) componentOf(componentGoType reflect.Type) (*RegistrableComponent, bool) { + rc, ok := r.rcByGoType[componentGoType] + return rc, ok +} + +func (r *Registry) taskOf(taskGoType reflect.Type) (*RegistrableTask, bool) { + rt, ok := r.rtByGoType[taskGoType] + return rt, ok +} + +func (r *Registry) registerComponent( + lib namer, + rc *RegistrableComponent, +) error { + if err := r.validate(rc); err != nil { + return err + } + + fqn, id, err := rc.registerToLibrary(lib) + if err != nil { + return err 
+ } + + if _, ok := r.rcByFqn[fqn]; ok { + return fmt.Errorf("component %s is already registered", fqn) + } + + if id == UnspecifiedArchetypeID { + return fmt.Errorf("component %s maps to a reserved archetype id %d, please use a different name", fqn, UnspecifiedArchetypeID) + } + + if existingComponent, ok := r.rcByID[id]; ok { + return fmt.Errorf("component ID %d collision between %s and %s", id, fqn, existingComponent.fqType()) + } + + for key, value := range rc.contextValues { + if existingValue, ok := r.rcContextValues[key]; ok { + return fmt.Errorf("context value key %v registered by component %s conflicts with component %s", key, fqn, existingValue.fqn) + } + r.rcContextValues[key] = valueWithFqn{ + v: value, + fqn: fqn, + } + } + + // rc.goType implements Component interface; therefore, it must be a struct. + // This check to protect against the interface itself being registered. + if !(rc.goType.Kind() == reflect.Struct || + (rc.goType.Kind() == reflect.Ptr && rc.goType.Elem().Kind() == reflect.Struct)) { + return fmt.Errorf("component type %s must be struct or pointer to struct", rc.goType.String()) + } + if _, ok := r.rcByGoType[rc.goType]; ok { + return fmt.Errorf("component type %s is already registered", rc.goType.String()) + } + r.warnUnmanagedFields(fqn, rc) + + r.rcByFqn[fqn] = rc + r.rcByID[id] = rc + r.rcByGoType[rc.goType] = rc + return nil +} + +func (r *Registry) validate(rc *RegistrableComponent) error { + if err := r.validateName(rc.componentType); err != nil { + return err + } + return r.validateVisibilityBusinessIDAlias(rc) +} + +func (r *Registry) registerTask( + lib namer, + rt *RegistrableTask, +) error { + if err := r.validateName(rt.taskType); err != nil { + return err + } + + fqn, id, err := rt.registerToLibrary(lib) + if err != nil { + return err + } + + if _, ok := r.rtByFqn[fqn]; ok { + return fmt.Errorf("task %s is already registered", fqn) + } + + if existingTask, ok := r.rtByID[id]; ok { + return fmt.Errorf("task type ID %d 
collision between %s and %s", id, fqn, existingTask.fqType()) + } + + if !(rt.goType.Kind() == reflect.Struct || + (rt.goType.Kind() == reflect.Ptr && rt.goType.Elem().Kind() == reflect.Struct)) { + return fmt.Errorf("task type %s must be struct or pointer to struct", rt.goType.String()) + } + if _, ok := r.rtByGoType[rt.goType]; ok { + return fmt.Errorf("task type %s is already registered", rt.goType.String()) + } + if !(rt.componentGoType.Kind() == reflect.Interface || + (rt.componentGoType.Kind() == reflect.Struct || + (rt.componentGoType.Kind() == reflect.Ptr && rt.componentGoType.Elem().Kind() == reflect.Struct)) && + rt.componentGoType.AssignableTo(reflect.TypeOf((*Component)(nil)).Elem())) { + return fmt.Errorf("component type %s must be and interface or struct that implements Component interface", rt.componentGoType.String()) + } + + r.rtByFqn[fqn] = rt + r.rtByID[id] = rt + r.rtByGoType[rt.goType] = rt + return nil +} + +func (r *Registry) validateName(n string) error { + if n == "" { + return errors.New("name must not be empty") + } + if !nameValidator.MatchString(n) { + return fmt.Errorf("name %s is invalid. name must follow golang identifier rules: %s", n, nameValidator.String()) + } + return nil +} + +func (r *Registry) validateVisibilityBusinessIDAlias(rc *RegistrableComponent) error { + if !hasVisibilityField(rc.goType) { + return nil + } + // Archetypes that contain a Field[*Visibility] must specify WithBusinessIDAlias. 
+ if !rc.hasBusinessIDAlias() { + return fmt.Errorf("component %s has Field[*Visibility] but no businessID alias; use WithBusinessIDAlias option", rc.componentType) + } + return nil +} + +func (r *Registry) warnUnmanagedFields(fqn string, rc *RegistrableComponent) { + var unmanagedFields []string + for f := range unmanagedFieldsOf(rc.goType) { + unmanagedFields = append(unmanagedFields, fmt.Sprintf("%s %s", f.name, f.typ)) + } + if len(unmanagedFields) > 0 { + r.logger.Info(fmt.Sprintf( + "Warning: CHASM component %s declares state fields that won't be managed by CHASM:\n\t%s", + fqn, + strings.Join(unmanagedFields, "\n\t"))) + } +} + +func (r *Registry) registerNexusService(svc *nexus.Service) error { + if _, ok := r.nexusServices[svc.Name]; ok { + return fmt.Errorf("nexus service %s is already registered", svc.Name) + } + r.nexusServices[svc.Name] = svc + return nil +} + +// NexusServices returns all registered Nexus services. +func (r *Registry) NexusServices() map[string]*nexus.Service { + // Return a copy to prevent external modification + services := make(map[string]*nexus.Service, len(r.nexusServices)) + maps.Copy(services, r.nexusServices) + return services +} + +func (r *Registry) componentContextValue(key any) any { + if v, ok := r.rcContextValues[key]; ok { + return v.v + } + return nil +} diff --git a/chasm/registry_test.go b/chasm/registry_test.go new file mode 100644 index 00000000000..8429f03eb17 --- /dev/null +++ b/chasm/registry_test.go @@ -0,0 +1,534 @@ +package chasm_test + +import ( + "reflect" + "testing" + + "github.com/nexus-rpc/sdk-go/nexus" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/server/chasm" + "go.temporal.io/server/common/log" + "go.uber.org/mock/gomock" +) + +type ( + RegistryTestSuite struct { + suite.Suite + logger log.Logger + } + + testTask1 struct{} + testTask2 struct{} + testTaskComponentInterface interface { + DoSomething() + } + + // testComponentWithVisibility is a test 
component that has a Visibility field. + testComponentWithVisibility struct { + chasm.UnimplementedComponent + Visibility chasm.Field[*chasm.Visibility] + } +) + +func (t *testComponentWithVisibility) LifecycleState(_ chasm.Context) chasm.LifecycleState { + return chasm.LifecycleStateRunning +} + +func TestRegistryTestSuite(t *testing.T) { + suite.Run(t, new(RegistryTestSuite)) +} + +func (s *RegistryTestSuite) SetupTest() { + s.logger = log.NewTestLogger() +} + +func (s *RegistryTestSuite) TestRegistry_RegisterComponents_Success() { + r := chasm.NewRegistry(s.logger) + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*chasm.MockComponent]("Component1"), + }) + + lib.EXPECT().Tasks().Return(nil) + lib.EXPECT().NexusServices().Return(nil) + lib.EXPECT().NexusServiceProcessors().Return(nil) + + err := r.Register(lib) + require.NoError(s.T(), err) + + rc1, ok := r.Component("TestLibrary.Component1") + require.True(s.T(), ok) + require.Equal(s.T(), "TestLibrary.Component1", rc1.FqType()) + + missingRC, ok := r.Component("TestLibrary.Component2") + require.False(s.T(), ok) + require.Nil(s.T(), missingRC) + + cInstance1 := chasm.NewMockComponent(ctrl) + rc2, ok := r.ComponentFor(cInstance1) + require.True(s.T(), ok) + require.Equal(s.T(), "TestLibrary.Component1", rc2.FqType()) + + rc2, ok = r.ComponentOf(reflect.TypeOf(cInstance1)) + require.True(s.T(), ok) + require.Equal(s.T(), "TestLibrary.Component1", rc2.FqType()) + + cInstance2 := "invalid component instance" + rc3, ok := r.ComponentFor(cInstance2) + require.False(s.T(), ok) + require.Nil(s.T(), rc3) +} + +func (s *RegistryTestSuite) TestRegistry_RegisterComponents_WithDetached() { + r := chasm.NewRegistry(s.logger) + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + 
lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*chasm.MockComponent]("DetachedComponent", chasm.WithDetached()), + }) + lib.EXPECT().Tasks().Return(nil) + lib.EXPECT().NexusServices().Return(nil) + lib.EXPECT().NexusServiceProcessors().Return(nil) + + err := r.Register(lib) + s.Require().NoError(err) + + // Detached component should have IsDetached() return true + detachedRC, ok := r.Component("TestLibrary.DetachedComponent") + s.Require().True(ok) + s.Require().True(detachedRC.IsDetached()) + + // Verify that a component without WithDetached() has IsDetached() return false + normalRC := chasm.NewRegistrableComponent[*chasm.MockComponent]("NormalComponent") + s.Require().False(normalRC.IsDetached()) +} + +func (s *RegistryTestSuite) TestRegistry_RegisterTasks_Success() { + r := chasm.NewRegistry(s.logger) + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return(nil) + lib.EXPECT().NexusServices().Return(nil) + lib.EXPECT().NexusServiceProcessors().Return(nil) + + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{ + chasm.NewRegistrableSideEffectTask( + "Task1", + chasm.NewMockSideEffectTaskHandler[*chasm.MockComponent, testTask1](ctrl), + chasm.WithTaskGroup("test-task-group"), + ), + chasm.NewRegistrablePureTask( + "Task2", + chasm.NewMockPureTaskHandler[testTaskComponentInterface, testTask2](ctrl), + ), + }) + + err := r.Register(lib) + require.NoError(s.T(), err) + + rt1, ok := r.Task("TestLibrary.Task1") + require.True(s.T(), ok) + require.Equal(s.T(), "TestLibrary.Task1", rt1.FqType()) + s.Require().Equal("test-task-group", rt1.TaskGroup()) + + missingRT, ok := r.Task("TestLibrary.TaskMissing") + require.False(s.T(), ok) + require.Nil(s.T(), missingRT) + + tInstance1 := testTask2{} + rt2, ok := r.TaskFor(tInstance1) + require.True(s.T(), ok) + 
require.Equal(s.T(), "TestLibrary.Task2", rt2.FqType()) + s.Require().Equal(rt2.FqType(), rt2.TaskGroup()) + + rt2, ok = r.TaskOf(reflect.TypeOf(tInstance1)) + require.True(s.T(), ok) + require.Equal(s.T(), "TestLibrary.Task2", rt2.FqType()) + + tInstance2 := "invalid task instance" + rt3, ok := r.TaskFor(tInstance2) + require.False(s.T(), ok) + require.Nil(s.T(), rt3) +} + +func (s *RegistryTestSuite) TestRegistry_Register_LibraryError() { + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + + s.T().Run("library name must not be empty", func(t *testing.T) { + lib.EXPECT().Name().Return("") + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "name must not be empty") + }) + + s.T().Run("library name must follow rules", func(t *testing.T) { + lib.EXPECT().Name().Return("bad.lib.name") + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "name must follow golang identifier rules") + }) +} + +func (s *RegistryTestSuite) TestRegistry_RegisterComponents_Error() { + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + + s.T().Run("component name must not be empty", func(t *testing.T) { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*chasm.MockComponent](""), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "name must not be empty") + }) + + s.T().Run("component name must follow rules", func(t *testing.T) { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*chasm.MockComponent]("bad.component.name"), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "name must follow golang identifier rules") + }) + + 
s.T().Run("component is already registered by name", func(t *testing.T) { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*chasm.MockComponent]("Component1"), + chasm.NewRegistrableComponent[*chasm.MockComponent]("Component1"), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "is already registered") + }) + + s.T().Run("component is already registered by type", func(t *testing.T) { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*chasm.MockComponent]("Component1"), + chasm.NewRegistrableComponent[*chasm.MockComponent]("Component2"), + }) + r := chasm.NewRegistry(s.logger) + + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "is already registered") + }) + + s.T().Run("component is already registered in another library", func(t *testing.T) { + lib2 := chasm.NewMockLibrary(ctrl) + lib2.EXPECT().Name().Return("TestLibrary2").AnyTimes() + + component := chasm.NewRegistrableComponent[*chasm.MockComponent]("Component1") + lib2.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + component, + }) + lib2.EXPECT().Tasks().Return(nil) + lib2.EXPECT().NexusServices().Return(nil) + lib2.EXPECT().NexusServiceProcessors().Return(nil) + r2 := chasm.NewRegistry(s.logger) + err := r2.Register(lib2) + require.NoError(t, err) + + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + component, + }) + r := chasm.NewRegistry(s.logger) + + err = r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "is already registered in library TestLibrary2") + }) + + s.T().Run("component must be a struct", func(t *testing.T) { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[chasm.Component]("Component1"), + }) + r := chasm.NewRegistry(s.logger) + + err := r.Register(lib) + require.Error(t, err) + 
require.Contains(t, err.Error(), "must be struct or pointer to struct") + }) + + s.Run("duplicate search attribute alias panics", func() { + s.Require().PanicsWithValue("registrable component validation error: search attribute alias \"MyAlias\" is already defined", + func() { + chasm.NewRegistrableComponent[*chasm.MockComponent]( + "Component1", + chasm.WithSearchAttributes( + chasm.NewSearchAttributeBool("MyAlias", chasm.SearchAttributeFieldBool01), + chasm.NewSearchAttributeInt("MyAlias", chasm.SearchAttributeFieldInt01), + ), + ) + }, + ) + }) + + s.Run("duplicate search attribute field panics", func() { + s.Require().PanicsWithValue("registrable component validation error: search attribute field \"TemporalBool01\" is already defined", + func() { + chasm.NewRegistrableComponent[*chasm.MockComponent]( + "Component1", + chasm.WithSearchAttributes( + chasm.NewSearchAttributeBool("Alias1", chasm.SearchAttributeFieldBool01), + chasm.NewSearchAttributeBool("Alias2", chasm.SearchAttributeFieldBool01), + ), + ) + }, + ) + }) + + s.Run("valid search attributes do not panic", func() { + s.Require().NotPanics(func() { + chasm.NewRegistrableComponent[*chasm.MockComponent]( + "Component1", + chasm.WithSearchAttributes( + chasm.NewSearchAttributeBool("Completed", chasm.SearchAttributeFieldBool01), + chasm.NewSearchAttributeInt("Count", chasm.SearchAttributeFieldInt01), + chasm.NewSearchAttributeKeyword("Status", chasm.SearchAttributeFieldKeyword01), + ), + ) + }) + }) + + s.Run("ExecutionStatus alias is allowed for CHASM components", func() { + s.Require().NotPanics(func() { + chasm.NewRegistrableComponent[*chasm.MockComponent]( + "Component1", + chasm.WithSearchAttributes( + chasm.NewSearchAttributeKeyword("ExecutionStatus", chasm.SearchAttributeFieldLowCardinalityKeyword01), + ), + ) + }) + }) + + s.Run("TaskQueue preallocated search attribute is allowed", func() { + s.Require().NotPanics(func() { + chasm.NewRegistrableComponent[*chasm.MockComponent]( + "Component1", + 
chasm.WithSearchAttributes( + chasm.SearchAttributeTaskQueue, + ), + ) + }) + }) + + s.Run("CHASM system search attribute alias panics", func() { + s.Require().PanicsWithValue( + "registrable component validation error: CHASM search attribute alias \"WorkflowId\" is a CHASM system search attribute", + func() { + chasm.NewRegistrableComponent[*chasm.MockComponent]( + "Component1", + chasm.WithSearchAttributes( + chasm.NewSearchAttributeKeyword("WorkflowId", chasm.SearchAttributeFieldKeyword01), + ), + ) + }, + ) + }) + + s.Run("component with Visibility field must have businessID alias", func() { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*testComponentWithVisibility]("ComponentWithVis"), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + s.Require().Error(err) + s.Require().Contains(err.Error(), "has Field[*Visibility] but no businessID alias") + }) + + s.Run("component with Visibility field and businessID alias succeeds", func() { + lib.EXPECT().Components().Return([]*chasm.RegistrableComponent{ + chasm.NewRegistrableComponent[*testComponentWithVisibility]( + "ComponentWithVis", + chasm.WithBusinessIDAlias("MyBusinessId"), + ), + }) + lib.EXPECT().Tasks().Return(nil) + lib.EXPECT().NexusServices().Return(nil) + lib.EXPECT().NexusServiceProcessors().Return(nil) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + s.Require().NoError(err) + }) + +} + +func (s *RegistryTestSuite) TestRegistry_RegisterTasks_Error() { + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return(nil).AnyTimes() + + s.T().Run("task name must not be empty", func(t *testing.T) { + r := chasm.NewRegistry(s.logger) + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask[*chasm.MockComponent, testTask1]( + "", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, testTask1](ctrl), + 
), + }) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "name must not be empty") + }) + + s.T().Run("task name must follow rules", func(t *testing.T) { + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask[*chasm.MockComponent, testTask1]( + "bad.task.name", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, testTask1](ctrl), + ), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "name must follow golang identifier rules") + }) + + s.T().Run("task is already registered by name", func(t *testing.T) { + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask[*chasm.MockComponent, testTask1]( + "Task1", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, testTask1](ctrl), + ), + chasm.NewRegistrableSideEffectTask[*chasm.MockComponent, testTask1]( + "Task1", + chasm.NewMockSideEffectTaskHandler[*chasm.MockComponent, testTask1](ctrl), + ), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "is already registered") + }) + + s.T().Run("task is already registered by type", func(t *testing.T) { + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask[*chasm.MockComponent, testTask1]( + "Task1", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, testTask1](ctrl), + ), + chasm.NewRegistrablePureTask[*chasm.MockComponent, testTask1]( + "Task2", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, testTask1](ctrl), + ), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + require.Error(t, err) + require.Contains(t, err.Error(), "is already registered") + }) + + s.Run("task is already registered in another library", func() { + lib2 := chasm.NewMockLibrary(ctrl) + lib2.EXPECT().Name().Return("TestLibrary2").AnyTimes() + + lib2.EXPECT().Components().Return(nil) + 
lib2.EXPECT().NexusServices().Return(nil) + lib2.EXPECT().NexusServiceProcessors().Return(nil) + task := chasm.NewRegistrablePureTask[*chasm.MockComponent, testTask1]( + "Task1", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, testTask1](ctrl), + ) + lib2.EXPECT().Tasks().Return([]*chasm.RegistrableTask{task}) + r2 := chasm.NewRegistry(s.logger) + err := r2.Register(lib2) + s.Require().NoError(err) + + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{task}) + r := chasm.NewRegistry(s.logger) + + err = r.Register(lib) + s.ErrorContains(err, "is already registered in library TestLibrary2") + }) + + s.Run("task must be struct", func() { + lib.EXPECT().Tasks().Return([]*chasm.RegistrableTask{ + chasm.NewRegistrablePureTask[*chasm.MockComponent, string]( + "Task1", + chasm.NewMockPureTaskHandler[*chasm.MockComponent, string](ctrl), + ), + }) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + s.ErrorContains(err, "must be struct or pointer to struct") + }) +} + +func (s *RegistryTestSuite) TestRegistry_RegisterNexusServices_Success() { + r := chasm.NewRegistry(s.logger) + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return(nil) + lib.EXPECT().Tasks().Return(nil) + lib.EXPECT().NexusServiceProcessors().Return(nil) + + svc1 := nexus.NewService("Service1") + svc2 := nexus.NewService("Service2") + lib.EXPECT().NexusServices().Return([]*nexus.Service{svc1, svc2}) + + err := r.Register(lib) + s.Require().NoError(err) + + services := r.NexusServices() + s.Require().Len(services, 2) + s.Require().Contains(services, "Service1") + s.Require().Contains(services, "Service2") + s.Require().Equal(svc1, services["Service1"]) + s.Require().Equal(svc2, services["Service2"]) +} + +func (s *RegistryTestSuite) TestRegistry_RegisterNexusServices_Error() { + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + 
lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return(nil).AnyTimes() + lib.EXPECT().Tasks().Return(nil).AnyTimes() + lib.EXPECT().NexusServiceProcessors().Return(nil).AnyTimes() + + s.Run("nexus service is already registered", func() { + svc := nexus.NewService("Service1") + lib.EXPECT().NexusServices().Return([]*nexus.Service{svc, svc}) + r := chasm.NewRegistry(s.logger) + err := r.Register(lib) + s.Require().ErrorContains(err, "is already registered") + }) +} + +func (s *RegistryTestSuite) TestRegistry_RegisterNexusServiceProcessors() { + r := chasm.NewRegistry(s.logger) + ctrl := gomock.NewController(s.T()) + lib := chasm.NewMockLibrary(ctrl) + lib.EXPECT().Name().Return("TestLibrary").AnyTimes() + lib.EXPECT().Components().Return(nil) + lib.EXPECT().Tasks().Return(nil) + lib.EXPECT().NexusServices().Return(nil) + + proc1 := chasm.NewNexusServiceProcessor("ServiceProcessor1") + proc2 := chasm.NewNexusServiceProcessor("ServiceProcessor2") + lib.EXPECT().NexusServiceProcessors().Return([]*chasm.NexusServiceProcessor{proc1, proc2}) + + err := r.Register(lib) + s.Require().NoError(err) + + // Verify the processors were registered by attempting to use them + // We can verify registration indirectly by trying to register them again which should fail + err = r.NexusEndpointProcessor.RegisterServiceProcessor(proc1) + s.Require().ErrorContains(err, "already registered") + + err = r.NexusEndpointProcessor.RegisterServiceProcessor(proc2) + s.Require().ErrorContains(err, "already registered") +} diff --git a/chasm/scheduler.go b/chasm/scheduler.go new file mode 100644 index 00000000000..d4d0c1f5683 --- /dev/null +++ b/chasm/scheduler.go @@ -0,0 +1,14 @@ +package chasm + +// This file defines constants for Scheduler which is special to the CHASM framework +// because it shares the same ID space with Workflow for backwards compatibility reasons. 
+ +const ( + SchedulerLibraryName = "scheduler" + SchedulerComponentName = "scheduler" +) + +var ( + SchedulerArchetype = Archetype(FullyQualifiedName(SchedulerLibraryName, SchedulerComponentName)) + SchedulerArchetypeID = ArchetypeID(GenerateTypeID(SchedulerArchetype)) +) diff --git a/chasm/search_attribute.go b/chasm/search_attribute.go new file mode 100644 index 00000000000..db2e966b9b1 --- /dev/null +++ b/chasm/search_attribute.go @@ -0,0 +1,469 @@ +package chasm + +import ( + "fmt" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/searchattribute/sadefs" +) + +// CHASM Search Attribute User Guide: +// +// This contains CHASM search attribute field constants. These predefined fields correspond to the exact column name in Visibility storage. +// For each root component, search attributes can be mapped from a user defined alias to these fields. +// Each component must register its search attributes with the CHASM Registry. +// +// To define a CHASM search attribute, create this as a package/global scoped variable. Below is an example: +// var testComponentCompletedSearchAttribute = NewSearchAttributeBool("Completed", SearchAttributeFieldBool01) +// var testComponentFailedSearchAttribute = NewSearchAttributeBool("Failed", SearchAttributeFieldBool02) +// var testComponentStartTimeSearchAttribute = NewSearchAttributeTime("StartTime", SearchAttributeFieldDateTime01) +// var testComponentCategorySearchAttribute = NewSearchAttributeLowCardinalityKeyword("Category", SearchAttributeFieldLowCardinalityKeyword01) +// +// Each CHASM search attribute field is associated with a specific indexed value type. The Value() method of a search attribute +// specifies the supported value type to set at compile time. eg. DateTime values must be set with a time.Time typed value. +// +// Low Cardinality Keyword Fields: used for categorical data that support GROUP BY aggregations. 
Values must be limited to a small number of dimensions.
//
// Each root component can only use a predefined search attribute field once. Developers should not reassign aliases to different fields.
// Reassigning aliases to different fields will result in incorrect visibility query results.
//
// To register these search attributes with the CHASM Registry, use the WithSearchAttributes() option when creating the component in the library.
// eg.
// NewRegistrableComponent[T]("testcomponent", WithSearchAttributes(testComponentCompletedSearchAttribute, testComponentStartTimeSearchAttribute))
var (
	SearchAttributeFieldBool01 = newSearchAttributeFieldBool(1)
	SearchAttributeFieldBool02 = newSearchAttributeFieldBool(2)

	SearchAttributeFieldDateTime01 = newSearchAttributeFieldDateTime(1)
	SearchAttributeFieldDateTime02 = newSearchAttributeFieldDateTime(2)

	SearchAttributeFieldInt01 = newSearchAttributeFieldInt(1)
	SearchAttributeFieldInt02 = newSearchAttributeFieldInt(2)

	SearchAttributeFieldDouble01 = newSearchAttributeFieldDouble(1)
	SearchAttributeFieldDouble02 = newSearchAttributeFieldDouble(2)

	SearchAttributeFieldKeyword01 = newSearchAttributeFieldKeyword(1)
	SearchAttributeFieldKeyword02 = newSearchAttributeFieldKeyword(2)
	SearchAttributeFieldKeyword03 = newSearchAttributeFieldKeyword(3)
	SearchAttributeFieldKeyword04 = newSearchAttributeFieldKeyword(4)

	// SearchAttributeFieldLowCardinalityKeyword is a search attribute field for a low cardinality keyword value.
	// Used for categorical data that support GROUP BY aggregations, eg. CHASM Execution Statuses.
+ SearchAttributeFieldLowCardinalityKeyword01 = newSearchAttributeFieldLowCardinalityKeyword(1) + + SearchAttributeFieldKeywordList01 = newSearchAttributeFieldKeywordList(1) + SearchAttributeFieldKeywordList02 = newSearchAttributeFieldKeywordList(2) + + SearchAttributeTaskQueue = newSearchAttributeKeywordByField(sadefs.TaskQueue) + SearchAttributeTemporalChangeVersion = newSearchAttributeKeywordListByField(sadefs.TemporalChangeVersion) + SearchAttributeBinaryChecksums = newSearchAttributeKeywordListByField(sadefs.BinaryChecksums) + SearchAttributeBuildIds = newSearchAttributeKeywordListByField(sadefs.BuildIds) + SearchAttributeBatcherNamespace = newSearchAttributeKeywordByField(sadefs.BatcherNamespace) + SearchAttributeBatcherUser = newSearchAttributeKeywordByField(sadefs.BatcherUser) + SearchAttributeTemporalScheduledStartTime = newSearchAttributeDateTimeByField(sadefs.TemporalScheduledStartTime) + SearchAttributeTemporalScheduledByID = newSearchAttributeKeywordByField(sadefs.TemporalScheduledById) + SearchAttributeTemporalSchedulePaused = newSearchAttributeBoolByField(sadefs.TemporalSchedulePaused) + SearchAttributeTemporalNamespaceDivision = newSearchAttributeKeywordByField(sadefs.TemporalNamespaceDivision) + SearchAttributeTemporalPauseInfo = newSearchAttributeKeywordListByField(sadefs.TemporalPauseInfo) + SearchAttributeTemporalReportedProblems = newSearchAttributeKeywordListByField(sadefs.TemporalReportedProblems) + SearchAttributeTemporalWorkerDeploymentVersion = newSearchAttributeKeywordByField(sadefs.TemporalWorkerDeploymentVersion) + SearchAttributeTemporalWorkflowVersioningBehavior = newSearchAttributeKeywordByField(sadefs.TemporalWorkflowVersioningBehavior) + SearchAttributeTemporalWorkerDeployment = newSearchAttributeKeywordByField(sadefs.TemporalWorkerDeployment) + SearchAttributeTemporalUsedWorkerDeploymentVersions = newSearchAttributeKeywordListByField(sadefs.TemporalUsedWorkerDeploymentVersions) +) + +var ( + // CHASM search attribute of type Text 
is not supported at this moment. + // Note that it's currently assumed that string type values are Keyword search attributes. + _ SearchAttribute = (*SearchAttributeBool)(nil) + _ SearchAttribute = (*SearchAttributeDateTime)(nil) + _ SearchAttribute = (*SearchAttributeInt)(nil) + _ SearchAttribute = (*SearchAttributeDouble)(nil) + _ SearchAttribute = (*SearchAttributeKeyword)(nil) + _ SearchAttribute = (*SearchAttributeKeywordList)(nil) + + _ typedSearchAttribute[bool] = (*SearchAttributeBool)(nil) + _ typedSearchAttribute[time.Time] = (*SearchAttributeDateTime)(nil) + _ typedSearchAttribute[int64] = (*SearchAttributeInt)(nil) + _ typedSearchAttribute[float64] = (*SearchAttributeDouble)(nil) + _ typedSearchAttribute[string] = (*SearchAttributeKeyword)(nil) + _ typedSearchAttribute[[]string] = (*SearchAttributeKeywordList)(nil) +) + +type ( + // SearchAttribute is a shared interface for all search attribute types. Each type must embed searchAttributeDefinition. + SearchAttribute interface { + definition() searchAttributeDefinition + } + + typedSearchAttribute[T any] interface { + SearchAttribute + typeMarker(T) + } + + searchAttributeDefinition struct { + alias string + field string + valueType enumspb.IndexedValueType + } + + // SearchAttributeKeyValue is a key value pair of a search attribute. + // Represents the current value of a search attribute in a CHASM Component during a transaction. + SearchAttributeKeyValue struct { + // Alias refers to the user defined name of the search attribute + Alias string + // Field refers to a fully formed schema field, which is a Predefined CHASM search attribute + Field string + // Value refers to the current value of the search attribute. Must support encoding to a Payload. + Value VisibilityValue + } +) + +// SearchAttributeFieldBool is a search attribute field for a boolean value. 
+type SearchAttributeFieldBool struct { + field string +} + +func newSearchAttributeFieldBool(index int) SearchAttributeFieldBool { + return SearchAttributeFieldBool{ + field: resolveFieldName(enumspb.INDEXED_VALUE_TYPE_BOOL, index), + } +} + +// SearchAttributeFieldDateTime is a search attribute field for a datetime value. +type SearchAttributeFieldDateTime struct { + field string +} + +func newSearchAttributeFieldDateTime(index int) SearchAttributeFieldDateTime { + return SearchAttributeFieldDateTime{ + field: resolveFieldName(enumspb.INDEXED_VALUE_TYPE_DATETIME, index), + } +} + +// SearchAttributeFieldInt is a search attribute field for an integer value. +type SearchAttributeFieldInt struct { + field string +} + +func newSearchAttributeFieldInt(index int) SearchAttributeFieldInt { + return SearchAttributeFieldInt{ + field: resolveFieldName(enumspb.INDEXED_VALUE_TYPE_INT, index), + } +} + +// SearchAttributeFieldDouble is a search attribute field for a double value. +type SearchAttributeFieldDouble struct { + field string +} + +func newSearchAttributeFieldDouble(index int) SearchAttributeFieldDouble { + return SearchAttributeFieldDouble{ + field: resolveFieldName(enumspb.INDEXED_VALUE_TYPE_DOUBLE, index), + } +} + +// SearchAttributeFieldKeyword is a search attribute field for a keyword value. +type SearchAttributeFieldKeyword struct { + field string +} + +func newSearchAttributeFieldKeyword(index int) SearchAttributeFieldKeyword { + return SearchAttributeFieldKeyword{ + field: resolveFieldName(enumspb.INDEXED_VALUE_TYPE_KEYWORD, index), + } +} + +func newSearchAttributeFieldLowCardinalityKeyword(index int) SearchAttributeFieldKeyword { + return SearchAttributeFieldKeyword{ + field: fmt.Sprintf("%s%s%02d", sadefs.ReservedPrefix, "LowCardinalityKeyword", index), + } +} + +// SearchAttributeFieldKeywordList is a search attribute field for a keyword list value. 
type SearchAttributeFieldKeywordList struct {
	field string
}

// newSearchAttributeFieldKeywordList maps a predefined column index to the reserved keyword-list field name.
func newSearchAttributeFieldKeywordList(index int) SearchAttributeFieldKeywordList {
	return SearchAttributeFieldKeywordList{
		field: resolveFieldName(enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST, index),
	}
}

// resolveFieldName builds the reserved column name for a predefined search attribute
// from its value type and two-digit column index.
func resolveFieldName(valueType enumspb.IndexedValueType, index int) string {
	// Columns are named like TemporalBool01, TemporalDatetime01, TemporalDouble01, TemporalInt01.
	// NOTE(review): this assumes IndexedValueType.String() yields the short form shown above
	// (e.g. "Bool"), not the raw enum name "INDEXED_VALUE_TYPE_BOOL" — confirm against the
	// generated enum's String method.
	return fmt.Sprintf("%s%s%02d", sadefs.ReservedPrefix, valueType.String(), index)
}

// definition satisfies the SearchAttribute interface for every concrete type that
// embeds searchAttributeDefinition.
func (s searchAttributeDefinition) definition() searchAttributeDefinition {
	return s
}

// SearchAttributeBool is a search attribute for a boolean value.
type SearchAttributeBool struct {
	searchAttributeDefinition
}

// NewSearchAttributeBool creates a new boolean search attribute given a predefined chasm field.
func NewSearchAttributeBool(alias string, boolField SearchAttributeFieldBool) SearchAttributeBool {
	return SearchAttributeBool{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     alias,
			field:     boolField.field,
			valueType: enumspb.INDEXED_VALUE_TYPE_BOOL,
		},
	}
}

// newSearchAttributeBoolByField creates a boolean search attribute whose alias is the
// field name itself (used when there is no separate user-defined alias).
func newSearchAttributeBoolByField(field string) SearchAttributeBool {
	return SearchAttributeBool{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     field,
			field:     field,
			valueType: enumspb.INDEXED_VALUE_TYPE_BOOL,
		},
	}
}

// Value sets the boolean value of the search attribute.
func (s SearchAttributeBool) Value(value bool) SearchAttributeKeyValue {
	return SearchAttributeKeyValue{
		Alias: s.alias,
		Field: s.field,
		Value: VisibilityValueBool(value),
	}
}

// typeMarker binds SearchAttributeBool to typedSearchAttribute[bool].
func (s SearchAttributeBool) typeMarker(_ bool) {}

// SearchAttributeDateTime is a search attribute for a datetime value.
type SearchAttributeDateTime struct {
	searchAttributeDefinition
}

// NewSearchAttributeDateTime creates a new date time search attribute given a predefined chasm field.
func NewSearchAttributeDateTime(alias string, datetimeField SearchAttributeFieldDateTime) SearchAttributeDateTime {
	return SearchAttributeDateTime{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     alias,
			field:     datetimeField.field,
			valueType: enumspb.INDEXED_VALUE_TYPE_DATETIME,
		},
	}
}

// newSearchAttributeDateTimeByField creates a datetime search attribute whose alias is the
// field name itself (used when there is no separate user-defined alias).
func newSearchAttributeDateTimeByField(field string) SearchAttributeDateTime {
	return SearchAttributeDateTime{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     field,
			field:     field,
			valueType: enumspb.INDEXED_VALUE_TYPE_DATETIME,
		},
	}
}

// Value sets the date time value of the search attribute.
func (s SearchAttributeDateTime) Value(value time.Time) SearchAttributeKeyValue {
	return SearchAttributeKeyValue{
		Alias: s.alias,
		Field: s.field,
		Value: VisibilityValueTime(value),
	}
}

// typeMarker binds SearchAttributeDateTime to typedSearchAttribute[time.Time].
func (s SearchAttributeDateTime) typeMarker(_ time.Time) {}

// SearchAttributeInt is a search attribute for an integer value.
// NOTE(review): unlike Bool/DateTime/Double/Keyword/KeywordList, this type has no
// newSearchAttributeIntByField constructor — confirm whether the asymmetry is intentional.
type SearchAttributeInt struct {
	searchAttributeDefinition
}

// NewSearchAttributeInt creates a new integer search attribute given a predefined chasm field.
func NewSearchAttributeInt(alias string, intField SearchAttributeFieldInt) SearchAttributeInt {
	return SearchAttributeInt{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     alias,
			field:     intField.field,
			valueType: enumspb.INDEXED_VALUE_TYPE_INT,
		},
	}
}

// Value sets the integer value of the search attribute.
func (s SearchAttributeInt) Value(value int64) SearchAttributeKeyValue {
	return SearchAttributeKeyValue{
		Alias: s.alias,
		Field: s.field,
		Value: VisibilityValueInt64(value),
	}
}

// typeMarker binds SearchAttributeInt to typedSearchAttribute[int64].
func (s SearchAttributeInt) typeMarker(_ int64) {}

// SearchAttributeDouble is a search attribute for a double value.
type SearchAttributeDouble struct {
	searchAttributeDefinition
}

// NewSearchAttributeDouble creates a new double search attribute given a predefined chasm field.
func NewSearchAttributeDouble(alias string, doubleField SearchAttributeFieldDouble) SearchAttributeDouble {
	return SearchAttributeDouble{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     alias,
			field:     doubleField.field,
			valueType: enumspb.INDEXED_VALUE_TYPE_DOUBLE,
		},
	}
}

// newSearchAttributeDoubleByField creates a double search attribute whose alias is the
// field name itself (used when there is no separate user-defined alias).
func newSearchAttributeDoubleByField(field string) SearchAttributeDouble {
	return SearchAttributeDouble{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     field,
			field:     field,
			valueType: enumspb.INDEXED_VALUE_TYPE_DOUBLE,
		},
	}
}

// Value sets the double value of the search attribute.
func (s SearchAttributeDouble) Value(value float64) SearchAttributeKeyValue {
	return SearchAttributeKeyValue{
		Alias: s.alias,
		Field: s.field,
		Value: VisibilityValueFloat64(value),
	}
}

// typeMarker binds SearchAttributeDouble to typedSearchAttribute[float64].
func (s SearchAttributeDouble) typeMarker(_ float64) {}

// SearchAttributeKeyword is a search attribute for a keyword value.
type SearchAttributeKeyword struct {
	searchAttributeDefinition
}

// NewSearchAttributeKeyword creates a new keyword search attribute given a predefined chasm field.
func NewSearchAttributeKeyword(alias string, keywordField SearchAttributeFieldKeyword) SearchAttributeKeyword {
	return SearchAttributeKeyword{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     alias,
			field:     keywordField.field,
			valueType: enumspb.INDEXED_VALUE_TYPE_KEYWORD,
		},
	}
}

// newSearchAttributeKeywordByField creates a keyword search attribute whose alias is the
// field name itself (used when there is no separate user-defined alias).
func newSearchAttributeKeywordByField(field string) SearchAttributeKeyword {
	return SearchAttributeKeyword{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     field,
			field:     field,
			valueType: enumspb.INDEXED_VALUE_TYPE_KEYWORD,
		},
	}
}

// Value sets the string value of the search attribute.
func (s SearchAttributeKeyword) Value(value string) SearchAttributeKeyValue {
	return SearchAttributeKeyValue{
		Alias: s.alias,
		Field: s.field,
		Value: VisibilityValueKeyword(value),
	}
}

// typeMarker binds SearchAttributeKeyword to typedSearchAttribute[string].
func (s SearchAttributeKeyword) typeMarker(_ string) {}

// SearchAttributeKeywordList is a search attribute for a keyword list value.
type SearchAttributeKeywordList struct {
	searchAttributeDefinition
}

// NewSearchAttributeKeywordList creates a new keyword list search attribute given a predefined chasm field.
func NewSearchAttributeKeywordList(alias string, keywordListField SearchAttributeFieldKeywordList) SearchAttributeKeywordList {
	return SearchAttributeKeywordList{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     alias,
			field:     keywordListField.field,
			valueType: enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST,
		},
	}
}

// newSearchAttributeKeywordListByField creates a keyword list search attribute whose alias
// is the field name itself (used when there is no separate user-defined alias).
func newSearchAttributeKeywordListByField(field string) SearchAttributeKeywordList {
	return SearchAttributeKeywordList{
		searchAttributeDefinition: searchAttributeDefinition{
			alias:     field,
			field:     field,
			valueType: enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST,
		},
	}
}

// Value sets the string list value of the search attribute.
func (s SearchAttributeKeywordList) Value(value []string) SearchAttributeKeyValue {
	return SearchAttributeKeyValue{
		Alias: s.alias,
		Field: s.field,
		Value: VisibilityValueStringSlice(value),
	}
}

// typeMarker binds SearchAttributeKeywordList to typedSearchAttribute[[]string].
func (s SearchAttributeKeywordList) typeMarker(_ []string) {}

// SearchAttributesMap wraps search attribute values with type-safe access.
type SearchAttributesMap struct {
	// values is keyed by the attribute's alias (the user-visible name).
	values map[string]VisibilityValue
}

// NewSearchAttributesMap creates a new SearchAttributesMap from raw values.
func NewSearchAttributesMap(values map[string]VisibilityValue) SearchAttributesMap {
	return SearchAttributesMap{values: values}
}

// newSearchAttributesMapFromProto creates a new SearchAttributesMap from commonpb.SearchAttributes.
+func newSearchAttributesMapFromProto( + searchAttributes *commonpb.SearchAttributes, +) (SearchAttributesMap, error) { + if len(searchAttributes.GetIndexedFields()) == 0 { + return SearchAttributesMap{}, nil + } + result := SearchAttributesMap{ + values: make(map[string]VisibilityValue), + } + for saName, saPayload := range searchAttributes.IndexedFields { + value, err := visibilityValueFromPayload(saPayload) + if err != nil { + return SearchAttributesMap{}, nil + } + result.values[saName] = value + } + return result, nil +} + +// SearchAttributeValue returns the value for a given SearchAttribute with compile-time type safety. +// The return type T is inferred from the SearchAttribute's type parameter. +// For example, SearchAttributeBool will return a bool value. +// If the value is not found or the type does not match, the zero value for the type T is returned and the second return value is false. +func SearchAttributeValue[T any](m SearchAttributesMap, sa typedSearchAttribute[T]) (val T, ok bool) { + var zero T + if len(m.values) == 0 { + return zero, false + } + + alias := sa.definition().alias + visibilityValue, exists := m.values[alias] + if !exists { + return zero, false + } + + finalVal, ok := visibilityValue.Value().(T) + if !ok { + return zero, false + } + return finalVal, true +} diff --git a/chasm/search_attribute_test.go b/chasm/search_attribute_test.go new file mode 100644 index 00000000000..88de59df6c4 --- /dev/null +++ b/chasm/search_attribute_test.go @@ -0,0 +1,173 @@ +package chasm + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/searchattribute/sadefs" +) + +func TestSearchAttributesMap_Get(t *testing.T) { + // Define test search attributes + boolAttr := NewSearchAttributeBool("completed", SearchAttributeFieldBool01) + intAttr := NewSearchAttributeInt("count", SearchAttributeFieldInt01) + doubleAttr := 
NewSearchAttributeDouble("score", SearchAttributeFieldDouble01) + keywordAttr := NewSearchAttributeKeyword("status", SearchAttributeFieldKeyword01) + datetimeAttr := NewSearchAttributeDateTime("timestamp", SearchAttributeFieldDateTime01) + keywordListAttr := NewSearchAttributeKeywordList("tags", SearchAttributeFieldKeywordList01) + + now := time.Now() + + // Create map with test values + values := map[string]VisibilityValue{ + "completed": VisibilityValueBool(true), + "count": VisibilityValueInt64(42), + "score": VisibilityValueFloat64(3.14), + "status": VisibilityValueKeyword("active"), + "timestamp": VisibilityValueTime(now), + "tags": VisibilityValueStringSlice([]string{"tag1", "tag2"}), + } + m := NewSearchAttributesMap(values) + + t.Run("GetBool", func(t *testing.T) { + val, ok := SearchAttributeValue(m, boolAttr) + require.True(t, ok) + require.True(t, val) + }) + + t.Run("GetInt64", func(t *testing.T) { + val, ok := SearchAttributeValue(m, intAttr) + require.True(t, ok) + require.Equal(t, int64(42), val) + }) + + t.Run("GetFloat64", func(t *testing.T) { + val, ok := SearchAttributeValue(m, doubleAttr) + require.True(t, ok) + require.InDelta(t, 3.14, val, 0.0001) + }) + + t.Run("GetString", func(t *testing.T) { + val, ok := SearchAttributeValue(m, keywordAttr) + require.True(t, ok) + require.Equal(t, "active", val) + }) + + t.Run("GetTime", func(t *testing.T) { + val, ok := SearchAttributeValue(m, datetimeAttr) + require.True(t, ok) + require.True(t, now.Equal(val)) + }) + + t.Run("GetStringSlice", func(t *testing.T) { + val, ok := SearchAttributeValue(m, keywordListAttr) + require.True(t, ok) + require.Equal(t, []string{"tag1", "tag2"}, val) + }) + + t.Run("NotFound", func(t *testing.T) { + missingAttr := NewSearchAttributeBool("missing", SearchAttributeFieldBool02) + val, ok := SearchAttributeValue(m, missingAttr) + require.False(t, ok) + require.False(t, val) + }) + + t.Run("NilMap", func(t *testing.T) { + emptyMap := NewSearchAttributesMap(nil) + val, ok 
:= SearchAttributeValue(emptyMap, boolAttr) + require.False(t, ok) + require.False(t, val) + }) +} + +func TestNewSearchAttributesMapFromProto(t *testing.T) { + t.Run("NilSearchAttributes", func(t *testing.T) { + m, err := newSearchAttributesMapFromProto(nil) + require.NoError(t, err) + require.Empty(t, m.values) + }) + + t.Run("EmptyIndexedFields", func(t *testing.T) { + m, err := newSearchAttributesMapFromProto(&commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{}, + }) + require.NoError(t, err) + require.Empty(t, m.values) + }) + + t.Run("SingleBoolValue", func(t *testing.T) { + sa := &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "completed": sadefs.MustEncodeValue(true, enumspb.INDEXED_VALUE_TYPE_BOOL), + }, + } + m, err := newSearchAttributesMapFromProto(sa) + require.NoError(t, err) + + boolAttr := NewSearchAttributeBool("completed", SearchAttributeFieldBool01) + val, ok := SearchAttributeValue(m, boolAttr) + require.True(t, ok) + require.True(t, val) + }) + + t.Run("MultipleValueTypes", func(t *testing.T) { + now := time.Now().UTC().Truncate(time.Millisecond) + sa := &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "completed": sadefs.MustEncodeValue(true, enumspb.INDEXED_VALUE_TYPE_BOOL), + "count": sadefs.MustEncodeValue(int64(42), enumspb.INDEXED_VALUE_TYPE_INT), + "score": sadefs.MustEncodeValue(3.14, enumspb.INDEXED_VALUE_TYPE_DOUBLE), + "status": sadefs.MustEncodeValue("active", enumspb.INDEXED_VALUE_TYPE_KEYWORD), + "timestamp": sadefs.MustEncodeValue(now, enumspb.INDEXED_VALUE_TYPE_DATETIME), + "tags": sadefs.MustEncodeValue([]string{"tag1", "tag2"}, enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST), + }, + } + m, err := newSearchAttributesMapFromProto(sa) + require.NoError(t, err) + + boolAttr := NewSearchAttributeBool("completed", SearchAttributeFieldBool01) + boolVal, ok := SearchAttributeValue(m, boolAttr) + require.True(t, ok) + require.True(t, boolVal) + + intAttr := 
NewSearchAttributeInt("count", SearchAttributeFieldInt01) + intVal, ok := SearchAttributeValue(m, intAttr) + require.True(t, ok) + require.Equal(t, int64(42), intVal) + + doubleAttr := NewSearchAttributeDouble("score", SearchAttributeFieldDouble01) + doubleVal, ok := SearchAttributeValue(m, doubleAttr) + require.True(t, ok) + require.InDelta(t, 3.14, doubleVal, 0.0001) + + keywordAttr := NewSearchAttributeKeyword("status", SearchAttributeFieldKeyword01) + keywordVal, ok := SearchAttributeValue(m, keywordAttr) + require.True(t, ok) + require.Equal(t, "active", keywordVal) + + datetimeAttr := NewSearchAttributeDateTime("timestamp", SearchAttributeFieldDateTime01) + timeVal, ok := SearchAttributeValue(m, datetimeAttr) + require.True(t, ok) + require.True(t, now.Equal(timeVal)) + + keywordListAttr := NewSearchAttributeKeywordList("tags", SearchAttributeFieldKeywordList01) + listVal, ok := SearchAttributeValue(m, keywordListAttr) + require.True(t, ok) + require.Equal(t, []string{"tag1", "tag2"}, listVal) + }) + + t.Run("InvalidPayload", func(t *testing.T) { + sa := &commonpb.SearchAttributes{ + IndexedFields: map[string]*commonpb.Payload{ + "bad": {Data: []byte("not valid")}, + }, + } + m, err := newSearchAttributesMapFromProto(sa) + // Current implementation returns nil error on decode failure + require.NoError(t, err) + require.Empty(t, m.values) + }) +} diff --git a/chasm/statemachine.go b/chasm/statemachine.go new file mode 100644 index 00000000000..86e559c80a1 --- /dev/null +++ b/chasm/statemachine.go @@ -0,0 +1,60 @@ +package chasm + +import ( + "fmt" + "slices" + + "go.temporal.io/api/serviceerror" +) + +// ErrInvalidTransition is returned from [Transition.Apply] on an invalid state transition. +var ErrInvalidTransition = serviceerror.NewFailedPrecondition("invalid transition") + +// A StateMachine is anything that can get and set a comparable state S and re-generate tasks based on current state. 
// It is meant to be used with [Transition] objects to safely transition their state on a given event.
type StateMachine[S comparable] interface {
	// StateMachineState returns the machine's current state.
	StateMachineState() S
	// SetStateMachineState overwrites the machine's current state.
	SetStateMachineState(S)
}

// Transition represents a state machine transition for a machine of type SM with state S and event E.
type Transition[S comparable, SM StateMachine[S], E any] struct {
	// Source states that are valid for this transition.
	Sources []S
	// Destination state to transition to.
	Destination S
	// Function to apply the transition. Mutate the state machine object here and schedule tasks.
	// Called only after Possible succeeds and before the state is set to Destination.
	apply func(SM, MutableContext, E) error
}

// NewTransition creates a new [Transition] from the given source states to a destination state for a given event.
// The apply function is called after verifying the transition is possible but before setting the destination state,
// so it can inspect the current (source) state.
func NewTransition[S comparable, SM StateMachine[S], E any](src []S, dst S, apply func(SM, MutableContext, E) error) Transition[S, SM, E] {
	return Transition[S, SM, E]{
		Sources:     src,
		Destination: dst,
		apply:       apply,
	}
}

// Possible returns a boolean indicating whether the transition is possible for the current state.
func (t Transition[S, SM, E]) Possible(sm SM) bool {
	return slices.Contains(t.Sources, sm.StateMachineState())
}

// Apply applies a transition event to the given state machine changing the state machine's state to the transition's
// Destination on success. The apply function is called before the state is changed, so it can inspect the current
// (source) state.
+func (t Transition[S, SM, E]) Apply(sm SM, ctx MutableContext, event E) error { + prevState := sm.StateMachineState() + if !t.Possible(sm) { + return fmt.Errorf("%w from %v", ErrInvalidTransition, prevState) + } + + if err := t.apply(sm, ctx, event); err != nil { + return err + } + sm.SetStateMachineState(t.Destination) + return nil +} diff --git a/chasm/task.go b/chasm/task.go new file mode 100644 index 00000000000..43698837b3f --- /dev/null +++ b/chasm/task.go @@ -0,0 +1,89 @@ +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination task_mock.go + +package chasm + +import ( + "context" + "errors" + "time" +) + +// ErrTaskDiscarded is the error returned by the default [SideEffectTaskHandlerBase] Discard implementation, +// indicating that a side-effect task on a standby cluster has been pending past the discard delay. +var ErrTaskDiscarded = errors.New("standby task pending for too long") + +type ( + // TaskAttributes specifies scheduling metadata for a task. + TaskAttributes struct { + // ScheduledTime is when the task should fire. Use [TaskScheduledTimeImmediate] (zero value) + // for tasks that should execute as soon as possible. + ScheduledTime time.Time + // Destination is an optional routing key for outbound tasks (e.g., a URL host for HTTP + // callbacks). When non-empty, the task is categorized as outbound; when empty, it is + // categorized as a transfer task. Destination must only be set on immediate tasks. + Destination string + } + + // SideEffectTaskHandler handles side effect tasks that run outside of the state lock and have access to a Go + // context to perform I/O and access chasm engine methods such as [UpdateComponent]. Implementations must embed + // [SideEffectTaskHandlerBase]. + SideEffectTaskHandler[C any, T any] interface { + TaskValidator[C, T] + Execute(context.Context, ComponentRef, TaskAttributes, T) error + // Discard implements custom discard behavior on standby clusters. 
When a side-effect task has been + // pending on standby past the discard delay, the framework calls Discard instead of silently dropping + // the task. For example, the activity dispatch handler implements this to spill tasks to matching. + // The ctx carries engine access, but implementations must avoid mutating component state on standby + // clusters. + Discard(context.Context, ComponentRef, TaskAttributes, T) error + sideEffectTaskHandler() + } + + // PureTaskHandler handles pure tasks that run while holding execution state write lock and should not do I/O. + // Implementations must embed [PureTaskHandlerBase]. + PureTaskHandler[C any, T any] interface { + TaskValidator[C, T] + Execute(MutableContext, C, TaskAttributes, T) error + pureTaskHandler() + } + + // TaskValidator is implemented by both [SideEffectTaskHandler] and [PureTaskHandler] to gate + // whether a task should proceed with execution. + TaskValidator[C any, T any] interface { + // Validate determines whether a task should proceed with execution based on the current context, component + // state, task attributes, and task data. + // + // This function serves as a gate to prevent unnecessary task execution in several scenarios: + // 1. Standby cluster deduplication: When state is replicated to standby clusters, tasks are also replicated. + // Validate allows standby clusters to check if a task was already completed on the active cluster and + // skip execution if so (e.g., checking if an activity already transitioned from scheduled to started state). + // 2. Task obsolescence: Tasks can become irrelevant when state changes invalidate them (e.g., when a scheduler + // is updated to run at a different time, making the previously scheduled task invalid for the new state). + // For pure tasks that can run in a single transaction, Validate is called before execution to avoid + // unnecessary work. 
+ // + // The framework automatically calls Validate at key points, such as after closing transactions, to check all + // generated tasks before they execute. + // + // Returns: + // - (true, nil) if the task is valid and should be executed + // - (false, nil) if the task should be silently dropped (it's no longer relevant) + // - (anything, error) if validation fails with an error + Validate(Context, C, TaskAttributes, T) (bool, error) + } +) + +// TaskScheduledTimeImmediate is the zero time value used to indicate that a task should execute immediately. +var TaskScheduledTimeImmediate = time.Time{} + +// IsImmediate reports whether the task is scheduled for immediate execution (zero or unset scheduled time). +func (a *TaskAttributes) IsImmediate() bool { + return a.ScheduledTime.IsZero() || + a.ScheduledTime.Equal(TaskScheduledTimeImmediate) +} + +// IsValid reports whether the task attributes are well-formed. A Destination may only be set on +// immediate tasks; deferred tasks with a Destination are invalid. +func (a *TaskAttributes) IsValid() bool { + return a.Destination == "" || a.IsImmediate() +} diff --git a/chasm/task_handler_base.go b/chasm/task_handler_base.go new file mode 100644 index 00000000000..1c7312468d2 --- /dev/null +++ b/chasm/task_handler_base.go @@ -0,0 +1,18 @@ +package chasm + +import "context" + +// SideEffectTaskHandlerBase provides a default Discard implementation that returns ErrTaskDiscarded. +// Embed this in side-effect task handler structs to satisfy the SideEffectTaskHandler interface. +type SideEffectTaskHandlerBase[T any] struct{} + +func (SideEffectTaskHandlerBase[T]) Discard(_ context.Context, _ ComponentRef, _ TaskAttributes, _ T) error { + return ErrTaskDiscarded +} + +func (SideEffectTaskHandlerBase[T]) sideEffectTaskHandler() {} + +// PureTaskHandlerBase must be embedded in all pure task handler implementations. 
+type PureTaskHandlerBase struct{} + +func (PureTaskHandlerBase) pureTaskHandler() {} diff --git a/chasm/task_mock.go b/chasm/task_mock.go new file mode 100644 index 00000000000..4b41f5f50aa --- /dev/null +++ b/chasm/task_mock.go @@ -0,0 +1,200 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: task.go +// +// Generated by this command: +// +// mockgen -package chasm -source task.go -destination task_mock.go +// + +// Package chasm is a generated GoMock package. +package chasm + +import ( + context "context" + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockSideEffectTaskHandler is a mock of SideEffectTaskHandler interface. +type MockSideEffectTaskHandler[C any, T any] struct { + ctrl *gomock.Controller + recorder *MockSideEffectTaskHandlerMockRecorder[C, T] + isgomock struct{} +} + +// MockSideEffectTaskHandlerMockRecorder is the mock recorder for MockSideEffectTaskHandler. +type MockSideEffectTaskHandlerMockRecorder[C any, T any] struct { + mock *MockSideEffectTaskHandler[C, T] +} + +// NewMockSideEffectTaskHandler creates a new mock instance. +func NewMockSideEffectTaskHandler[C any, T any](ctrl *gomock.Controller) *MockSideEffectTaskHandler[C, T] { + mock := &MockSideEffectTaskHandler[C, T]{ctrl: ctrl} + mock.recorder = &MockSideEffectTaskHandlerMockRecorder[C, T]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSideEffectTaskHandler[C, T]) EXPECT() *MockSideEffectTaskHandlerMockRecorder[C, T] { + return m.recorder +} + +// Discard mocks base method. +func (m *MockSideEffectTaskHandler[C, T]) Discard(arg0 context.Context, arg1 ComponentRef, arg2 TaskAttributes, arg3 T) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Discard", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Discard indicates an expected call of Discard. 
+func (mr *MockSideEffectTaskHandlerMockRecorder[C, T]) Discard(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discard", reflect.TypeOf((*MockSideEffectTaskHandler[C, T])(nil).Discard), arg0, arg1, arg2, arg3) +} + +// Execute mocks base method. +func (m *MockSideEffectTaskHandler[C, T]) Execute(arg0 context.Context, arg1 ComponentRef, arg2 TaskAttributes, arg3 T) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Execute", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Execute indicates an expected call of Execute. +func (mr *MockSideEffectTaskHandlerMockRecorder[C, T]) Execute(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockSideEffectTaskHandler[C, T])(nil).Execute), arg0, arg1, arg2, arg3) +} + +// Validate mocks base method. +func (m *MockSideEffectTaskHandler[C, T]) Validate(arg0 Context, arg1 C, arg2 TaskAttributes, arg3 T) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Validate indicates an expected call of Validate. +func (mr *MockSideEffectTaskHandlerMockRecorder[C, T]) Validate(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockSideEffectTaskHandler[C, T])(nil).Validate), arg0, arg1, arg2, arg3) +} + +// sideEffectTaskHandler mocks base method. +func (m *MockSideEffectTaskHandler[C, T]) sideEffectTaskHandler() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "sideEffectTaskHandler") +} + +// sideEffectTaskHandler indicates an expected call of sideEffectTaskHandler. 
+func (mr *MockSideEffectTaskHandlerMockRecorder[C, T]) sideEffectTaskHandler() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "sideEffectTaskHandler", reflect.TypeOf((*MockSideEffectTaskHandler[C, T])(nil).sideEffectTaskHandler)) +} + +// MockPureTaskHandler is a mock of PureTaskHandler interface. +type MockPureTaskHandler[C any, T any] struct { + ctrl *gomock.Controller + recorder *MockPureTaskHandlerMockRecorder[C, T] + isgomock struct{} +} + +// MockPureTaskHandlerMockRecorder is the mock recorder for MockPureTaskHandler. +type MockPureTaskHandlerMockRecorder[C any, T any] struct { + mock *MockPureTaskHandler[C, T] +} + +// NewMockPureTaskHandler creates a new mock instance. +func NewMockPureTaskHandler[C any, T any](ctrl *gomock.Controller) *MockPureTaskHandler[C, T] { + mock := &MockPureTaskHandler[C, T]{ctrl: ctrl} + mock.recorder = &MockPureTaskHandlerMockRecorder[C, T]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPureTaskHandler[C, T]) EXPECT() *MockPureTaskHandlerMockRecorder[C, T] { + return m.recorder +} + +// Execute mocks base method. +func (m *MockPureTaskHandler[C, T]) Execute(arg0 MutableContext, arg1 C, arg2 TaskAttributes, arg3 T) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Execute", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Execute indicates an expected call of Execute. +func (mr *MockPureTaskHandlerMockRecorder[C, T]) Execute(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Execute", reflect.TypeOf((*MockPureTaskHandler[C, T])(nil).Execute), arg0, arg1, arg2, arg3) +} + +// Validate mocks base method. 
+func (m *MockPureTaskHandler[C, T]) Validate(arg0 Context, arg1 C, arg2 TaskAttributes, arg3 T) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Validate indicates an expected call of Validate. +func (mr *MockPureTaskHandlerMockRecorder[C, T]) Validate(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockPureTaskHandler[C, T])(nil).Validate), arg0, arg1, arg2, arg3) +} + +// pureTaskHandler mocks base method. +func (m *MockPureTaskHandler[C, T]) pureTaskHandler() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "pureTaskHandler") +} + +// pureTaskHandler indicates an expected call of pureTaskHandler. +func (mr *MockPureTaskHandlerMockRecorder[C, T]) pureTaskHandler() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "pureTaskHandler", reflect.TypeOf((*MockPureTaskHandler[C, T])(nil).pureTaskHandler)) +} + +// MockTaskValidator is a mock of TaskValidator interface. +type MockTaskValidator[C any, T any] struct { + ctrl *gomock.Controller + recorder *MockTaskValidatorMockRecorder[C, T] + isgomock struct{} +} + +// MockTaskValidatorMockRecorder is the mock recorder for MockTaskValidator. +type MockTaskValidatorMockRecorder[C any, T any] struct { + mock *MockTaskValidator[C, T] +} + +// NewMockTaskValidator creates a new mock instance. +func NewMockTaskValidator[C any, T any](ctrl *gomock.Controller) *MockTaskValidator[C, T] { + mock := &MockTaskValidator[C, T]{ctrl: ctrl} + mock.recorder = &MockTaskValidatorMockRecorder[C, T]{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTaskValidator[C, T]) EXPECT() *MockTaskValidatorMockRecorder[C, T] { + return m.recorder +} + +// Validate mocks base method. 
+func (m *MockTaskValidator[C, T]) Validate(arg0 Context, arg1 C, arg2 TaskAttributes, arg3 T) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Validate", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Validate indicates an expected call of Validate. +func (mr *MockTaskValidatorMockRecorder[C, T]) Validate(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Validate", reflect.TypeOf((*MockTaskValidator[C, T])(nil).Validate), arg0, arg1, arg2, arg3) +} diff --git a/chasm/test_component_test.go b/chasm/test_component_test.go new file mode 100644 index 00000000000..b675acff150 --- /dev/null +++ b/chasm/test_component_test.go @@ -0,0 +1,423 @@ +// TODO: move this to chasm_test package +package chasm + +import ( + "fmt" + "reflect" + "sort" + "strings" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "google.golang.org/protobuf/proto" +) + +type ( + // TestComponent is a sample CHASM component used in tests. + // It would be nice to move it another package, but this creates a circular dependency. + + protoMessageType = persistencespb.WorkflowExecutionState // Random proto message. 
+ TestComponent struct { + UnimplementedComponent + + ComponentData *protoMessageType + SubComponent1 Field[*TestSubComponent1] + SubComponent2 Field[*TestSubComponent2] + SubData1 Field[*protoMessageType] + SubComponents Map[string, *TestSubComponent1] + PendingActivities Map[int, *TestSubComponent1] + SubComponent11Pointer Field[*TestSubComponent11] + SubComponent11Pointer2 Field[*TestSubComponent11] + SubComponentInterfacePointer Field[Component] + + MSPointer MSPointer + ParentPtr ParentPtr[*TestComponent] + + Visibility Field[*Visibility] + } + + TestSubComponent1 struct { + UnimplementedComponent + + SubComponent1Data *protoMessageType + SubComponent11 Field[*TestSubComponent11] + SubComponent11_2 Field[*TestSubComponent11] + SubData11 Field[*protoMessageType] // Random proto message. + RootPointer Field[*TestComponent] + + ParentPtr ParentPtr[*TestComponent] + } + + TestSubComponent11 struct { + UnimplementedComponent + + SubComponent11Data *protoMessageType + GrandparentPointer Field[*TestComponent] + ParentComponentPtr Field[*TestSubComponent1] + + ParentPtr ParentPtr[*TestSubComponent1] + } + + TestSubComponent2 struct { + UnimplementedComponent + SubComponent2Data *protoMessageType + } + + TestSubComponent interface { + GetData() string + } +) + +const ( + TestComponentStartTimeSAKey = "StartTimeSAKey" + TestComponentRunIDSAKey = "RunIdSAKey" + TestComponentStartTimeMemoKey = "StartTimeMemoKey" +) + +var ( + TestComponentStartTimeSearchAttribute = NewSearchAttributeDateTime(TestComponentStartTimeSAKey, SearchAttributeFieldDateTime01) + TestComponentRunIDPredefinedSA = newSearchAttributeKeywordByField(TestComponentRunIDSAKey) + + _ VisibilitySearchAttributesProvider = (*TestComponent)(nil) + _ VisibilityMemoProvider = (*TestComponent)(nil) + _ RootComponent = (*TestComponent)(nil) +) + +func (tc *TestComponent) LifecycleState(_ Context) LifecycleState { + switch tc.ComponentData.GetStatus() { + case enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED, 
enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING: + return LifecycleStateRunning + case enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT: + return LifecycleStatePaused + case enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW: + return LifecycleStateCompleted + default: + return LifecycleStateFailed + } +} + +func (tc *TestComponent) Pause(_ MutableContext) { + tc.ComponentData.Status = enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT +} + +func (tc *TestComponent) Unpause(_ MutableContext) { + tc.ComponentData.Status = enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING +} + +func (tc *TestComponent) Terminate( + mutableContext MutableContext, + _ TerminateComponentRequest, +) (TerminateComponentResponse, error) { + tc.Fail(mutableContext) + return TerminateComponentResponse{}, nil +} + +func (tc *TestComponent) Complete(_ MutableContext) { + tc.ComponentData.Status = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED +} + +func (tc *TestComponent) Fail(_ MutableContext) { + tc.ComponentData.Status = enumspb.WORKFLOW_EXECUTION_STATUS_FAILED +} + +func (tc *TestComponent) ContextMetadata(_ Context) map[string]string { + // TODO: Export context metadata from this test root. + return nil +} + +// SearchAttributes implements VisibilitySearchAttributesProvider interface. +func (tc *TestComponent) SearchAttributes(_ Context) []SearchAttributeKeyValue { + return []SearchAttributeKeyValue{ + TestComponentStartTimeSearchAttribute.Value(tc.ComponentData.GetStartTime().AsTime()), + TestComponentRunIDPredefinedSA.Value(tc.ComponentData.GetRunId()), + SearchAttributeTemporalScheduledByID.Value(tc.ComponentData.GetRunId()), + } +} + +// Memo implements VisibilityMemoProvider interface. 
+func (tc *TestComponent) Memo(_ Context) proto.Message { + return tc.ComponentData +} + +func (tsc1 *TestSubComponent1) LifecycleState(_ Context) LifecycleState { + switch tsc1.SubComponent1Data.GetStatus() { + case enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING: + return LifecycleStateRunning + case enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT: + return LifecycleStatePaused + case enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW: + return LifecycleStateCompleted + default: + return LifecycleStateFailed + } +} + +func (tsc1 *TestSubComponent1) Pause(_ MutableContext) { + tsc1.SubComponent1Data.Status = enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT +} + +func (tsc1 *TestSubComponent1) Unpause(_ MutableContext) { + tsc1.SubComponent1Data.Status = enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING +} + +func (tsc1 *TestSubComponent1) GetData() string { + return tsc1.SubComponent1Data.GetCreateRequestId() +} + +func (tsc11 *TestSubComponent11) LifecycleState(_ Context) LifecycleState { + switch tsc11.SubComponent11Data.GetStatus() { + case enumspb.WORKFLOW_EXECUTION_STATUS_UNSPECIFIED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING: + return LifecycleStateRunning + case enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT: + return LifecycleStatePaused + case enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW: + return LifecycleStateCompleted + default: + return LifecycleStateFailed + } +} + +func (tsc2 *TestSubComponent2) LifecycleState(_ Context) LifecycleState { + return LifecycleStateRunning +} + +func setTestComponentFields(c *TestComponent, backend *MockNodeBackend) { + c.ComponentData = &protoMessageType{ + CreateRequestId: "component-data", + } + c.SubComponent1 = NewComponentField(nil, &TestSubComponent1{ + SubComponent1Data: &protoMessageType{ + CreateRequestId: "sub-component1-data", + }, + SubComponent11: NewComponentField(nil, &TestSubComponent11{ + 
SubComponent11Data: &protoMessageType{ + CreateRequestId: "sub-component1-sub-component11-data", + }, + }), + SubData11: NewDataField(nil, &protoMessageType{ + CreateRequestId: "sub-component1-sub-data11", + }), + }) + c.SubComponent2 = NewEmptyField[*TestSubComponent2]() + c.SubData1 = NewDataField(nil, &protoMessageType{ + CreateRequestId: "sub-data1", + }) + c.MSPointer = NewMSPointer(backend) +} + +// returns serialized version of TestComponent from above. +// Generated by generateMapInit function below. +func testComponentSerializedNodes() map[string]*persistencespb.ChasmNode { + serializedNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + }, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task(nil), + PureTasks: []*persistencespb.ChasmComponentAttributes_Task(nil), + }, + }, + }, + Data: &commonpb.DataBlob{ + EncodingType: 1, + Data: []byte{0xa, 0xe, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x2d, 0x64, 0x61, 0x74, 0x61}, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + }, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + SideEffectTasks: 
[]*persistencespb.ChasmComponentAttributes_Task(nil), + PureTasks: []*persistencespb.ChasmComponentAttributes_Task(nil), + }, + }, + }, + Data: &commonpb.DataBlob{ + EncodingType: 1, + Data: []byte{0xa, 0x13, 0x73, 0x75, 0x62, 0x2d, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x31, 0x2d, 0x64, 0x61, 0x74, 0x61}, + }, + }, + "SubComponent1/SubComponent11": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + }, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent11TypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task(nil), + PureTasks: []*persistencespb.ChasmComponentAttributes_Task(nil), + }, + }, + }, + Data: &commonpb.DataBlob{ + EncodingType: 1, + Data: []byte{0xa, 0x23, 0x73, 0x75, 0x62, 0x2d, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x31, 0x2d, 0x73, 0x75, 0x62, 0x2d, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x31, 0x31, 0x2d, 0x64, 0x61, 0x74, 0x61}, + }, + }, + "SubComponent1/SubData11": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + }, + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{ + DataAttributes: &persistencespb.ChasmDataAttributes{}, + }, + }, + Data: &commonpb.DataBlob{ + EncodingType: 1, + Data: []byte{0xa, 0x19, 0x73, 0x75, 0x62, 0x2d, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x31, 0x2d, 0x73, 0x75, 0x62, 0x2d, 0x64, 0x61, 0x74, 0x61, 0x31, 0x31}, + }, + }, + "SubData1": 
{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + }, + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{ + DataAttributes: &persistencespb.ChasmDataAttributes{}, + }, + }, + Data: &commonpb.DataBlob{ + EncodingType: 1, + Data: []byte{0xa, 0x9, 0x73, 0x75, 0x62, 0x2d, 0x64, 0x61, 0x74, 0x61, 0x31}, + }, + }, + } + + return serializedNodes +} + +// Helper functions to regenerate testComponentSerializedNodes() function body. +// Use: generateMapInit(serializedNodes, "serializedNodes") +func generateMapInit(m any, mapName string) { + val := reflect.ValueOf(m) + if val.Kind() != reflect.Map { + fmt.Println("Provided value is not a map") + return + } + + keyType := val.Type().Key() + elemType := val.Type().Elem() + + fmt.Printf("%s := map[%s]%s{\n", mapName, keyType, elemType) + + // Sort string keys for deterministic output + var keys []reflect.Value + keys = append(keys, val.MapKeys()...) 
+ if keyType.Kind() == reflect.String { + sort.Slice(keys, func(i, j int) bool { + return keys[i].String() < keys[j].String() + }) + } + + for _, key := range keys { + value := val.MapIndex(key) + fmt.Printf("\t%#v: %s,\n", key.Interface(), renderProtoPointer(value)) + } + fmt.Println("}") +} + +func renderProtoPointer(v reflect.Value) string { + if v.IsNil() { + return "nil" + } + + elem := v.Elem() // Dereference the pointer to struct + t := elem.Type() + result := fmt.Sprintf("&%s{\n", t.String()) + + for i := 0; i < elem.NumField(); i++ { + field := t.Field(i) + + // Skip unexported and known proto internal fields + if field.PkgPath != "" || isProtoInternalField(field.Name) { + continue + } + + fieldValue := elem.Field(i) + + // Handle oneof-style interface fields + if field.Type.Kind() == reflect.Interface && !fieldValue.IsNil() { + oneofVal := fieldValue.Elem() + if oneofVal.Kind() == reflect.Ptr { + oneofVal = oneofVal.Elem() + } + + result += fmt.Sprintf("\t\t%s: &%s{\n", field.Name, oneofVal.Type().String()) + for j := 0; j < oneofVal.NumField(); j++ { + oneofField := oneofVal.Type().Field(j) + if oneofField.PkgPath != "" || isProtoInternalField(oneofField.Name) { + continue + } + oneofFieldValue := oneofVal.Field(j) + + // Handle nested proto inside oneof + if oneofFieldValue.Kind() == reflect.Ptr && oneofFieldValue.Elem().Kind() == reflect.Struct { + result += fmt.Sprintf("\t\t\t%s: %s,\n", oneofField.Name, renderProtoPointer(oneofFieldValue)) + } else { + result += fmt.Sprintf("\t\t\t%s: %#v,\n", oneofField.Name, oneofFieldValue.Interface()) + } + } + result += "\t\t},\n" + continue + } + + // Recursively handle nested proto messages (pointer to struct) + if field.Type.Kind() == reflect.Ptr && fieldValue.Kind() == reflect.Ptr && fieldValue.Elem().Kind() == reflect.Struct { + result += fmt.Sprintf("\t\t%s: %s,\n", field.Name, renderProtoPointer(fieldValue)) + continue + } + + // Print normal fields + result += fmt.Sprintf("\t\t%s: %#v,\n", field.Name, 
fieldValue.Interface()) + } + result += "\t}" + + result = strings.ReplaceAll(result, "persistence.", "persistencespb.") + + return result +} + +func isProtoInternalField(fieldName string) bool { + switch fieldName { + case "state", "sizeCache", "unknownFields": + return true + default: + return false + } +} diff --git a/chasm/test_library_test.go b/chasm/test_library_test.go new file mode 100644 index 00000000000..29a3d06234c --- /dev/null +++ b/chasm/test_library_test.go @@ -0,0 +1,69 @@ +// TODO: move this to chasm_test package +package chasm + +import ( + "go.uber.org/mock/gomock" +) + +type TestLibrary struct { + UnimplementedLibrary + + controller *gomock.Controller + + mockSideEffectTaskHandler *MockSideEffectTaskHandler[any, *TestSideEffectTask] + mockDiscardableSideEffectHandler *MockSideEffectTaskHandler[any, *TestDiscardableSideEffectTask] + mockOutboundSideEffectTaskHandler *MockSideEffectTaskHandler[any, TestOutboundSideEffectTask] + mockPureTaskHandler *MockPureTaskHandler[any, *TestPureTask] +} + +func newTestLibrary( + controller *gomock.Controller, +) *TestLibrary { + return &TestLibrary{ + controller: controller, + + mockSideEffectTaskHandler: NewMockSideEffectTaskHandler[any, *TestSideEffectTask](controller), + mockDiscardableSideEffectHandler: NewMockSideEffectTaskHandler[any, *TestDiscardableSideEffectTask](controller), + mockOutboundSideEffectTaskHandler: NewMockSideEffectTaskHandler[any, TestOutboundSideEffectTask](controller), + mockPureTaskHandler: NewMockPureTaskHandler[any, *TestPureTask](controller), + } +} + +func (l *TestLibrary) Name() string { + return testLibraryName +} + +func (l *TestLibrary) Components() []*RegistrableComponent { + return []*RegistrableComponent{ + NewRegistrableComponent[*TestComponent]( + testComponentName, + WithBusinessIDAlias("TestBusinessId"), + WithSearchAttributes(TestComponentStartTimeSearchAttribute), + ), + NewRegistrableComponent[*TestSubComponent1](testSubComponent1Name), + 
NewRegistrableComponent[*TestSubComponent11](testSubComponent11Name), + NewRegistrableComponent[*TestSubComponent2](testSubComponent2Name), + } +} + +func (l *TestLibrary) Tasks() []*RegistrableTask { + return []*RegistrableTask{ + NewRegistrableSideEffectTask( + testSideEffectTaskName, + l.mockSideEffectTaskHandler, + ), + NewRegistrableSideEffectTask( + testDiscardableSideEffectTaskName, + l.mockDiscardableSideEffectHandler, + ), + NewRegistrableSideEffectTask( + // NOTE this task is registered as a struct, instead of pointer to struct. + testOutboundSideEffectTaskName, + l.mockOutboundSideEffectTaskHandler, + ), + NewRegistrablePureTask( + testPureTaskName, + l.mockPureTaskHandler, + ), + } +} diff --git a/chasm/test_task_test.go b/chasm/test_task_test.go new file mode 100644 index 00000000000..e048e8d62ee --- /dev/null +++ b/chasm/test_task_test.go @@ -0,0 +1,18 @@ +// TODO: move this to chasm_test package +package chasm + +import ( + commonpb "go.temporal.io/api/common/v1" +) + +type ( + TestSideEffectTask = commonpb.Payload + + TestDiscardableSideEffectTask struct{} + + TestOutboundSideEffectTask struct{} + + TestPureTask struct { + Payload *commonpb.Payload + } +) diff --git a/chasm/test_var_test.go b/chasm/test_var_test.go new file mode 100644 index 00000000000..2cd91679878 --- /dev/null +++ b/chasm/test_var_test.go @@ -0,0 +1,38 @@ +package chasm + +const ( + testLibraryName = "TestLibrary" + testComponentName = "test_component" + testSubComponent1Name = "test_sub_component_1" + testSubComponent11Name = "test_sub_component_11" + testSubComponent2Name = "test_sub_component_2" + + testSideEffectTaskName = "test_side_effect_task" + testDiscardableSideEffectTaskName = "test_discardable_side_effect_task" + testOutboundSideEffectTaskName = "test_outbound_side_effect_task" + testPureTaskName = "test_pure_task" +) + +var ( + testComponentFQN = FullyQualifiedName(testLibraryName, testComponentName) + testSubComponent1FQN = FullyQualifiedName(testLibraryName, 
testSubComponent1Name) + testSubComponent11FQN = FullyQualifiedName(testLibraryName, testSubComponent11Name) + testSubComponent2FQN = FullyQualifiedName(testLibraryName, testSubComponent2Name) + + testSideEffectTaskFQN = FullyQualifiedName(testLibraryName, testSideEffectTaskName) + testDiscardableSideEffectTaskFQN = FullyQualifiedName(testLibraryName, testDiscardableSideEffectTaskName) + testOutboundSideEffectTaskFQN = FullyQualifiedName(testLibraryName, testOutboundSideEffectTaskName) + testPureTaskFQN = FullyQualifiedName(testLibraryName, testPureTaskName) +) + +var ( + testComponentTypeID = GenerateTypeID(testComponentFQN) + testSubComponent1TypeID = GenerateTypeID(testSubComponent1FQN) + testSubComponent11TypeID = GenerateTypeID(testSubComponent11FQN) + testSubComponent2TypeID = GenerateTypeID(testSubComponent2FQN) + + testSideEffectTaskTypeID = GenerateTypeID(testSideEffectTaskFQN) + testDiscardableSideEffectTaskTypeID = GenerateTypeID(testDiscardableSideEffectTaskFQN) + testOutboundSideEffectTaskTypeID = GenerateTypeID(testOutboundSideEffectTaskFQN) + testPureTaskTypeID = GenerateTypeID(testPureTaskFQN) +) diff --git a/chasm/test_visibility.go b/chasm/test_visibility.go new file mode 100644 index 00000000000..b525e89264f --- /dev/null +++ b/chasm/test_visibility.go @@ -0,0 +1,20 @@ +package chasm + +import enumspb "go.temporal.io/api/enums/v1" + +// NewTestVisibilitySearchAttributesMapper creates a new VisibilitySearchAttributesMapper. +// For testing only. 
+func NewTestVisibilitySearchAttributesMapper( + fieldToAlias map[string]string, + saTypeMap map[string]enumspb.IndexedValueType, +) *VisibilitySearchAttributesMapper { + aliasToField := make(map[string]string, len(fieldToAlias)) + for field, alias := range fieldToAlias { + aliasToField[alias] = field + } + return &VisibilitySearchAttributesMapper{ + aliasToField: aliasToField, + fieldToAlias: fieldToAlias, + saTypeMap: saTypeMap, + } +} diff --git a/chasm/transition_history.go b/chasm/transition_history.go new file mode 100644 index 00000000000..02b4cde8b3b --- /dev/null +++ b/chasm/transition_history.go @@ -0,0 +1,35 @@ +package chasm + +import ( + "go.temporal.io/server/common/persistence/transitionhistory" + "go.temporal.io/server/service/history/consts" +) + +// ExecutionStateChanged returns true if execution state has advanced beyond the state encoded in +// refBytes. It may return ErrInvalidComponentRef or ErrMalformedComponentRef. Callers should +// consider converting these to serviceerror.NewInvalidArgument. 
+func ExecutionStateChanged(c Component, ctx Context, refBytes []byte) (bool, error) { + ref, err := DeserializeComponentRef(refBytes) + if err != nil { + return false, ErrMalformedComponentRef + } + currentRef, err := ctx.structuredRef(c) + if err != nil { + return false, err + } + if ref.ExecutionKey != currentRef.ExecutionKey { + return false, ErrInvalidComponentRef + } + switch transitionhistory.Compare(ref.executionLastUpdateVT, currentRef.executionLastUpdateVT) { + case -1: + // Execution state has advanced beyond submitted ref + return true, nil + case 0: + // Execution state has not advanced beyond submitted ref + return false, nil + case 1: + // Execution state is behind submitted ref + return false, consts.ErrStaleState + } + panic("unexpected result from transitionhistory.Compare") //nolint:forbidigo +} diff --git a/chasm/tree.go b/chasm/tree.go new file mode 100644 index 00000000000..e8771576490 --- /dev/null +++ b/chasm/tree.go @@ -0,0 +1,3432 @@ +package chasm + +import ( + "cmp" + "context" + "errors" + "fmt" + "iter" + "reflect" + "slices" + "strconv" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/api/serviceerror" + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/nexus/nexusrpc" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/persistence/transitionhistory" + "go.temporal.io/server/common/softassert" + "go.temporal.io/server/service/history/tasks" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/proto" + 
"google.golang.org/protobuf/types/known/timestamppb" ) + +var ( + protoMessageT = reflect.TypeFor[proto.Message]() +) + +var ( + errAccessCheckFailed = serviceerror.NewNotFound("access check failed, CHASM tree is closed for writes") + errComponentNotFound = serviceerror.NewNotFound("component not found") + errDataNotFound = serviceerror.NewNotFound("data not found") + errTaskNotValid = serviceerror.NewNotFound("task is no longer valid") +} + +// valueState is an in-memory indicator of the dirtiness of a deserialized node value. +// The dirtiness has two parts: +// 1. If the data part of the value is in sync with the serializedNode field. +// 2. For component node, if the structure of the component is in sync with the children field. +// +// The enum value below is defined in increasing order of "dirtiness". +// - NeedDeserialize: Value is not even deserialized yet. +// - Synced: Value is deserialized and in sync with both serializedNode and children. +// - NeedSerialize: Value is deserialized, the child tree structure is synced, but the value is not in sync with serializedNode. +// - NeedSyncStructure: Value is deserialized, neither data nor tree structure is synced. +// +// For simplicity, for a dirty component node, the logic always sync structure (potentially multiple times within a transaction) first, +// and then serialize the data at the very end of a transaction. So there will never be a case where value is synced with serializedNode, +// but not with children. +// +// To update this field, ALWAYS use setValueState() method. +// +// NOTE: This is a different concept from the IsDirty() method which is needed by MutableState implementation to determine +// if the state in memory matches the state in DB. 
+type valueState uint8 + +const ( + valueStateUndefined valueState = iota + valueStateNeedDeserialize + valueStateSynced + valueStateNeedSerialize + valueStateNeedSyncStructure +) + +const ( + physicalTaskStatusNone int32 = iota + physicalTaskStatusCreated +) + +type ( + // Node is the in-memory representation of a persisted CHASM node. + // + // Node and all its methods are NOT meant to be used by CHASM component authors. + // They are exported for use by the CHASM engine and underlying MutableState implementation only. + Node struct { + *nodeBase + + parent *Node + children map[string]*Node // child name (path segment) -> child node + nodeName string // key of this node in parent's children map, empty string for root node. + + // Type of attributes controls the type of the node. + serializedNode *persistencespb.ChasmNode // serialized component | data | collection with metadata + // Deserialized component | data | map + // Do NOT set this field directly, use setValue() method instead. + value any + // Do NOT set this field directly, use setValueState() method instead. + valueState valueState + + // Cached encoded path for this node. + // DO NOT read this field directly. Always use getEncodedPath() method to retrieve the encoded path. + // + // Empty string is a valid encoded path (for root node), so using *string here to differentiate. + // + // TODO: Consider using unique package here. + // Encoded path for different runs of the same Component type are the same. + encodedPath *string + + // When terminated is true, regardless of the Lifecycle state of the component, + // the component will be considered as closed. + // + // NOTE: this is an in-memory only field and will be lost upon mutable state reload or replication. + // The purpose of this field is only for the transaction that force terminates the execution to + // update executionState & State in mutable state and generate retention timers, so it only needs to be + // in-memory and on the active side. 
+ // If your logic needs to check if an execution is ever force terminated, check both this field (for the current + // transaction) and also the executionState from backend (for previous transactions). + // + // We can consider extending the force terminate concept to sub-components as well, and make the field durable. + terminated bool + + // deleteAfterClose suppresses the close visibility task when an execution is being + // terminated as part of a delete operation. Like terminated, this is in-memory only + // and only needed for the current transaction. Set via SetDeleteAfterClose. + deleteAfterClose bool + } + + // nodeBase is a set of dependencies and states shared by all nodes in a CHASM tree. + nodeBase struct { + registry *Registry + timeSource clock.TimeSource + backend NodeBackend + pathEncoder NodePathEncoder + logger log.Logger + metricsHandler metrics.Handler + + // Following fields are changes accumulated in this transaction, + // and will get cleaned up after CloseTransaction(). + + // mutation field captures all user state changes (those will be replicated) + mutation NodesMutation + // systemMutation field captures all cell specific system changes (those will NOT be replicated) + systemMutation NodesMutation + + newTasks map[any][]taskWithAttributes // component value -> task & attributes + immediatePureTasks map[any][]taskWithAttributes // similar to newTasks, but will be executed at the end of the transaction + + // Node value -> node + // Only component and data node values are tracked right now + valueToNode map[any]*Node + + taskValueCache map[*commonpb.DataBlob]reflect.Value + + // isActiveStateDirty is true if any user data is mutated. + // NOTE: this only captures active cluster's user data mutation. + // Replication logic (ApplySnapshot/Mutation) will not set this field. + // + // This flag in a CHASM tree level, while valueState is on node level. 
+ // Tracking this flag on tree level avoids traversing the whole tree every time + // we want to know if something is updated. + // + // This flag is equivalent to checking if any node's valueState >= valueStateNeedSerialize + isActiveStateDirty bool + + // Root component's search attributes and memo at the start of a transaction. + // They will be updated upon CloseTransaction() if they are changed. + currentSA map[string]VisibilityValue + currentMemo proto.Message + + needsPointerResolution bool + } + + taskWithAttributes struct { + task any + attributes TaskAttributes + } + + // NodesMutation is a set of mutations for all nodes rooted at a given node n, + // including the node n itself. + NodesMutation struct { + UpdatedNodes map[string]*persistencespb.ChasmNode // encoded node path -> chasm node + DeletedNodes map[string]struct{} + } + + // NodesSnapshot is a snapshot for all nodes rooted at a given node n, + // including the node n itself. + NodesSnapshot struct { + Nodes map[string]*persistencespb.ChasmNode // encoded node path -> chasm node + } + + // NodeBackend is a set of methods needed from MutableState. + // + // This is for breaking cycle dependency between + // this package and service/history/workflow package + // where MutableState is defined. + NodeBackend interface { + // TODO: Add methods needed from MutateState here. 
+ GetExecutionState() *persistencespb.WorkflowExecutionState + GetExecutionInfo() *persistencespb.WorkflowExecutionInfo + GetApproximatePersistedSize() int + GetNamespaceEntry() *namespace.Namespace + GetCurrentVersion() int64 + NextTransitionCount() int64 + CurrentVersionedTransition() *persistencespb.VersionedTransition + GetWorkflowKey() definition.WorkflowKey + AddTasks(...tasks.Task) + AddHistoryEvent(t enumspb.EventType, setAttributes func(*historypb.HistoryEvent)) *historypb.HistoryEvent + GenerateEventLoadToken(event *historypb.HistoryEvent) ([]byte, error) + LoadHistoryEvent(ctx context.Context, token []byte) (*historypb.HistoryEvent, error) + HasAnyBufferedEvent(filter func(*historypb.HistoryEvent) bool) bool + DeleteCHASMPureTasks(maxScheduledTime time.Time) + UpdateWorkflowStateStatus( + state enumsspb.WorkflowExecutionState, + status enumspb.WorkflowExecutionStatus, + ) (bool, error) + IsWorkflow() bool + GetNexusCompletion( + ctx context.Context, + requestID string, + ) (nexusrpc.CompleteOperationOptions, error) + EndpointRegistry() EndpointRegistry + } + + // NodePathEncoder is an interface for encoding and decoding node paths. + // Logic outside the chasm package should only work with encoded paths. + NodePathEncoder interface { + Encode(node *Node, path []string) (string, error) + // TODO: Return a iterator on node name instead of []string, + // so that we can get a node by encoded path without additional + // allocation for the decoded path. + Decode(encodedPath string) ([]string, error) + } + + // NodePureTask is intended to be implemented and used within the CHASM + // framework only. + NodePureTask interface { + ExecutePureTask(baseCtx context.Context, taskAttributes TaskAttributes, taskInstance any) (bool, error) + ValidatePureTask(baseCtx context.Context, taskAttributes TaskAttributes, taskInstance any) (bool, error) + } +) + +// NewTreeFromDB creates a new in-memory CHASM tree from a collection of flattened persistence CHASM nodes. 
+// This method should only be used when loading an existing CHASM tree from database. +// If serializedNodes is empty, the tree will be considered as a legacy Workflow execution without any CHASM nodes. +func NewTreeFromDB( + serializedNodes map[string]*persistencespb.ChasmNode, // This is coming from MS map[nodePath]ChasmNode. + registry *Registry, + timeSource clock.TimeSource, + backend NodeBackend, + pathEncoder NodePathEncoder, + logger log.Logger, + metricsHandler metrics.Handler, +) (*Node, error) { + if len(serializedNodes) == 0 { + root := NewEmptyTree(registry, timeSource, backend, pathEncoder, logger, metricsHandler) + // NewEmptyTree initializes the serializedNode to an empty component node, + root.serializedNode.Metadata.GetComponentAttributes().TypeId = WorkflowArchetypeID + return root, nil + } + + root := newTreeHelper(registry, timeSource, backend, pathEncoder, logger, metricsHandler) + for encodedPath, serializedNode := range serializedNodes { + nodePath, err := pathEncoder.Decode(encodedPath) + if err != nil { + return nil, err + } + root.setSerializedNode(nodePath, encodedPath, serializedNode) + } + + if err := newTreeInitSearchAttributesAndMemo(root, registry); err != nil { + return nil, err + } + return root, nil +} + +// NewEmptyTree creates a new empty in-memory CHASM tree. +func NewEmptyTree( + registry *Registry, + timeSource clock.TimeSource, + backend NodeBackend, + pathEncoder NodePathEncoder, + logger log.Logger, + metricsHandler metrics.Handler, +) *Node { + root := newTreeHelper(registry, timeSource, backend, pathEncoder, logger, metricsHandler) + + // If serializedNodes is empty, it means that this is a new tree. + // Initialize empty serializedNode. + root.initSerializedNode(fieldTypeComponent) + // Default to Workflow archetype as empty tree is created for workflow as well. 
+ root.serializedNode.Metadata.GetComponentAttributes().TypeId = WorkflowArchetypeID + // Although both value and serializedNode.Data are nil, they are considered NOT synced + // because value has no type and serializedNode does. + // deserialize method should set value when called. + root.setValueState(valueStateNeedDeserialize) + return root +} + +func newTreeHelper( + registry *Registry, + timeSource clock.TimeSource, + backend NodeBackend, + pathEncoder NodePathEncoder, + logger log.Logger, + metricsHandler metrics.Handler, +) *Node { + base := &nodeBase{ + registry: registry, + timeSource: timeSource, + backend: backend, + pathEncoder: pathEncoder, + logger: logger, + metricsHandler: metricsHandler, + + mutation: NodesMutation{ + UpdatedNodes: make(map[string]*persistencespb.ChasmNode), + DeletedNodes: make(map[string]struct{}), + }, + systemMutation: NodesMutation{ + UpdatedNodes: make(map[string]*persistencespb.ChasmNode), + DeletedNodes: make(map[string]struct{}), + }, + newTasks: make(map[any][]taskWithAttributes), + immediatePureTasks: make(map[any][]taskWithAttributes), + valueToNode: make(map[any]*Node), + taskValueCache: make(map[*commonpb.DataBlob]reflect.Value), + needsPointerResolution: false, + } + + return newNode(base, nil, "") +} + +func newTreeInitSearchAttributesAndMemo( + root *Node, + registry *Registry, +) error { + immutableContext := NewContext(context.Background(), root) + rootComponent, err := root.Component(immutableContext, ComponentRef{}) + if err != nil { + return err + } + + // Theoretically we should check if the root node has a Visibility component or not. + // But that doesn't really matter. Even if it doesn't have one, currentSearchAttributes + // and currentMemo will just never be used. 
+ + if saProvider, ok := rootComponent.(VisibilitySearchAttributesProvider); ok { + saSlice := saProvider.SearchAttributes(immutableContext) + root.currentSA = searchAttributeKeyValuesToMap(saSlice) + } + if memoProvider, ok := rootComponent.(VisibilityMemoProvider); ok { + root.currentMemo = proto.Clone(memoProvider.Memo(immutableContext)) + } + + return nil +} + +func searchAttributeKeyValuesToMap(saSlice []SearchAttributeKeyValue) map[string]VisibilityValue { + result := make(map[string]VisibilityValue, len(saSlice)) + for _, sa := range saSlice { + result[sa.Field] = sa.Value + } + return result +} + +func (n *Node) SetRootComponent( + rootComponent RootComponent, +) error { + root := n.root() + root.setValue(rootComponent) + root.setValueState(valueStateNeedSyncStructure) + if componentID, ok := n.registry.ComponentIDFor(rootComponent); ok { + root.serializedNode.GetMetadata().GetComponentAttributes().TypeId = componentID + } + return root.syncSubComponents() +} + +// setValue sets the value field of the node. +// If the node is a component or data node, the index from node value to node (valueToNode) +// is also updated. +func (n *Node) setValue(value any) { + if !n.isComponent() && !n.isData() { + n.value = value + return + } + + if n.value != nil { + delete(n.valueToNode, n.value) + } + + n.value = value + + if value != nil { + n.valueToNode[value] = n + } +} + +func (n *Node) setValueState(state valueState) { + n.valueState = state + if state >= valueStateNeedSerialize { + n.isActiveStateDirty = true + } +} + +// Component retrieves a component from the tree rooted at node n +// using the provided component reference +// It also performs access rule, and task validation checks +// (for task processing requests) before returning the component. +func (n *Node) Component( + chasmContext Context, + ref ComponentRef, +) (Component, error) { + // Archetype is already validated before this method is called. 
+ // (when the mutable state is loaded, in chasm engine implementation)
+
+ node, ok := n.findNode(ref.componentPath)
+ if !ok {
+ return nil, errComponentNotFound
+ }
+
+ if ref.componentInitialVT != nil && transitionhistory.Compare(
+ ref.componentInitialVT,
+ node.serializedNode.Metadata.InitialVersionedTransition,
+ ) != 0 {
+ return nil, errComponentNotFound
+ }
+
+ validationContext := NewContext(chasmContext.goContext(), node)
+ if err := node.prepareComponentValue(validationContext); err != nil {
+ return nil, err
+ }
+
+ componentValue, ok := node.value.(Component)
+ if !ok {
+ return nil, softassert.UnexpectedInternalErr(
+ n.logger,
+ "component value is not of type Component",
+ fmt.Errorf("%T", node.value))
+ }
+
+ if err := node.validateAccess(validationContext, false); err != nil {
+ return nil, err
+ }
+
+ if ref.validationFn != nil {
+ if err := ref.validationFn(node.root().backend, validationContext, componentValue, node.registry); err != nil {
+ return nil, err
+ }
+ }
+
+ // prepare component value again using incoming context to mark node as dirty if needed.
+ if err := node.prepareComponentValue(chasmContext); err != nil {
+ return nil, err
+ }
+ return componentValue, nil
+}
+
+// validateAccess performs the access rule check on a node.
+//
+// When the context's intent is OperationIntentProgress, this check validates that
+// all of a node's ancestors are still in a running state, and can accept writes. In
+// the case of a newly created node, a detached node, or an OperationIntentObserve
+// intent, the check is skipped.
+//
+// When checkPaused is true (used during task validation), the check is extended to
+// also treat a paused lifecycle state as a blocking condition - for both ancestors
+// and the node itself. This collapses the paused-subtree traversal into the same
+// single pass, avoiding a second tree walk.
+// Note: engine mutations on paused components are still accepted (checkPaused=false), +// per the current requirement. +func (n *Node) validateAccess(ctx Context, checkPaused bool) error { + intent := operationIntentFromContext(ctx.goContext()) + if intent != OperationIntentProgress { + // Read-only operations are always allowed. + return nil + } + + // Detached nodes skip ancestor validation entirely. + if n.isDetached() { + return nil + } + + if n.parent != nil { + if err := n.parent.validateAccessHelper(ctx, checkPaused); err != nil { + return err + } + } + + // validateAccessHelper traverses ancestors but never checks n itself. + // For task validation we must also check whether n is paused. + if checkPaused && n.isComponent() { + if err := n.prepareComponentValue(ctx); err != nil { + return err + } + componentValue, _ := n.value.(Component) //nolint:revive // unchecked-type-assertion + if componentValue.LifecycleState(ctx).IsPaused() { + return errAccessCheckFailed + } + } + + return nil +} + +// validateAccessHelper is a helper method that validates both the current +// node's lifecycle state AND its ancestors recursively. +// Do not call this method directly, call validateAccess instead. +func (n *Node) validateAccessHelper(ctx Context, checkPaused bool) error { + // Check ancestors first (if not detached). + if !n.isDetached() && n.parent != nil { + if err := n.parent.validateAccessHelper(ctx, checkPaused); err != nil { + return err + } + } + + // Only Component nodes need to be validated. + if !n.isComponent() { + return nil + } + + // Hydrate the component so we can access its LifecycleState. 
+ if err := n.prepareComponentValue(ctx); err != nil { + return err + } + componentValue, _ := n.value.(Component) //nolint:revive // unchecked-type-assertion + + lifecycleState := componentValue.LifecycleState(ctx) + if lifecycleState.IsClosed() { + return errAccessCheckFailed + } + + if checkPaused && lifecycleState.IsPaused() { + return errAccessCheckFailed + } + + if n.terminated { + // Terminated nodes can never be written to. + // This handles the case where root is terminated in the current transaction. + return errAccessCheckFailed + } + + // terminated field check above is in memory only, so handle the case where root is terminated (closed) + // in a previous transaction and we have a mutable state reload which clears the field. + if n.parent == nil && n.backend.GetExecutionState().State == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { + return errAccessCheckFailed + } + + return nil +} + +func (n *Node) prepareComponentValue( + chasmContext Context, +) error { + if n.valueState == valueStateNeedDeserialize { + metadata := n.serializedNode.Metadata + componentAttr := metadata.GetComponentAttributes() + if componentAttr == nil { + return softassert.UnexpectedInternalErr( + n.logger, + "expect chasm node to have ComponentAttributes", + fmt.Errorf("actual attributes: %v", metadata.Attributes)) + } + + registrableComponent, ok := n.registry.ComponentByID(componentAttr.GetTypeId()) + if !ok { + return softassert.UnexpectedInternalErr( + n.logger, + "unknown component type ID", + fmt.Errorf("%d", componentAttr.GetTypeId())) + } + + if err := n.deserialize(registrableComponent.goType); err != nil { + return fmt.Errorf("failed to deserialize component: %w", err) + } + } + + // For now, we assume if a node is accessed with a MutableContext, + // its value will be mutated and no longer in sync with the serializedNode. 
+ _, componentCanBeMutated := chasmContext.(MutableContext) + if componentCanBeMutated { + n.setValueState(valueStateNeedSyncStructure) + } + + return nil +} + +func (n *Node) prepareDataValue( + chasmContext Context, + valueT reflect.Type, +) error { + metadata := n.serializedNode.Metadata + dataAttr := metadata.GetDataAttributes() + if dataAttr == nil { + return softassert.UnexpectedInternalErr( + n.logger, + "expect chasm node to have DataAttributes", + fmt.Errorf("actual attributes: %v", metadata.Attributes)) + } + + if n.valueState == valueStateNeedDeserialize { + if err := n.deserialize(valueT); err != nil { + return fmt.Errorf("failed to deserialize data: %w", err) + } + } + + // For now, we assume if a node is accessed with a MutableContext, + // its value will be mutated and no longer in sync with the serializedNode. + _, componentCanBeMutated := chasmContext.(MutableContext) + if componentCanBeMutated { + n.setValueState(valueStateNeedSerialize) + } + + return nil +} + +func (n *Node) preparePointerValue() error { + metadata := n.serializedNode.Metadata + pointerAttr := metadata.GetPointerAttributes() + if pointerAttr == nil { + return softassert.UnexpectedInternalErr( + n.logger, + "expect chasm node to have PointerAttributes", + fmt.Errorf("actual attributes: %v", metadata.Attributes)) + } + + if n.valueState == valueStateNeedDeserialize { + if err := n.deserialize(nil); err != nil { + return fmt.Errorf("failed to deserialize data: %w", err) + } + } + + return nil +} + +func (n *Node) isComponent() bool { + return n.serializedNode.GetMetadata().GetComponentAttributes() != nil +} + +func (n *Node) isData() bool { + return n.serializedNode.GetMetadata().GetDataAttributes() != nil +} + +func (n *Node) isMap() bool { + return n.serializedNode.GetMetadata().GetCollectionAttributes() != nil +} + +func (n *Node) isDetached() bool { + componentAttr := n.serializedNode.GetMetadata().GetComponentAttributes() + if componentAttr == nil { + return false + } + 
componentTypeID := componentAttr.GetTypeId() + if componentTypeID == CallbackComponentID || + componentTypeID == visibilityComponentTypeID { + // For backward compatibility purpose, we need to special handle callback and visibility components, + // which are implemented before detached component is properly supported by the framework. + return true + } + return componentAttr.GetDetached() +} + +func (n *Node) fieldType() fieldType { + if n.serializedNode.GetMetadata().GetComponentAttributes() != nil { + return fieldTypeComponent + } + + if n.serializedNode.GetMetadata().GetDataAttributes() != nil { + return fieldTypeData + } + + if n.serializedNode.GetMetadata().GetPointerAttributes() != nil { + return fieldTypePointer + } + + if n.serializedNode.GetMetadata().GetCollectionAttributes() != nil { + softassert.Fail( + n.logger, + "fieldType can't be called on Collection node because Collection is not a Field") + } + + return fieldTypeUnspecified +} + +func (n *Node) valueFields() iter.Seq[fieldInfo] { + return fieldsOf(reflect.ValueOf(n.value)) +} + +func assertStructPointer(t reflect.Type) error { + if t == nil { + return nil + } + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return serviceerror.NewInternalf("only pointer to struct is supported for tree node value: got %s", t.String()) + } + return nil +} + +func (n *Node) initSerializedNode(ft fieldType) { + switch ft { + case fieldTypeData: + n.serializedNode = &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: n.backend.NextTransitionCount(), + NamespaceFailoverVersion: n.backend.GetCurrentVersion(), + }, + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{ + DataAttributes: &persistencespb.ChasmDataAttributes{}, + }, + }, + } + case fieldTypeComponent: + n.serializedNode = &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + 
InitialVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: n.backend.NextTransitionCount(), + NamespaceFailoverVersion: n.backend.GetCurrentVersion(), + }, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{}, + }, + }, + } + case fieldTypePointer, fieldTypeDeferredPointer: + // A deferred pointer will be resolved to a regular pointer before persistence. + n.serializedNode = &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: n.backend.NextTransitionCount(), + NamespaceFailoverVersion: n.backend.GetCurrentVersion(), + }, + Attributes: &persistencespb.ChasmNodeMetadata_PointerAttributes{ + PointerAttributes: &persistencespb.ChasmPointerAttributes{}, + }, + }, + } + case fieldTypeUnspecified: + softassert.Fail(n.logger, + "initSerializedNode can't be called with unspecified field type") + } +} + +func (n *Node) initSerializedCollectionNode() { + n.serializedNode = &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: n.backend.NextTransitionCount(), + NamespaceFailoverVersion: n.backend.GetCurrentVersion(), + }, + Attributes: &persistencespb.ChasmNodeMetadata_CollectionAttributes{ + CollectionAttributes: &persistencespb.ChasmCollectionAttributes{}, + }, + }, + } +} + +func (n *Node) setSerializedNode( + nodePath []string, + encodedPath string, + serializedNode *persistencespb.ChasmNode, +) *Node { + if len(nodePath) == 0 { + n.serializedNode = serializedNode + n.setValueState(valueStateNeedDeserialize) + n.encodedPath = &encodedPath + return n + } + + childName := nodePath[0] + childNode, ok := n.children[childName] + if !ok { + childNode = newNode(n.nodeBase, n, childName) + n.children[childName] = childNode + } + return 
childNode.setSerializedNode(nodePath[1:], encodedPath, serializedNode)
+}
+
+// serialize sets or updates serializedValue field of the node n with serialized value.
+// It sets node's valueState to valueStateSynced and updates LastUpdateVersionedTransition.
+func (n *Node) serialize() error {
+ switch n.serializedNode.GetMetadata().GetAttributes().(type) {
+ case *persistencespb.ChasmNodeMetadata_ComponentAttributes:
+ return n.serializeComponentNode()
+ case *persistencespb.ChasmNodeMetadata_DataAttributes:
+ return n.serializeDataNode()
+ case *persistencespb.ChasmNodeMetadata_CollectionAttributes:
+ return n.serializeCollectionNode()
+ case *persistencespb.ChasmNodeMetadata_PointerAttributes:
+ return n.serializePointerNode()
+ default:
+ return softassert.UnexpectedInternalErr(n.logger, "unknown node type", nil)
+ }
+}
+
+func (n *Node) serializeComponentNode() error {
+ for field := range n.valueFields() {
+ if field.err != nil {
+ return field.err
+ }
+
+ if field.kind != fieldKindData {
+ continue
+ }
+
+ var blob *commonpb.DataBlob
+ if !field.val.IsNil() {
+ var err error
+ if blob, err = serialization.ProtoEncode(field.val.Interface().(proto.Message)); err != nil {
+ return err
+ }
+ }
+
+ rc, ok := n.registry.componentFor(n.value)
+ if !ok {
+ return softassert.UnexpectedInternalErr(
+ n.logger,
+ "component type is not registered",
+ fmt.Errorf("%s", reflect.TypeOf(n.value).String()))
+ }
+
+ n.serializedNode.Data = blob
+ n.serializedNode.GetMetadata().GetComponentAttributes().TypeId = rc.componentID
+ n.updateLastUpdateVersionedTransition()
+ n.setValueState(valueStateSynced)
+
+ // NOTE(review): the loop keeps iterating over the remaining fields, but no "only one proto field" validation actually happens in this body — a second data field would silently overwrite Data; confirm fieldsOf enforces a single data field, or add the check.
+ }
+ return nil
+}
+
+// syncSubComponents syncs the entire tree recursively (starting from the root node n) from the underlying component value:
+// - Create:
+// -- if child node is nil but subcomponent is not empty or key present in the collection,
+// a new node with subcomponent/collection_item value is created.
+// - Delete:
+// -- if subcomponent is empty, the corresponding child is removed from the tree,
+// -- if subcomponent is no longer in a component, the corresponding child is removed from the tree,
+// -- if collection item is not in the collection, the corresponding child is removed from the tree,
+// -- when a child is removed, all its children are removed too.
+//
+// All removed paths are added to mutation.DeletedNodes (which is shared between all nodes in the tree).
+//
+// If deferred pointer resolution is required, it is flagged via nodeBase.needsPointerResolution (set in syncSubField).
+//
+// nolint:revive,cognitive-complexity
+func (n *Node) syncSubComponents() error {
+ if n.valueState < valueStateNeedSyncStructure {
+ for _, childNode := range n.children {
+ err := childNode.syncSubComponents()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ childrenToKeep := make(map[string]struct{})
+ for field := range n.valueFields() {
+ if field.err != nil {
+ return field.err
+ }
+
+ switch field.kind {
+ case fieldKindUnspecified:
+ softassert.Fail(n.logger,
+ "field.kind can be unspecified only if err is not nil, and there is a check for it above")
+ case fieldKindData:
+ // Nothing to sync.
+ case fieldKindSubField: + keepChild, updatedFieldV, err := n.syncSubField(field.val, field.name) + if err != nil { + return err + } + if updatedFieldV.IsValid() { + field.val.Set(updatedFieldV) + } + if keepChild { + childrenToKeep[field.name] = struct{}{} + } + case fieldKindParentPtr: + internalField := field.val.FieldByName(parentPtrInternalFieldName) + internal, ok := internalField.Interface().(parentPtrInternal) + if !ok { + return softassert.UnexpectedInternalErr( + n.logger, + "CHASM parent pointer's internal field is not of parentPtrInternal type", + fmt.Errorf("node %s, actual type: %T", n.nodeName, internalField.Interface())) + } + if internal.currentNode == nil || internal.currentNode != n { + internal.currentNode = n + internalField.Set(reflect.ValueOf(internal)) + } + case fieldKindSubMap: + if field.val.IsNil() { + // If Map field is nil then delete all collection items nodes and collection node itself. + continue + } + + collectionNode := n.children[field.name] + if collectionNode == nil { + collectionNode = newNode(n.nodeBase, n, field.name) + collectionNode.initSerializedCollectionNode() + collectionNode.setValueState(valueStateNeedSyncStructure) + n.children[field.name] = collectionNode + } + + // Validate map type. + if field.val.Kind() != reflect.Map { + return softassert.UnexpectedInternalErr( + n.logger, + "CHASM map must be of map type", + fmt.Errorf("node %s", n.nodeName)) + } + + if len(field.val.MapKeys()) == 0 { + // If Map field is empty then delete all collection items nodes and collection node itself. 
+ continue + } + + mapValT := field.typ.Elem() + if mapValT.Kind() != reflect.Struct || genericTypePrefix(mapValT) != chasmFieldTypePrefix { + return softassert.UnexpectedInternalErr( + n.logger, + "CHASM map value must be of Field[T] type", + fmt.Errorf("node %s got %s", n.nodeName, mapValT)) + } + + collectionItemsToKeep := make(map[string]struct{}) + for _, mapKeyV := range field.val.MapKeys() { + mapItemV := field.val.MapIndex(mapKeyV) + collectionKey, err := n.mapKeyToString(mapKeyV) + if err != nil { + return err + } + keepItem, updatedMapItemV, err := collectionNode.syncSubField(mapItemV, collectionKey) + if err != nil { + return err + } + if updatedMapItemV.IsValid() { + // The only way to update item in the map is to set it back. + field.val.SetMapIndex(mapKeyV, updatedMapItemV) + } + if keepItem { + collectionItemsToKeep[collectionKey] = struct{}{} + } + } + if err := collectionNode.deleteChildren(collectionItemsToKeep); err != nil { + return err + } + collectionNode.setValueState(min(valueStateNeedSerialize, collectionNode.valueState)) + childrenToKeep[field.name] = struct{}{} + } + } + + err := n.deleteChildren(childrenToKeep) + n.setValueState(valueStateNeedSerialize) + + return err +} + +func (n *Node) mapKeyToString(keyV reflect.Value) (string, error) { + switch keyV.Kind() { + case reflect.String: + return keyV.String(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(keyV.Int(), 10), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(keyV.Uint(), 10), nil + case reflect.Bool: + return strconv.FormatBool(keyV.Bool()), nil + default: + return "", softassert.UnexpectedInternalErr( + n.logger, + "CHASM map key type is not supported", + fmt.Errorf("node %s must be one of [%s], got %s", n.nodeName, mapKeyTypes, keyV.Type().String())) + } +} + +func (n *Node) stringToMapKey(nodeName string, key string, keyT reflect.Type) 
(reflect.Value, error) { + var ( + keyV reflect.Value + err error + ) + switch keyT.Kind() { + case reflect.String: + keyV = reflect.ValueOf(key) + case reflect.Int: + var x int64 + x, err = strconv.ParseInt(key, 10, 0) + keyV = reflect.ValueOf(int(x)) + case reflect.Int8: + var x int64 + x, err = strconv.ParseInt(key, 10, 8) + keyV = reflect.ValueOf(int8(x)) + case reflect.Int16: + var x int64 + x, err = strconv.ParseInt(key, 10, 16) + keyV = reflect.ValueOf(int16(x)) + case reflect.Int32: + var x int64 + x, err = strconv.ParseInt(key, 10, 32) + keyV = reflect.ValueOf(int32(x)) + case reflect.Int64: + var x int64 + x, err = strconv.ParseInt(key, 10, 64) + keyV = reflect.ValueOf(x) + case reflect.Uint: + var x uint64 + x, err = strconv.ParseUint(key, 10, 0) + keyV = reflect.ValueOf(uint(x)) + case reflect.Uint8: + var x uint64 + x, err = strconv.ParseUint(key, 10, 8) + keyV = reflect.ValueOf(uint8(x)) + case reflect.Uint16: + var x uint64 + x, err = strconv.ParseUint(key, 10, 16) + keyV = reflect.ValueOf(uint16(x)) + case reflect.Uint32: + var x uint64 + x, err = strconv.ParseUint(key, 10, 32) + keyV = reflect.ValueOf(uint32(x)) + case reflect.Uint64: + var x uint64 + x, err = strconv.ParseUint(key, 10, 64) + keyV = reflect.ValueOf(x) + case reflect.Bool: + var b bool + b, err = strconv.ParseBool(key) + keyV = reflect.ValueOf(b) + default: + // Use softassert only here because this is the only case that indicates "compile" time error. + // The other errors below can come from data type mismatch between a component and persisted data. 
+ err = softassert.UnexpectedInternalErr( + n.logger, + "unsupported CHASM map key type", + fmt.Errorf("unsupported type %s of kind %s: supported key types: %s", keyT.String(), keyT.Kind().String(), mapKeyTypes), + tag.Error(err)) + } + + if err == nil && !keyV.IsValid() { + err = fmt.Errorf("value %s is not valid of type %s of kind %s", key, keyT.String(), keyT.Kind().String()) + } + + if err != nil { + err = softassert.UnexpectedInternalErr( + n.logger, + "serialized map key value can't be parsed to CHASM map key type", + fmt.Errorf("nodeName: %s, key: %s, keyType: %s, error: %s", nodeName, key, keyT.String(), err.Error())) + } + + return keyV, err +} + +// syncSubField syncs node n with value from fieldV parameter. +// If fieldV is a component, then it will sync all subcomponents recursively. +// It returns: +// - bool keepNode indicates if node needs to be removed from parent's children map. +// - updatedFieldV if fieldV needs to be updated with new value. +// If updatedFieldV is invalid, then fieldV doesn't need to be updated. +// NOTE: this function doesn't update fieldV because it might come from the map which is not addressable. +// - error. +func (n *Node) syncSubField( + fieldV reflect.Value, + fieldN string, +) ( + keepNode bool, + updatedFieldV reflect.Value, + err error, +) { + internalV := fieldV.FieldByName(internalFieldName) + //nolint:revive // Internal field is guaranteed to be of type fieldInternal. + internal := internalV.Interface().(fieldInternal) + if internal.isEmpty() { + // Internal is empty only when Field was explicitly set to NewEmptyField[T] which is a way to clear its value. + // In this case, return keepNode=false and this node (and all it children) will be added to DeletedNodes map. + return + } + + fieldValue := internal.value() + if internal.node == nil && fieldValue != nil { + fieldType := internal.fieldType() + + // Field is not empty but tree node is not set. It means this is a new field, and a node must be created. 
+ childNode := newNode(n.nodeBase, n, fieldN) + childNode.initSerializedNode(fieldType) + childNode.setValueState(valueStateNeedSerialize) + + // set node value after validation + switch fieldType { + case fieldTypeComponent: + if err = assertStructPointer(reflect.TypeOf(fieldValue)); err != nil { + return + } + + childNode.setValueState(valueStateNeedSyncStructure) + + // Set detached flag from field option or component type registration. + componentAttr := childNode.serializedNode.GetMetadata().GetComponentAttributes() + componentAttr.Detached = internal.detached + if !componentAttr.Detached { + if rc, ok := n.registry.componentFor(fieldValue); ok { + componentAttr.Detached = rc.IsDetached() + } + } + case fieldTypeData: + if err = assertStructPointer(reflect.TypeOf(fieldValue)); err != nil { + return + } + case fieldTypePointer: + if _, ok := fieldValue.([]string); !ok { + err = softassert.UnexpectedInternalErr( + n.logger, + "value must be of type []string for the field of pointer type", + fmt.Errorf("got %T", fieldValue)) + return + } + case fieldTypeDeferredPointer: + n.needsPointerResolution = true + default: + err = softassert.UnexpectedInternalErr( + n.logger, + "unexpected field type", + fmt.Errorf("%d", fieldType), + ) + return + } + childNode.setValue(fieldValue) + + n.children[fieldN] = childNode + internal.node = childNode + + updatedFieldV = reflect.New(fieldV.Type()).Elem() + updatedFieldV.FieldByName(internalFieldName).Set(reflect.ValueOf(internal)) + } + + if internal.fieldType() == fieldTypeComponent && internal.value() != nil { + err = internal.node.syncSubComponents() + if err != nil { + return + } + } + + return true, updatedFieldV, nil +} + +func (n *Node) deleteChildren( + childrenToKeep map[string]struct{}, +) error { + for childName, childNode := range n.children { + if _, childToKeep := childrenToKeep[childName]; !childToKeep { + if err := childNode.delete(false); err != nil { + return err + } + } + } + return nil +} + +func (n *Node) 
serializeDataNode() error { + protoValue, ok := n.value.(proto.Message) + if !ok { + return serviceerror.NewInternal("only support proto.Message as chasm data") + } + + var blob *commonpb.DataBlob + if protoValue != nil { + var err error + if blob, err = serialization.ProtoEncode(protoValue); err != nil { + return err + } + } + n.serializedNode.Data = blob + n.updateLastUpdateVersionedTransition() + n.setValueState(valueStateSynced) + + return nil +} + +func (n *Node) serializeCollectionNode() error { + // The collection node has no data; therefore, only metadata needs to be updated. + n.updateLastUpdateVersionedTransition() + n.setValueState(valueStateSynced) + return nil +} + +// serializePointerNode doesn't serialize anything but named this way for consistency. +func (n *Node) serializePointerNode() error { + path, isPathValid := n.value.([]string) + if !isPathValid { + return softassert.UnexpectedInternalErr( + n.logger, + "pointer path is not []string", + fmt.Errorf("got %T for node %s", n.value, n.nodeName)) + } + + n.serializedNode.GetMetadata().GetPointerAttributes().NodePath = path + n.updateLastUpdateVersionedTransition() + n.setValueState(valueStateSynced) + + return nil +} + +func (n *Node) updateLastUpdateVersionedTransition() { + if n.serializedNode.GetMetadata().GetLastUpdateVersionedTransition() == nil { + n.serializedNode.GetMetadata().LastUpdateVersionedTransition = &persistencespb.VersionedTransition{} + } + n.serializedNode.GetMetadata().GetLastUpdateVersionedTransition().TransitionCount = n.backend.NextTransitionCount() + n.serializedNode.GetMetadata().GetLastUpdateVersionedTransition().NamespaceFailoverVersion = n.backend.GetCurrentVersion() +} + +// deserialize initializes the node's value from its serializedNode. +// If a value is of the component type, it initializes every chasm.Field of it and sets serializedNode field but not value field, +// i.e., it doesn't deserialize recursively and must be called on every node separately. 
+// valueT must be a pointer to a concrete type (not interface). To support deserialization of a component to interface, +// a registry lookup must be done outside the deserialize method. +func (n *Node) deserialize( + valueT reflect.Type, +) error { + if err := assertStructPointer(valueT); err != nil { + return err + } + + if n.valueState != valueStateNeedDeserialize && reflect.TypeOf(n.value) == valueT { + return nil + } + + switch n.serializedNode.GetMetadata().GetAttributes().(type) { + case *persistencespb.ChasmNodeMetadata_ComponentAttributes: + return n.deserializeComponentNode(valueT) + case *persistencespb.ChasmNodeMetadata_DataAttributes: + return n.deserializeDataNode(valueT) + case *persistencespb.ChasmNodeMetadata_CollectionAttributes: + softassert.Fail( + n.logger, + "deserialize shouldn't be called on the collection node because it is deserialized with the parent component.") + case *persistencespb.ChasmNodeMetadata_PointerAttributes: + return n.deserializePointerNode() + } + return nil +} + +func (n *Node) deserializeComponentNode( + valueT reflect.Type, +) error { + // valueT is guaranteed to be a pointer to the struct because it was already validated by the assertStructPointer method. 
+ valueV := reflect.New(valueT.Elem()) + + for field := range fieldsOf(valueV) { + if field.err != nil { + return field.err + } + + switch field.kind { + case fieldKindUnspecified: + softassert.Fail( + n.logger, + "field.kind can be unspecified only if err is not nil, and there is a check for it above", + tag.String("node name", n.nodeName)) + case fieldKindData: + value, err := unmarshalProto(n.serializedNode.GetData(), field.typ) + if err != nil { + return err + } + field.val.Set(value) + case fieldKindSubField: + if childNode, found := n.children[field.name]; found { + chasmFieldV := reflect.New(field.typ).Elem() + internalValue := reflect.ValueOf(newFieldInternalWithNode(childNode)) + chasmFieldV.FieldByName(internalFieldName).Set(internalValue) + field.val.Set(chasmFieldV) + } + case fieldKindSubMap: + if collectionNode, found := n.children[field.name]; found { + mapFieldV := field.val + if mapFieldV.IsNil() { + mapFieldV = reflect.MakeMapWithSize(field.typ, field.val.Len()) + field.val.Set(mapFieldV) + } + + for collectionItemName, collectionItemNode := range collectionNode.children { + // field.typ.Elem() is a go type of map item: Field[T] + chasmFieldV := reflect.New(field.typ.Elem()).Elem() + internalValue := reflect.ValueOf(newFieldInternalWithNode(collectionItemNode)) + chasmFieldV.FieldByName(internalFieldName).Set(internalValue) + mapKeyV, err := n.stringToMapKey(field.name, collectionItemName, mapFieldV.Type().Key()) + if err != nil { + return err + } + mapFieldV.SetMapIndex(mapKeyV, chasmFieldV) + } + } + case fieldKindMutableState: + field.val.Set(reflect.ValueOf(NewMSPointer(n.backend))) + case fieldKindParentPtr: + parentPtrV := reflect.New(field.typ).Elem() + parentPtrV.FieldByName(parentPtrInternalFieldName).Set(reflect.ValueOf(parentPtrInternal{ + currentNode: n, + })) + field.val.Set(parentPtrV) + } + } + + n.setValue(valueV.Interface()) + n.setValueState(valueStateSynced) + return nil +} + +func (n *Node) deserializeDataNode( + valueT 
	reflect.Type,
) error {
	value, err := unmarshalProto(n.serializedNode.GetData(), valueT)
	if err != nil {
		return err
	}

	n.setValue(value.Interface())
	n.setValueState(valueStateSynced)
	return nil
}

// deserializePointerNode doesn't deserialize anything but named this way for consistency.
func (n *Node) deserializePointerNode() error {
	n.setValue(n.serializedNode.GetMetadata().GetPointerAttributes().GetNodePath())
	n.setValueState(valueStateSynced)
	return nil
}

// unmarshalProto decodes dataBlob into a newly allocated value of type valueT.
// valueT must implement proto.Message; a nil/empty blob is treated as the
// proto zero value of that type.
func unmarshalProto(
	dataBlob *commonpb.DataBlob,
	valueT reflect.Type,
) (reflect.Value, error) {
	if !valueT.AssignableTo(protoMessageT) {
		return reflect.Value{}, serviceerror.NewInternal("only support proto.Message as chasm data")
	}

	value := reflect.New(valueT.Elem())

	if dataBlob == nil || len(dataBlob.Data) == 0 {
		// If the original data is the zero value of its type, the dataBlob loaded from persistence layer will be nil.
		// But we know for component & data nodes, they won't get persisted in the first place if there's no data,
		// so it must be a zero value.
		dataBlob = &commonpb.DataBlob{
			EncodingType: enumspb.ENCODING_TYPE_PROTO3,
			Data:         []byte{},
		}
	}

	if err := serialization.Decode(dataBlob, value.Interface().(proto.Message)); err != nil {
		return reflect.Value{}, err
	}

	return value, nil
}

// Ref implements the CHASM Context interface.
// It returns a serialized reference for the given component.
func (n *Node) Ref(
	component Component,
) ([]byte, error) {
	ref, err := n.structuredRef(component)
	if err != nil {
		return nil, err
	}
	return ref.Serialize(n.registry)
}

// structuredRef returns a ComponentRef for the node.
func (n *Node) structuredRef(
	component Component,
) (ComponentRef, error) {
	// No need to update tree structure here. If a Component can only be found after
	// syncSubComponents() is called, it means the component is created in the
	// current transition and doesn't have a reference yet.

	refNode, ok := n.valueToNode[component]
	if !ok || !refNode.isComponent() {
		return ComponentRef{}, errComponentNotFound
	}

	workflowKey := refNode.backend.GetWorkflowKey()
	return ComponentRef{
		ExecutionKey: ExecutionKey{
			NamespaceID: workflowKey.NamespaceID,
			BusinessID:  workflowKey.WorkflowID,
			RunID:       workflowKey.RunID,
		},
		archetypeID: n.ArchetypeID(),
		// TODO: Consider using node's LastUpdateVersionedTransition for checking staleness here.
		// Using VersionedTransition of the entire tree might be too strict.
		executionLastUpdateVT: transitionhistory.CopyVersionedTransition(refNode.backend.CurrentVersionedTransition()),
		componentPath:         refNode.path(),
		componentInitialVT:    refNode.serializedNode.GetMetadata().GetInitialVersionedTransition(),
	}, nil

}

// componentNodePath implements the CHASM Context interface.
// It returns the tree path of the node holding the given (already deserialized) component.
func (n *Node) componentNodePath(
	component Component,
) ([]string, error) {
	// It's unnecessary to deserialize entire tree as calling this method means
	// caller already have the deserialized value.

	refNode, ok := n.valueToNode[component]
	if !ok || !refNode.isComponent() {
		return nil, errComponentNotFound
	}

	return refNode.path(), nil
}

// dataNodePath implements the CHASM Context interface.
// It returns the tree path of the node holding the given (already deserialized) data value.
func (n *Node) dataNodePath(
	data proto.Message,
) ([]string, error) {
	// It's unnecessary to deserialize entire tree as calling this method means
	// caller already have the deserialized value.

	refNode, ok := n.valueToNode[data]
	if !ok || !refNode.isData() {
		return nil, errDataNotFound
	}

	return refNode.path(), nil
}

// Now implements the CHASM Context interface
func (n *Node) Now(
	_ Component,
) time.Time {
	// TODO: Now() could be different for components after we support Pause for CHASM components.
	return n.timeSource.Now()
}

// AddTask implements the CHASM MutableContext interface.
// Immediate pure tasks are buffered separately and executed within the current
// transaction; all other tasks are recorded and persisted at CloseTransaction().
func (n *Node) AddTask(
	component Component,
	taskAttributes TaskAttributes,
	task any,
) {
	rt, ok := n.registry.taskFor(task)
	if ok && rt.isPureTask && taskAttributes.IsImmediate() {
		// Those tasks will be executed in the current transaction.
		n.immediatePureTasks[component] = append(n.immediatePureTasks[component], taskWithAttributes{
			task:       task,
			attributes: taskAttributes,
		})
		return
	}

	n.nodeBase.newTasks[component] = append(n.nodeBase.newTasks[component], taskWithAttributes{
		task:       task,
		attributes: taskAttributes,
	})
}

// CloseTransaction is used by MutableState to close the transaction and
// track changes made in the current transaction.
func (n *Node) CloseTransaction() (NodesMutation, error) {
	defer n.cleanupTransaction()

	if err := n.executeImmediatePureTasks(); err != nil {
		return NodesMutation{}, err
	}

	if err := n.syncSubComponents(); err != nil {
		return NodesMutation{}, err
	}

	if n.needsPointerResolution {
		if err := n.resolveDeferredPointers(); err != nil {
			return NodesMutation{}, err
		}
	}

	nextVersionedTransition := &persistencespb.VersionedTransition{
		NamespaceFailoverVersion: n.backend.GetCurrentVersion(),
		TransitionCount:          n.backend.NextTransitionCount(),
	}

	immutableContext := NewContext(context.TODO(), n)
	rootLifecycleChanged, err := n.closeTransactionHandleRootLifecycleChange(immutableContext)
	if err != nil {
		return NodesMutation{}, err
	}

	if n.isActiveStateDirty {
		if err := n.closeTransactionForceUpdateVisibility(immutableContext, rootLifecycleChanged); err != nil {
			return NodesMutation{}, err
		}
	}

	if err := n.closeTransactionSerializeNodes(); err != nil {
		return NodesMutation{}, err
	}

	if err := n.closeTransactionUpdateComponentTasks(nextVersionedTransition); err != nil {
		return NodesMutation{}, err
	}

	// Both user & system data mutation need to be returned and persisted.
	maps.Copy(n.mutation.UpdatedNodes, n.systemMutation.UpdatedNodes)
	maps.Copy(n.mutation.DeletedNodes, n.systemMutation.DeletedNodes)

	return n.mutation, nil
}

// executeImmediatePureTasks drains n.immediatePureTasks, executing each task in
// the current transaction. Tasks may enqueue more immediate pure tasks; the outer
// loop keeps draining until none remain.
func (n *Node) executeImmediatePureTasks() error {

	// We must sync structure before running any tasks here because,
	// those tasks might be for a newly created component which doesn't even have a node yet.
	// And we want to make sure we only run tasks for components that are still part of the tree.
	syncStructure := true
	var err error

	for len(n.immediatePureTasks) != 0 {
		// Create a map in case more immediate pure tasks get
		// added while existing ones are executed.
		immediatePureTasks := n.immediatePureTasks
		n.immediatePureTasks = make(map[any][]taskWithAttributes)

		for component, pureTasks := range immediatePureTasks {
			for _, task := range pureTasks {
				if syncStructure {
					if err := n.syncSubComponents(); err != nil {
						return err
					}
				}

				// The corresponding Node may not be found due to several reasons:
				// 1. This function is executed at the end of a transaction which could contain multiple transitions.
				// So it's possible that a task added for a component in one transition and in a later transition that component get removed.
				// 2. Previous pure task for the node deleted the node itself via a (parent) pointer.
				// This is also why this check is done in the inner for loop.
				taskNode, ok := n.valueToNode[component]
				if !ok {
					break
				}

				// Only syncStructure on next iteration if task is executed (the first return value).
				syncStructure, err = taskNode.ExecutePureTask(context.Background(), task.attributes, task.task)
				if err != nil {
					return err
				}
			}
		}
	}

	return nil
}

// closeTransactionHandleRootLifecycleChange maps the root component's lifecycle
// state to the execution state/status in mutable state. Returns true if the
// execution state/status was updated. No-op for workflows, which manage their
// lifecycle directly in mutable state.
func (n *Node) closeTransactionHandleRootLifecycleChange(
	immutableContext Context,
) (bool, error) {
	if n.backend.IsWorkflow() {
		// Workflow manages its lifecycle directly in mutable state.
		return false, nil
	}

	if n.valueState != valueStateNeedSerialize {
		return false, nil
	}

	if n.backend.GetExecutionState().State == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED {
		// Already in completed state, no need to update lifecycle state.
		return false, nil
	}

	if n.terminated {
		return n.backend.UpdateWorkflowStateStatus(
			enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED,
			enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED,
		)
	}

	rootComponent, err := n.Component(immutableContext, ComponentRef{})
	if err != nil {
		return false, err
	}
	lifecycleState := rootComponent.LifecycleState(immutableContext)

	var newState enumsspb.WorkflowExecutionState
	var newStatus enumspb.WorkflowExecutionStatus
	switch lifecycleState {
	case LifecycleStateRunning, LifecycleStatePaused:
		// Paused is an OPEN state; the execution remains RUNNING from the persistence perspective.
		newState = enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING
		newStatus = enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING
	case LifecycleStateCompleted:
		newState = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED
		newStatus = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED
	case LifecycleStateFailed:
		newState = enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED
		newStatus = enumspb.WORKFLOW_EXECUTION_STATUS_FAILED
	default:
		return false, softassert.UnexpectedInternalErr(
			n.logger,
			"unknown component lifecycle state",
			fmt.Errorf("%v", lifecycleState))
	}

	return n.backend.UpdateWorkflowStateStatus(newState, newStatus)
}

// closeTransactionForceUpdateVisibility regenerates a visibility task when the
// root component's search attributes or memo changed (or when the root lifecycle
// changed), so the change is reflected in the visibility store.
func (n *Node) closeTransactionForceUpdateVisibility(
	immutableContext Context,
	rootLifecycleChanged bool,
) error {
	if n.deleteAfterClose {
		return nil
	}

	if !rootLifecycleChanged &&
		n.backend.GetExecutionState().State == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED {
		return nil
	}

	needUpdate := rootLifecycleChanged

	rootComponent, err := n.Component(immutableContext, ComponentRef{})
	if err != nil {
		return err
	}

	saProvider, ok := rootComponent.(VisibilitySearchAttributesProvider)
	if ok {
		saSlice := saProvider.SearchAttributes(immutableContext)
		newSA := searchAttributeKeyValuesToMap(saSlice)
		if !maps.EqualFunc(n.currentSA, newSA, isVisibilityValueEqual) {
			needUpdate = true
		}
		n.currentSA = newSA
	}

	memoProvider, ok := rootComponent.(VisibilityMemoProvider)
	if ok {
		newMemo := memoProvider.Memo(immutableContext)
		if !proto.Equal(n.currentMemo, newMemo) {
			needUpdate = true
		}
		n.currentMemo = proto.Clone(newMemo)
	}

	if !needUpdate {
		return nil
	}

	// Locate the visibility component among the root's immediate children,
	// matching either by registered type (deserialized node) or by the persisted
	// component type ID (serialized node).
	var visibilityNode *Node
	for _, child := range n.children {
		if !child.isComponent() {
			continue
		}

		if child.valueState == valueStateNeedSerialize {
			if rc, ok := n.registry.componentFor(child.value); ok && rc.fqType() == visibilityComponentType {
				visibilityNode = child
				break
			}
		} else if child.serializedNode.Metadata.GetComponentAttributes().TypeId == visibilityComponentTypeID {
			visibilityNode = child
			break
		}
	}

	if visibilityNode == nil {
		return nil
	}

	visComponent, err := visibilityNode.Component(immutableContext, ComponentRef{})
	if err != nil {
		return err
	}

	visibility, ok := visComponent.(*Visibility)
	if !ok {
		return softassert.UnexpectedInternalErr(
			n.logger,
			"expected visibility component for component type",
			fmt.Errorf("type: %s, but got %T", visibilityComponentType, visComponent))
	}

	// Generate a task and mark the node as dirty.
	//
	// NOTE: generateTask() will create a new logical task for the visibility component. But it also
	// invalidates all previous logical tasks at the end of the transaction, and only one physical task
	// will be created in the visibility queue.
	mutableContext := NewMutableContext(context.TODO(), n)
	visibility.generateTask(mutableContext)
	visibilityNode.setValueState(valueStateNeedSerialize)

	// We don't need to sync tree structure here for the visibility node because we only generated a task without
	// changing any component fields.
	return nil
}

// closeTransactionSerializeNodes serializes every node that needs serialization
// and records it in the mutation's UpdatedNodes map.
func (n *Node) closeTransactionSerializeNodes() error {
	for nodePath, node := range n.andAllChildren() {
		if node.valueState > valueStateNeedSerialize {
			return serviceerror.NewInternalf("invalid valueState for serializing: %v", node.valueState)
		}

		if node.valueState < valueStateNeedSerialize {
			continue
		}

		if err := node.serialize(); err != nil {
			return err
		}

		if componentAttr := node.serializedNode.GetMetadata().GetComponentAttributes(); componentAttr != nil &&
			componentAttr.TypeId == visibilityComponentTypeID &&
			len(nodePath) != 1 {
			return softassert.UnexpectedInternalErr(
				n.logger,
				"CHASM visibility component must be immediate child of the root node",
				fmt.Errorf("found at path %s", nodePath))
		}

		encodedPath, err := node.getEncodedPath()
		if err != nil {
			return err
		}
		n.mutation.UpdatedNodes[encodedPath] = node.serializedNode
		// DeletedNodes map is populated when syncing tree structure. However, since we may sync tree structure
		// multiple times in one transaction, if node at the same path was previously deleted, have structure synced,
		// then get re-created, the same encoded path will exist in both UpdatedNodes and DeletedNodes maps.
		//
		// serializeNode only happens once at the end of a transaction, and here we know the node at this encoded path exists,
		// remove it from the DeletedNodes map.
		delete(n.mutation.DeletedNodes, encodedPath)
	}

	return nil
}

// closeTransactionUpdateComponentTasks validates existing component tasks,
// records newly added ones, and generates the physical queue tasks needed to
// process them.
func (n *Node) closeTransactionUpdateComponentTasks(
	nextVersionedTransition *persistencespb.VersionedTransition,
) error {
	taskOffset := int64(1)
	taskValidationContext := NewContext(newContextWithOperationIntent(context.Background(), OperationIntentProgress), n)

	archetypeID := n.ArchetypeID()

	var firstPureTask *persistencespb.ChasmComponentAttributes_Task
	var firstPureTaskNode *Node

	for nodePath, node := range n.andAllChildren() {
		// no-op if node is not a component
		componentAttr := node.serializedNode.Metadata.GetComponentAttributes()
		if componentAttr == nil {
			continue
		}

		// First update component logical tasks.

		// Even if a node is not touched in this transaction, its task can still become invalid due to, e.g.
		// - child component state update
		// - parent component closing (access rule)
		// - a pointer field pointing to an updated component
		// As a result, we need to validate existing tasks for all components if we are in active cluster.
		if n.isActiveStateDirty {
			// Ensure this node's component value is hydrated before cleaning up tasks.
			if err := node.prepareComponentValue(taskValidationContext); err != nil {
				return err
			}

			cleanedUp, err := node.closeTransactionCleanupInvalidTasks(taskValidationContext)
			if err != nil {
				return err
			}

			if cleanedUp {
				// add the current node to UpdatedNodes map if it's not already there
				encodedPath, err := node.getEncodedPath()
				if err != nil {
					return err
				}
				if _, exists := n.mutation.UpdatedNodes[encodedPath]; !exists {
					// Mark the node as updated so changes will get replicated.
					node.updateLastUpdateVersionedTransition()

					n.mutation.UpdatedNodes[encodedPath] = node.serializedNode
					delete(n.mutation.DeletedNodes, encodedPath)
				}
			}
		}

		// The condition excludes replication logic (applyMutation/Snapshot) which sets
		// valueState to valueStateNeedDeserialize.
		//
		// Do NOT use condition node.valueState == valueStateNeedSerialize.
		// This method is called after the closeTransactionSerializeNodes which sets valueState
		// to valueStateSynced.
		if transitionhistory.Compare(
			node.serializedNode.GetMetadata().LastUpdateVersionedTransition,
			nextVersionedTransition,
		) == 0 && node.valueState != valueStateNeedDeserialize {
			if err := node.closeTransactionHandleNewTasks(
				nextVersionedTransition,
				taskValidationContext,
				&taskOffset,
			); err != nil {
				return err
			}
		}

		// Walk side-effect tasks from newest to oldest; stop at the first one that
		// already has a physical task, since everything older must have one too.
		sideEffectTasks := componentAttr.GetSideEffectTasks()
		for idx := len(sideEffectTasks) - 1; idx >= 0; idx-- {
			sideEffectTask := sideEffectTasks[idx]
			if sideEffectTask.PhysicalTaskStatus == physicalTaskStatusCreated {
				break
			}

			node.closeTransactionGeneratePhysicalSideEffectTask(
				sideEffectTask,
				nodePath,
				archetypeID,
			)
		}

		// Find the first pure task in the entire tree,
		// regardless of whether the pure task is newly added or existing.
		pureTasks := componentAttr.GetPureTasks()
		if len(pureTasks) == 0 {
			continue
		}

		if firstPureTask == nil ||
			comparePureTasks(pureTasks[0], firstPureTask) < 0 {
			firstPureTask = pureTasks[0]
			firstPureTaskNode = node
		}
	}

	// TODO: We cannot simply assert that all tasks in n.nodeBase.newTasks are processed.
	// That should be the case when only one transition for each transaction.
	// However, when processing pure tasks, we run multiple pure tasks, thus multiple transitions
	// in one transaction. This means it's possible that task generated for a component in the first
	// task, and that component get deleted by the second task.

	return n.closeTransactionGeneratePhysicalPureTask(
		firstPureTask,
		firstPureTaskNode,
		archetypeID,
	)
}

// deserializeComponentTask decodes a persisted component task into its
// registered Go task value, using the node-level task cache when possible.
func (n *Node) deserializeComponentTask(
	componentTask *persistencespb.ChasmComponentAttributes_Task,
) (any, error) {
	registableTask, ok := n.registry.TaskByID(componentTask.TypeId)
	if !ok {
		return nil, softassert.UnexpectedInternalErr(
			n.logger,
			"unknown task type id",
			fmt.Errorf("%d", componentTask.TypeId))
	}

	taskValue, err := n.deserializeTaskWithCache(registableTask, componentTask.Data)
	if err != nil {
		return nil, err
	}

	return taskValue.Interface(), nil
}

// validateTask runs taskInstance's registered validation handler.
// This method assumes component value is already hydrated.
func (n *Node) validateTask(
	validateContext Context,
	taskAttributes TaskAttributes,
	taskInstance any,
) (_ bool, retErr error) {
	registableTask, ok := n.registry.taskFor(taskInstance)
	if !ok {
		return false, softassert.UnexpectedInternalErr(
			n.logger,
			"task type for goType is not registered",
			fmt.Errorf("%s", reflect.TypeOf(taskInstance).Name()))
	}

	// checkPaused=true: a single ancestor walk invalidates tasks for both
	// closed ancestors and paused components (self or non-detached ancestor).
	if err := n.validateAccess(validateContext, true); err != nil {
		if errors.Is(err, errAccessCheckFailed) {
			return false, nil
		}
		return false, err
	}

	defer log.CapturePanic(n.logger, &retErr)

	return registableTask.validateFn(
		validateContext,
		n.value,
		taskAttributes,
		taskInstance,
		n.registry,
	)
}

// closeTransactionCleanupInvalidTasks removes tasks that no longer validate from
// the component's side-effect and pure task lists. Returns true if anything was removed.
func (n *Node) closeTransactionCleanupInvalidTasks(
	validateContext Context,
) (bool, error) {
	// Validate existing tasks and remove invalid ones.
	var validationErr error
	cleanedUp := false
	deleteFunc := func(existingTask *persistencespb.ChasmComponentAttributes_Task) bool {
		existingTaskInstance, err := n.deserializeComponentTask(existingTask)
		if err != nil {
			validationErr = err
			return false
		}

		valid, err := n.validateTask(
			validateContext,
			TaskAttributes{
				ScheduledTime: existingTask.ScheduledTime.AsTime(),
				Destination:   existingTask.Destination,
			},
			existingTaskInstance,
		)
		if err != nil {
			validationErr = err
			return false
		}
		if !valid {
			cleanedUp = true
			// Drop the cached deserialized value for the removed task.
			delete(n.taskValueCache, existingTask.Data)
		}
		return !valid
	}

	componentAttr := n.serializedNode.Metadata.GetComponentAttributes()
	componentAttr.SideEffectTasks = slices.DeleteFunc(componentAttr.SideEffectTasks, deleteFunc)
	if validationErr != nil {
		return false, validationErr
	}
	componentAttr.PureTasks = slices.DeleteFunc(componentAttr.PureTasks, deleteFunc)
	if validationErr != nil {
		return false, validationErr
	}
	return cleanedUp, nil
}

// closeTransactionHandleNewTasks validates, serializes, and records the tasks
// added via AddTask() for this node's component during the current transaction.
// taskOffset orders tasks created within the same versioned transition.
func (n *Node) closeTransactionHandleNewTasks(
	nextVersionedTransition *persistencespb.VersionedTransition,
	validateContext Context,
	taskOffset *int64,
) error {
	newTasks, ok := n.newTasks[n.value]
	if !ok {
		return nil
	}

	componentAttr := n.serializedNode.Metadata.GetComponentAttributes()
	sortPureTasks := false

	for _, newTask := range newTasks {
		if !newTask.attributes.IsValid() {
			return softassert.UnexpectedInternalErr(
				n.logger,
				"task attributes cannot have both destination and scheduled specified",
				fmt.Errorf("attributes: %v", newTask.attributes))
		}

		valid, err := n.validateTask(
			validateContext,
			newTask.attributes,
			newTask.task,
		)
		if err != nil {
			return err
		}
		if !valid {
			continue
		}

		registrableTask, ok := n.registry.taskFor(newTask.task)
		if !ok {
			return softassert.UnexpectedInternalErr(
				n.logger,
				"task type is not registered",
				fmt.Errorf("%s", reflect.TypeOf(newTask.task).String()))
		}

		taskBlob, err := n.serializeTaskWithCache(registrableTask, reflect.ValueOf(newTask.task))
		if err != nil {
			return err
		}

		componentTask := &persistencespb.ChasmComponentAttributes_Task{
			TypeId:                    registrableTask.taskTypeID,
			Destination:               newTask.attributes.Destination,
			ScheduledTime:             timestamppb.New(newTask.attributes.ScheduledTime),
			Data:                      taskBlob,
			VersionedTransition:       nextVersionedTransition,
			VersionedTransitionOffset: *taskOffset,
			PhysicalTaskStatus:        physicalTaskStatusNone,
		}

		if registrableTask.isPureTask {
			componentAttr.PureTasks = append(componentAttr.PureTasks, componentTask)
			sortPureTasks = true
		} else {
			componentAttr.SideEffectTasks = append(componentAttr.SideEffectTasks, componentTask)
		}

		*taskOffset++
	}

	if sortPureTasks {
		// pure tasks are sorted by scheduled time.
		slices.SortFunc(componentAttr.PureTasks, comparePureTasks)
	}

	return nil
}

// closeTransactionGeneratePhysicalSideEffectTask creates the physical queue task
// for a logical side-effect task and marks the logical task as created.
func (n *Node) closeTransactionGeneratePhysicalSideEffectTask(
	sideEffectTask *persistencespb.ChasmComponentAttributes_Task,
	nodePath []string,
	archetypeID ArchetypeID,
) {
	n.backend.AddTasks(&tasks.ChasmTask{
		WorkflowKey:         n.backend.GetWorkflowKey(),
		VisibilityTimestamp: sideEffectTask.ScheduledTime.AsTime(),
		Destination:         sideEffectTask.Destination,
		Category:            taskCategory(sideEffectTask),
		Info: &persistencespb.ChasmTaskInfo{
			ComponentInitialVersionedTransition:    n.serializedNode.Metadata.InitialVersionedTransition,
			ComponentLastUpdateVersionedTransition: n.serializedNode.Metadata.LastUpdateVersionedTransition,
			Path:                                   nodePath,
			TypeId:                                 sideEffectTask.TypeId,
			Data:                                   sideEffectTask.Data,
			ArchetypeId:                            archetypeID,
			TaskVersionedTransition:                sideEffectTask.VersionedTransition,
			TaskVersionedTransitionOffset:          sideEffectTask.VersionedTransitionOffset,
		},
	})
	sideEffectTask.PhysicalTaskStatus = physicalTaskStatusCreated
}

// closeTransactionGeneratePhysicalPureTask ensures exactly one physical pure
// task exists, scheduled at the earliest pure task in the tree. Older physical
// pure tasks are deleted; if no pure tasks remain, all of them are deleted.
func (n *Node) closeTransactionGeneratePhysicalPureTask(
	firstPureTask *persistencespb.ChasmComponentAttributes_Task,
	firstTaskNode *Node,
	archetypeID ArchetypeID,
) error {
	if firstPureTask == nil {
		n.backend.DeleteCHASMPureTasks(tasks.MaximumKey.FireTime)
		return nil
	}

	firstPureTaskScheduledTime := firstPureTask.ScheduledTime.AsTime()
	n.backend.DeleteCHASMPureTasks(firstPureTaskScheduledTime)

	if firstPureTask.PhysicalTaskStatus == physicalTaskStatusCreated {
		return nil
	}

	n.backend.AddTasks(&tasks.ChasmTaskPure{
		WorkflowKey:         n.backend.GetWorkflowKey(),
		VisibilityTimestamp: firstPureTaskScheduledTime,
		ArchetypeID:         archetypeID,
	})

	// We need to persist the task status change as well, so add the node
	// to the list of updated nodes.
	// However, since task status is a cluster local field, we don't really
	// update LastUpdateVersionedTransition for this node, and the change won't be replicated.
	firstPureTask.PhysicalTaskStatus = physicalTaskStatusCreated
	encodedPath, err := firstTaskNode.getEncodedPath()
	if err != nil {
		return err
	}
	n.systemMutation.UpdatedNodes[encodedPath] = firstTaskNode.serializedNode
	return nil
}

// resolveDeferredPointers resolves all deferred pointers in the tree.
// Returns error if any deferred pointer cannot be resolved, as deferred pointers
// cannot be persisted after transaction close.
func (n *Node) resolveDeferredPointers() error {
	for _, node := range n.andAllChildren() {
		if node.value == nil || !node.isComponent() {
			continue
		}

		for field := range node.valueFields() {
			if field.err != nil {
				return field.err
			}

			if field.kind != fieldKindSubField {
				continue
			}

			internalV := field.val.FieldByName(internalFieldName)
			internal, _ := internalV.Interface().(fieldInternal) //nolint:revive

			if internal.fieldType() == fieldTypeDeferredPointer && internal.value() != nil {
				// Must resolve the deferred pointer or fail the transaction.
				var resolvedPath []string
				var err error

				switch value := internal.value().(type) {
				case Component:
					resolvedPath, err = n.componentNodePath(value)
					if err == nil {
						targetNode := n.valueToNode[value]
						if !targetNode.isAncestorOf(node) {
							err = fmt.Errorf(
								"pointer target is not an ancestor of component at path %v",
								node.path(),
							)
						}
					}
				case proto.Message:
					resolvedPath, err = n.dataNodePath(value)
				default:
					err = softassert.UnexpectedInternalErr(
						n.logger,
						"unable to create a deferred pointer for values of type",
						fmt.Errorf("%T", value))
				}
				if err != nil {
					return softassert.UnexpectedInternalErr(
						n.logger,
						"failed to resolve deferred pointer during transaction close",
						err)
				}

				// Update the field to be a regular pointer, reusing the existing serializedNode,
				// and update the serializedNode's value.
				newInternal := newFieldInternalWithValue(fieldTypePointer, resolvedPath)
				newInternal.node = internal.node
				newInternal.node.setValue(resolvedPath)
				internalV.Set(reflect.ValueOf(newInternal))
			}
		}
	}
	return nil
}

// andAllChildren returns a sequence of all nodes in the tree starting from n, including n itself.
// The sequence is depth-first, pre-order traversal.
+func (n *Node) andAllChildren() iter.Seq2[[]string, *Node] { + return func(yield func([]string, *Node) bool) { + var walk func([]string, *Node) bool + walk = func(path []string, node *Node) bool { + if node == nil { + return true + } + if !yield(path, node) { + return false + } + for _, child := range node.children { + childPath := make([]string, len(path)+1) + copy(childPath, path) + childPath[len(path)] = child.nodeName + if !walk(childPath, child) { + return false + } + } + return true + } + walk(nil, n) + } +} + +func (n *Node) cleanupTransaction() { + n.mutation = NodesMutation{ + UpdatedNodes: make(map[string]*persistencespb.ChasmNode), + DeletedNodes: make(map[string]struct{}), + } + + // System mutation are most likely to be empty, so we reuse existing ones if possible. + if len(n.systemMutation.UpdatedNodes) != 0 { + n.systemMutation.UpdatedNodes = make(map[string]*persistencespb.ChasmNode) + } + if len(n.systemMutation.DeletedNodes) != 0 { + n.systemMutation.DeletedNodes = make(map[string]struct{}) + } + + n.newTasks = make(map[any][]taskWithAttributes) + if len(n.immediatePureTasks) != 0 { + // n.immediatePureTasks should already be empty after executeImmediatePureTasks() + // unless there's an error. + n.immediatePureTasks = make(map[any][]taskWithAttributes) + } + + n.isActiveStateDirty = false + n.needsPointerResolution = false +} + +// Snapshot returns all nodes in the tree that have been modified after the given min versioned transition. +// A nil exclusiveMinVT will be treated as the same as the zero versioned transition and returns all nodes in the tree. +// This method should only be invoked on root CHASM node when IsDirty() is false. 
func (n *Node) Snapshot(
	exclusiveMinVT *persistencespb.VersionedTransition,
) NodesSnapshot {
	if !softassert.That(n.logger, n.parent == nil, "chasm.Snapshot() should only be called on the root node") {
		panic(fmt.Sprintf("chasm.Snapshot() called on child node: %+v", n))
	}

	// TODO: add assertion on IsDirty() once implemented

	nodes := make(map[string]*persistencespb.ChasmNode)
	n.snapshotInternal(exclusiveMinVT, nodes)

	return NodesSnapshot{
		Nodes: nodes,
	}
}

// snapshotInternal recursively collects nodes updated after exclusiveMinVT
// into the given nodes map, keyed by encoded path.
func (n *Node) snapshotInternal(
	exclusiveMinVT *persistencespb.VersionedTransition,
	nodes map[string]*persistencespb.ChasmNode,
) {
	if n == nil {
		return
	}

	if transitionhistory.Compare(n.serializedNode.Metadata.LastUpdateVersionedTransition, exclusiveMinVT) > 0 {
		encodedPath, err := n.getEncodedPath()
		if !softassert.That(n.logger, err == nil, "chasm path encoding should always succeed on clean tree") {
			panic(fmt.Sprintf("failed to encode chasm path on clean tree: %v", err))
		}
		nodes[encodedPath] = n.serializedNode
	}

	for _, childNode := range n.children {
		childNode.snapshotInternal(
			exclusiveMinVT,
			nodes,
		)
	}
}

// ApplySystemMutation should only used by internal persistence layer logic to force apply
// cluster specific chasm tree changes.
// DO NOT USE if you don't know why this method is introduced.
func (n *Node) ApplySystemMutation(
	mutation NodesMutation,
) error {
	if err := n.applyDeletions(mutation.DeletedNodes, true); err != nil {
		return err
	}

	return n.applyUpdates(mutation.UpdatedNodes, true)
}

// ApplyMutation is used by replication stack to apply node
// mutations from the source cluster.
//
// NOTE: It will be an error if UpdatedNodes and DeletedNodes have overlapping keys,
// as the CHASM tree does not have enough information to tell if the deletion happens
// before or after the update.
func (n *Node) ApplyMutation(
	mutation NodesMutation,
) error {
	if err := n.applyDeletions(mutation.DeletedNodes, false); err != nil {
		return err
	}

	if err := n.applyUpdates(mutation.UpdatedNodes, false); err != nil {
		return err
	}

	// For replication case, we only update the search attributes and memo
	// but not force updating the visibility component itself to generate a task.
	//
	// This is because the visibility component is already force updated in the active
	// cluster and that forced update will be replicated as well. Standby cluster
	// only needs to track the current SA and memo to prevent generating an unnecessary
	// visibility component update & task if there is a failover.
	//
	// TODO: combine this with the logic in CloseTransactionForceUpdateVisibility;
	// right now that force update logic only applies to the active cluster.
	immutableContext := NewContext(context.TODO(), n)
	rootComponent, err := n.root().Component(immutableContext, ComponentRef{})
	if err != nil {
		return err
	}
	saProvider, ok := rootComponent.(VisibilitySearchAttributesProvider)
	if ok {
		saSlice := saProvider.SearchAttributes(immutableContext)
		n.currentSA = searchAttributeKeyValuesToMap(saSlice)
	}
	memoProvider, ok := rootComponent.(VisibilityMemoProvider)
	if ok {
		n.currentMemo = proto.Clone(memoProvider.Memo(immutableContext))
	}

	return nil
}

// ApplySnapshot is used by replication stack to apply node
// snapshot from the source cluster.
//
// If we simply substituted the entire CHASM tree, we would be
// forced to close the transaction as snapshot and potentially
// write extra data to persistence.
// This method will instead figure out the mutations needed to
// bring the current tree to be the same as the snapshot,
// thus allowing us to close the transaction as mutation.
func (n *Node) ApplySnapshot(
	incomingSnapshot NodesSnapshot,
) error {
	currentSnapshot := n.Snapshot(nil)

	mutation := NodesMutation{
		UpdatedNodes: make(map[string]*persistencespb.ChasmNode),
		DeletedNodes: make(map[string]struct{}),
	}

	// Anything present locally but absent from the incoming snapshot is deleted.
	for encodedPath := range currentSnapshot.Nodes {
		if _, ok := incomingSnapshot.Nodes[encodedPath]; !ok {
			mutation.DeletedNodes[encodedPath] = struct{}{}
		}
	}

	// Anything new, or with a different LastUpdateVersionedTransition, is updated.
	for encodedPath, incomingNode := range incomingSnapshot.Nodes {
		currentNode, ok := currentSnapshot.Nodes[encodedPath]
		if !ok {
			mutation.UpdatedNodes[encodedPath] = incomingNode
			continue
		}

		if transitionhistory.Compare(
			currentNode.Metadata.LastUpdateVersionedTransition,
			incomingNode.Metadata.LastUpdateVersionedTransition,
		) != 0 {
			mutation.UpdatedNodes[encodedPath] = incomingNode
		}
	}

	return n.ApplyMutation(mutation)
}

// applyDeletions removes the nodes at the given encoded paths from the tree,
// recording the deletions in the (system) mutation. Missing nodes and the root
// node are skipped — see inline comments.
func (n *Node) applyDeletions(
	deletedNodes map[string]struct{},
	isSystemUpdates bool,
) error {
	for encodedPath := range deletedNodes {
		path, err := n.pathEncoder.Decode(encodedPath)
		if err != nil {
			return err
		}

		node, ok := n.findNode(path)
		if !ok {
			// Already deleted.
			// This could happen when:
			// - If the mutations passed in include changes
			//   older than the current state of the tree.
			// - We already applied the deletion on a parent node.
			continue
		}

		if node == n.root() {
			// Root node can never be deleted
			// This can happen when:
			// 1. CHASM framework is disabled in source cluster and sends an
			// empty snapshot to the standby cluster. If the standby cluster
			// has a non-empty chasm tree, the root node will be marked for
			// deletion and we will lose archetype information for the execution,
			// and hit other undefined issues when root is deleted.
			// Disabled CHASM framework itself is already an undefined situation
			// for non-workflow chasm executions, and we are ok with not deleting
			// the root node.
			//
			// 2. CHASM is enabled but the execution is a workflow which doesn't
			// have any chasm nodes. In this case, again an empty snapshot will be sent to
			// standby cluster.
			// In this case, we can actually choose to delete the root itself because empty
			// chasm tree is assumed to be a Workflow. However, given chasm workflow component's
			// state is an empty proto, skipping deletion is fine as well. All other child nodes
			// will still be deleted.
			continue
		}

		if err := node.delete(isSystemUpdates); err != nil {
			return err
		}
	}

	return nil
}

// applyUpdates creates or overwrites nodes at the given encoded paths,
// carrying over cluster-local physical task status from the local node
// where tasks match, and recording the changes in the (system) mutation.
func (n *Node) applyUpdates(
	updatedNodes map[string]*persistencespb.ChasmNode,
	isSystemUpdates bool,
) error {
	for encodedPath, updatedNode := range updatedNodes {
		path, err := n.pathEncoder.Decode(encodedPath)
		if err != nil {
			return err
		}

		node, ok := n.findNode(path)
		if !ok {
			// Node doesn't exist, we need to create it.
			newNode := n.setSerializedNode(path, encodedPath, updatedNode)
			newNode.resetTaskStatus()
			if isSystemUpdates {
				n.systemMutation.UpdatedNodes[encodedPath] = newNode.serializedNode
				delete(n.systemMutation.DeletedNodes, encodedPath)
			} else {
				n.mutation.UpdatedNodes[encodedPath] = newNode.serializedNode
				delete(n.mutation.DeletedNodes, encodedPath)
			}
			continue
		}

		// An empty node may be created when child update is applied before the parent,
		// in which case node.serializedNode will be nil.
		if node.serializedNode == nil || transitionhistory.Compare(
			node.serializedNode.Metadata.LastUpdateVersionedTransition,
			updatedNode.Metadata.LastUpdateVersionedTransition,
		) != 0 {
			localComponentAttr := node.serializedNode.GetMetadata().GetComponentAttributes()
			updatedComponentAttr := updatedNode.GetMetadata().GetComponentAttributes()
			if localComponentAttr != nil && updatedComponentAttr != nil {
				n.carryOverTaskStatus(
					localComponentAttr.SideEffectTasks,
					updatedComponentAttr.SideEffectTasks,
					compareSideEffectTasks,
				)
				n.carryOverTaskStatus(
					localComponentAttr.PureTasks,
					updatedComponentAttr.PureTasks,
					comparePureTasks,
				)
			}

			if isSystemUpdates {
				n.systemMutation.UpdatedNodes[encodedPath] = updatedNode
				delete(n.systemMutation.DeletedNodes, encodedPath)
			} else {
				n.mutation.UpdatedNodes[encodedPath] = updatedNode
				delete(n.mutation.DeletedNodes, encodedPath)
			}
			node.setValue(nil)
			node.setValueState(valueStateNeedDeserialize)
			node.serializedNode = updatedNode

			// Clearing decoded value for ancestor nodes is not necessary because the value field is not referenced directly.
			// Parent node is pointing to the Node struct.
		}
	}

	return nil
}

// RefreshTasks resets physical task status on all nodes so that physical tasks
// are regenerated when CloseTransaction() is called.
func (n *Node) RefreshTasks() error {
	for _, node := range n.andAllChildren() {
		// Only reset task status here, the actual task generation will be done when
		// CloseTransaction() is called to persist the changes.
		if reset := node.resetTaskStatus(); !reset {
			continue
		}

		encodedPath, err := node.getEncodedPath()
		if err != nil {
			return err
		}

		// Task status is a cluster local field and changes to it doesn't need to be replicated.
		// Recording changes in systemMutation so that:
		// 1. it can be persisted.
		// 2. n.IsStateDirty() can still return false so that mutable state's transition history
		//    won't be updated.
		n.systemMutation.UpdatedNodes[encodedPath] = node.serializedNode
	}

	return nil
}

// resetTaskStatus sets the physical task status of every pure and side-effect
// task on this node back to physicalTaskStatusNone. Returns true if any task
// previously had a physical task created.
func (n *Node) resetTaskStatus() bool {
	if n.serializedNode == nil || n.serializedNode.GetMetadata() == nil {
		return false
	}

	componentAttr := n.serializedNode.GetMetadata().GetComponentAttributes()
	if componentAttr == nil {
		return false
	}

	reset := false
	for _, componentTasks := range [][]*persistencespb.ChasmComponentAttributes_Task{
		componentAttr.PureTasks,
		componentAttr.SideEffectTasks,
	} {
		for _, t := range componentTasks {
			if !reset && t.PhysicalTaskStatus == physicalTaskStatusCreated {
				reset = true
			}
			t.PhysicalTaskStatus = physicalTaskStatusNone
		}
	}

	return reset
}

// getEncodedPath returns the encoded form of this node's path, caching the
// result on the node after the first successful encoding.
func (n *Node) getEncodedPath() (string, error) {
	if n.encodedPath != nil {
		return *n.encodedPath, nil
	}
	encodePath, err := n.pathEncoder.Encode(n, n.path())
	if err == nil {
		n.encodedPath = &encodePath
	}
	return encodePath, err
}

// path returns this node's path from the root as a slice of node names;
// the root node's path is an empty slice.
func (n *Node) path() []string {
	if n.parent == nil {
		return []string{}
	}

	return append(n.parent.path(), n.nodeName)
}

// findNode walks the given path from this node and returns the node found,
// or false if any path segment is missing.
func (n *Node) findNode(
	path []string,
) (*Node, bool) {
	if len(path) == 0 {
		return n, true
	}

	childName := path[0]
	childNode, ok := n.children[childName]
	if !ok {
		return nil, false
	}
	return childNode.findNode(path[1:])
}

// isAncestorOf returns true if n is a proper ancestor of descendant.
// It walks from descendant up through parent links to check if n is encountered.
func (n *Node) isAncestorOf(descendant *Node) bool {
	current := descendant.parent
	for current != nil {
		if current == n {
			return true
		}
		current = current.parent
	}
	return false
}

// delete removes this node and its entire subtree from the tree, recording the
// deletions in the (system) mutation and dropping cached task values.
func (n *Node) delete(isSystemDelete bool) error {
	for _, childNode := range n.children {
		if err := childNode.delete(isSystemDelete); err != nil {
			return err
		}
	}

	// If a parent is about to be removed, it must not have any children.
	softassert.That(n.logger, len(n.children) == 0, "children must be empty when node is removed")

	if n.parent != nil {
		delete(n.parent.children, n.nodeName)
	}

	// Set value to nil which also deletes the value from valueToNode map.
	n.setValue(nil)

	encodedPath, err := n.getEncodedPath()
	if err != nil {
		return err
	}

	// TODO: consider remove entries from UpdatedNodes map as well
	// if the same node is updated and then deleted in the same transaction.
	//
	// That's not a problem today though and DeletedNodes entries are always added
	// before UpdatedNodes entries.
	// - For active logic, DeletedNodes are added upon syncSubComponents(),
	//   and UpdatedNodes are added when closing transaction and serializing nodes.
	// - For standby replication logic, mutable state calls ApplyMutation() twice,
	//   first with a deletion only mutation for tombstone nodes, and then an
	//   update only mutation.
	if isSystemDelete {
		n.systemMutation.DeletedNodes[encodedPath] = struct{}{}
	} else {
		n.mutation.DeletedNodes[encodedPath] = struct{}{}
	}

	n.cleanupCachedTasks()

	return nil
}

// cleanupCachedTasks drops all cached deserialized task values belonging to
// this node's component from the tree-level task value cache.
func (n *Node) cleanupCachedTasks() {
	if !n.isComponent() {
		return
	}

	componentAttr := n.serializedNode.GetMetadata().GetComponentAttributes()
	for _, task := range componentAttr.GetPureTasks() {
		delete(n.taskValueCache, task.Data)
	}
	for _, task := range componentAttr.GetSideEffectTasks() {
		delete(n.taskValueCache, task.Data)
	}
}

// IsDirty returns true if any node in the tree has been modified,
// and need to be persisted in DB.
// The result will be reset to false after a call to CloseTransaction().
func (n *Node) IsDirty() bool {
	if n.IsStateDirty() {
		return true
	}

	return len(n.systemMutation.UpdatedNodes) > 0 || len(n.systemMutation.DeletedNodes) > 0
}

// IsStateDirty returns true if any node in the tree has USER DATA modified,
// which need to be persisted to DB AND replicated to other clusters.
// Terminate terminates the execution represented by this CHASM tree by
// delegating to the root component's RootComponent.Terminate handler, then
// marks the tree as terminated.
//
// Must be called on the root node; calling it on a child node is an internal
// error. Returns an internal error if the root component does not implement
// the RootComponent interface.
func (n *Node) Terminate(
	request TerminateComponentRequest,
) error {
	if n.parent != nil {
		return softassert.UnexpectedInternalErr(
			n.logger,
			"Terminate should only be called on the root node",
			fmt.Errorf("node path: %v", n.path()),
		)
	}

	// NOTE(review): context.TODO() — presumably no caller context is available
	// on this path; confirm whether a real context should be threaded through.
	mutableContext := NewMutableContext(context.TODO(), n.root())
	component, err := n.Component(mutableContext, ComponentRef{})
	if err != nil {
		return err
	}
	rootComponent, ok := component.(RootComponent)
	if !ok {
		return softassert.UnexpectedInternalErr(
			n.logger,
			"root node must implement RootComponent interface",
			fmt.Errorf("component type: %T", component),
		)
	}

	_, err = rootComponent.Terminate(mutableContext, request)
	if err != nil {
		return err
	}

	n.terminated = true
	return nil
}
+ return n.root().serializedNode.Metadata.GetComponentAttributes().GetTypeId() +} + +// Archetype returns the root component's fully qualified name. +// Deprecated: use ArchetypeID() instead, this method will be removed. +func (n *Node) Archetype() (Archetype, error) { + archetypeID := n.ArchetypeID() + + fqn, ok := n.registry.ComponentFqnByID(archetypeID) + if !ok { + return "", softassert.UnexpectedInternalErr( + n.logger, + "unknown archetype id", + fmt.Errorf("%d", archetypeID)) + } + + return Archetype(fqn), nil +} + +func (n *Node) root() *Node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// isComponentTaskExpired returns true when the task's scheduled time is equal +// or before the reference time. The caller should also make sure to account +// for skew between the physical task queue and the database by adjusting +// referenceTime in advance. +func isComponentTaskExpired( + referenceTime time.Time, + task *persistencespb.ChasmComponentAttributes_Task, +) bool { + if task.ScheduledTime == nil { + return false + } + + scheduledTime := task.ScheduledTime.AsTime().Truncate(common.ScheduledTaskMinPrecision) + referenceTime = referenceTime.Truncate(common.ScheduledTaskMinPrecision) + + return !scheduledTime.After(referenceTime) +} + +// EachPureTask runs the callback for all expired/runnable pure tasks within the +// CHASM tree (including invalid tasks). The CHASM tree is left untouched, even +// if invalid tasks are detected (these are cleaned up as part of transaction +// close). +func (n *Node) EachPureTask( + referenceTime time.Time, + callback func(handler NodePureTask, taskAttributes TaskAttributes, taskInstance any) (bool, error), +) error { + chasmContext := NewContext(context.Background(), n) + + // Because tree structure may change during the processing, + // we first gather all nodes that have pure tasks that are ready for execution. 
+ var componentToProcess []any + for _, node := range n.andAllChildren() { + // Skip nodes that aren't serialized yet. + if node.serializedNode == nil || node.serializedNode.Metadata == nil { + continue + } + + componentAttr := node.serializedNode.Metadata.GetComponentAttributes() + // Skip nodes that aren't components. + if componentAttr == nil { + continue + } + + if len(componentAttr.PureTasks) == 0 { + continue + } + + if !isComponentTaskExpired(referenceTime, componentAttr.PureTasks[0]) { + continue + } + + // This component node as a pure task that's ready to execute + err := node.prepareComponentValue(chasmContext) + if err != nil { + return err + } + + componentToProcess = append(componentToProcess, node.value) + } + + for _, component := range componentToProcess { + + // Node get deleted when previous pure tasks of other components are executed. + node, ok := n.valueToNode[component] + if !ok { + continue + } + + componentAttr := node.serializedNode.Metadata.GetComponentAttributes() + + for _, task := range componentAttr.GetPureTasks() { + if !isComponentTaskExpired(referenceTime, task) { + break + } + + // Node get deleted when previous pure tasks of the same component are executed. + // e.g. via a (parent) pointer. + _, ok := n.valueToNode[component] + if !ok { + break + } + + taskInstance, err := node.deserializeComponentTask(task) + if err != nil { + return err + } + + taskAttributes := TaskAttributes{ + ScheduledTime: task.ScheduledTime.AsTime(), + Destination: task.Destination, + } + + executed, err := callback(node, taskAttributes, taskInstance) + if err != nil { + return err + } + + if executed { + if err := n.syncSubComponents(); err != nil { + return err + } + } + + // Processed task should become invalid and will be removed upon CloseTransaction(). + + // TODO: Add a validation for that and return an internal error if tasks is still valid after processing. 
+ // Alternatively, remove task from PureTasks slice after processing, but that requires persisting the + // task changes as well even if the component itself is not changed. + } + } + + return nil +} + +func newNode( + base *nodeBase, + parent *Node, + nodeName string, +) *Node { + return &Node{ + nodeBase: base, + parent: parent, + children: make(map[string]*Node), + nodeName: nodeName, + } +} + +func compareSideEffectTasks(a, b *persistencespb.ChasmComponentAttributes_Task) int { + if cmpResult := transitionhistory.Compare(a.VersionedTransition, b.VersionedTransition); cmpResult != 0 { + return cmpResult + } + return cmp.Compare(a.VersionedTransitionOffset, b.VersionedTransitionOffset) +} + +func comparePureTasks(a, b *persistencespb.ChasmComponentAttributes_Task) int { + if cmpResult := a.ScheduledTime.AsTime().Compare(b.ScheduledTime.AsTime()); cmpResult != 0 { + return cmpResult + } + + return compareSideEffectTasks(a, b) +} + +func (n *Node) carryOverTaskStatus( + sourceTasks, targetTasks []*persistencespb.ChasmComponentAttributes_Task, + compareFn func(a, b *persistencespb.ChasmComponentAttributes_Task) int, +) { + sourceIdx, targetIdx := 0, 0 + for sourceIdx < len(sourceTasks) && targetIdx < len(targetTasks) { + sourceTask := sourceTasks[sourceIdx] + targetTask := targetTasks[targetIdx] + + switch compareFn(sourceTask, targetTask) { + case 0: + // Task match, carry over status. + targetTask.PhysicalTaskStatus = sourceTask.PhysicalTaskStatus + // Use existing task data to avoid taskValueCache miss, since the cache uses + // *DataBlob as the key. + // Otherwise we have to clear cache for all tasks in the node, and re-deserialize + // tasks later. + targetTask.Data = sourceTask.Data + sourceIdx++ + targetIdx++ + case -1: + // Source task has a smaller key, meaning the task has been deleted. + // Move on to the next source task. 
+ sourceIdx++ + delete(n.taskValueCache, sourceTask.Data) + case 1: + // Source task has a larger key, meaning there's a new task inserted. + // Sanitize incoming task status. + targetTask.PhysicalTaskStatus = physicalTaskStatusNone + targetIdx++ + } + } + + // Sanitize incoming task status for remaining tasks. + for ; targetIdx < len(targetTasks); targetIdx++ { + targetTasks[targetIdx].PhysicalTaskStatus = physicalTaskStatusNone + } + for ; sourceIdx < len(sourceTasks); sourceIdx++ { + delete(n.taskValueCache, sourceTasks[sourceIdx].Data) + } +} + +func taskCategory( + task *persistencespb.ChasmComponentAttributes_Task, +) tasks.Category { + if task.TypeId == visibilityTaskTypeID { + return tasks.CategoryVisibility + } + + if task.Destination != "" { + return tasks.CategoryOutbound + } + + if task.ScheduledTime == nil || + task.ScheduledTime.AsTime().Equal(TaskScheduledTimeImmediate) { + return tasks.CategoryTransfer + } + return tasks.CategoryTimer +} + +func (n *Node) deserializeTaskWithCache( + registrableTask *RegistrableTask, + taskBlob *commonpb.DataBlob, +) (taskValue reflect.Value, retErr error) { + if cachedValue, ok := n.taskValueCache[taskBlob]; ok { + return cachedValue, nil + } + + taskValue, err := deserializeTask(registrableTask, taskBlob) + if err != nil { + return reflect.Value{}, err + } + + n.taskValueCache[taskBlob] = taskValue + return taskValue, nil +} + +func (n *Node) serializeTaskWithCache( + registrableTask *RegistrableTask, + taskValue reflect.Value, +) (*commonpb.DataBlob, error) { + taskBlob, err := serializeTask(registrableTask, taskValue) + if err != nil { + return nil, err + } + + n.taskValueCache[taskBlob] = taskValue + return taskBlob, nil +} + +func deserializeTask( + registrableTask *RegistrableTask, + taskBlob *commonpb.DataBlob, +) (taskValue reflect.Value, retErr error) { + if registrableTask.goType.AssignableTo(protoMessageT) { + taskValue, err := unmarshalProto(taskBlob, registrableTask.goType) + if err != nil { + return 
reflect.Value{}, err + } + return taskValue, nil + } + + taskGoType := registrableTask.goType + if taskGoType.Kind() == reflect.Ptr { + taskGoType = taskGoType.Elem() + } + taskValue = reflect.New(taskGoType) + + // At this point taskGoType is guaranteed to be a struct and + // taskValue is a pointer to struct. + + defer func() { + if retErr == nil && registrableTask.goType.Kind() == reflect.Struct { + taskValue = taskValue.Elem() + } + }() + + if taskGoType.NumField() == 0 { + return taskValue, nil + } + + // TODO: consider pre-calculating the proto field num when registring the task type. + + protoMessageFound := false + for i := 0; i < taskGoType.NumField(); i++ { + fieldV := taskValue.Elem().Field(i) + fieldT := taskGoType.Field(i).Type + if !fieldT.AssignableTo(protoMessageT) { + continue + } + + if protoMessageFound { + return reflect.Value{}, serviceerror.NewInternal("only one proto field allowed in task struct") + } + protoMessageFound = true + + value, err := unmarshalProto(taskBlob, fieldT) + if err != nil { + return reflect.Value{}, err + } + + fieldV.Set(value) + } + + return taskValue, nil +} + +func serializeTask( + registrableTask *RegistrableTask, + taskValue reflect.Value, +) (*commonpb.DataBlob, error) { + protoValue, ok := taskValue.Interface().(proto.Message) + if ok { + return serialization.ProtoEncode(protoValue) + } + + taskGoType := registrableTask.goType + + // Handle pointer to struct. + if taskGoType.Kind() == reflect.Ptr { + taskGoType = taskGoType.Elem() + taskValue = taskValue.Elem() + } + + // Handle empty task struct. + if taskGoType.NumField() == 0 { + return &commonpb.DataBlob{ + Data: nil, + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + }, nil + } + + // TODO: consider pre-calculating the proto field num when registring the task type. 
// ExecutePureTask validates and then executes the given taskInstance against the
// node's component. Executing an invalid task is a no-op (no error returned).
// Returns true when the task passed validation and was executed (possibly with
// an execution error), false when it was skipped as invalid.
func (n *Node) ExecutePureTask(
	baseCtx context.Context,
	taskAttributes TaskAttributes,
	taskInstance any,
) (_ bool, retErr error) {
	registrableTask, ok := n.registry.taskFor(taskInstance)
	if !ok {
		return false, fmt.Errorf("unknown task type for task instance goType '%s'", reflect.TypeOf(taskInstance).Name())
	}

	if !registrableTask.isPureTask {
		return false, fmt.Errorf("ExecutePureTask called on a SideEffect task '%s'", registrableTask.fqType())
	}

	progressIntentCtx := newContextWithOperationIntent(baseCtx, OperationIntentProgress)
	validationContext := NewContext(progressIntentCtx, n)

	// Ensure this node's component value is hydrated before execution.
	if err := n.prepareComponentValue(validationContext); err != nil {
		return false, err
	}

	// Run the task's registered validator before execution.
	valid, err := n.validateTask(validationContext, taskAttributes, taskInstance)
	if err != nil {
		return false, err
	}
	if !valid {
		// Invalid task: skip execution without error; it will be cleaned up at
		// transaction close.
		return false, nil
	}

	executionContext := NewMutableContext(progressIntentCtx, n)
	component, err := n.Component(executionContext, ComponentRef{})
	if err != nil {
		return false, err
	}

	// Convert a panic in the task handler into retErr instead of crashing.
	defer log.CapturePanic(n.logger, &retErr)

	archetypeTag := metrics.ArchetypeTag("")
	if name, ok := n.registry.ArchetypeDisplayName(n.ArchetypeID()); ok {
		archetypeTag = metrics.ArchetypeTag(name)
	}
	chasmTaskTypeTag := metrics.ChasmTaskTypeTag(registrableTask.fqType())
	metricsHandler := n.metricsHandler.WithTags(archetypeTag)

	execErr := registrableTask.pureTaskExecuteFn(
		executionContext,
		component,
		taskAttributes,
		taskInstance,
		n.registry,
	)

	metrics.ChasmPureTaskRequests.With(metricsHandler).Record(1, chasmTaskTypeTag)

	if execErr != nil {
		metrics.ChasmPureTaskErrors.With(metricsHandler).Record(1, chasmTaskTypeTag)
		// true: the task did run; the error is an execution failure, not invalidity.
		return true, execErr
	}

	// TODO - a task validator must succeed validation after a task executes
	// successfully (without error), otherwise it will generate an infinite loop.
	// Check for this case by marking the in-memory task as having executed, which the
	// CloseTransaction method will check against.
	//
	// See: https://github.com/temporalio/temporal/pull/7701#discussion_r2072026993

	return true, nil
}
// ValidateSideEffectTask runs a side effect task's associated validator,
// returning true if the task is valid. Intended for use by standby handlers.
//
// Returns (false, nil) — task can be skipped/deleted — when the target node no
// longer exists, its component was re-created (initial versioned transition
// mismatch), or the originating logical task was dropped.
//
// If validation fails, that error is returned.
func (n *Node) ValidateSideEffectTask(
	ctx context.Context,
	chasmTask *tasks.ChasmTask,
) (isValid bool, retErr error) {

	taskInfo := chasmTask.Info
	taskTypeID := taskInfo.TypeId
	registrableTask, ok := n.registry.TaskByID(taskTypeID)
	if !ok {
		return false, softassert.UnexpectedInternalErr(
			n.logger,
			"unknown task type id",
			fmt.Errorf("%d", taskTypeID))
	}

	if registrableTask.isPureTask {
		return false, softassert.UnexpectedInternalErr(
			n.logger,
			"ValidateSideEffectTask called on a Pure task, task type: ",
			fmt.Errorf("%s", registrableTask.fqType()))
	}

	node, ok := n.findNode(taskInfo.Path)
	if !ok {
		// Target node is gone; the physical task is stale.
		return false, nil
	}

	// node.serializedNode should always be available when running a side effect task.
	if transitionhistory.Compare(
		taskInfo.ComponentInitialVersionedTransition,
		node.serializedNode.Metadata.InitialVersionedTransition,
	) != 0 {
		// The component at this path was re-created since the task was generated.
		return false, nil
	}

	// Verify the logical task this physical task was generated from still exists,
	// and capture it so we can use its Data pointer for the deserialization cache.
	//
	// A logical task can be dropped mid-flight (e.g. component paused then unpaused)
	// without the physical task being cancelled. Checking existence here prevents
	// stale physical tasks from executing after their logical counterpart is gone.
	//
	// TaskVersionedTransition is unset on physical tasks created before this field
	// was added; skip the check in that case to preserve backward compatibility.
	var logicalTask *persistencespb.ChasmComponentAttributes_Task
	if taskInfo.TaskVersionedTransition != nil {
		componentAttr := node.serializedNode.Metadata.GetComponentAttributes()
		for _, t := range componentAttr.GetSideEffectTasks() {
			if transitionhistory.Compare(t.VersionedTransition, taskInfo.TaskVersionedTransition) == 0 &&
				t.VersionedTransitionOffset == taskInfo.TaskVersionedTransitionOffset {
				logicalTask = t
				break
			}
		}
		if logicalTask == nil {
			return false, nil
		}
	}

	// Component must be hydrated before the task's validator is called.
	validateCtx := NewContext(newContextWithOperationIntent(ctx, OperationIntentProgress), n)
	if err := node.prepareComponentValue(validateCtx); err != nil {
		return false, err
	}

	// Drop the cached deserialized task on panic or error so a retry starts clean;
	// the panic itself is re-raised for the caller's panic handling.
	defer func() {
		if rec := recover(); rec != nil {
			chasmTask.DeserializedTask = reflect.Value{}
			panic(rec) //nolint:forbidigo
		}
		if retErr != nil {
			chasmTask.DeserializedTask = reflect.Value{}
		}
	}()

	if !chasmTask.DeserializedTask.IsValid() {
		var err error
		if logicalTask != nil {
			// Use the logical task's Data pointer so deserialization shares the
			// node's taskValueCache with closeTransactionCleanupInvalidTasks.
			// The physical task's taskInfo.Data is a different pointer (freshly
			// allocated from the physical task row) and would always miss the cache.
			chasmTask.DeserializedTask, err = node.deserializeTaskWithCache(registrableTask, logicalTask.Data)
		} else {
			// Backward compatibility: physical task predates TaskVersionedTransition.
			chasmTask.DeserializedTask, err = deserializeTask(registrableTask, taskInfo.Data)
		}
		if err != nil {
			return false, err
		}
	}

	return node.validateTask(
		validateCtx,
		TaskAttributes{
			ScheduledTime: chasmTask.GetVisibilityTime(),
			Destination:   chasmTask.Destination,
		},
		chasmTask.DeserializedTask.Interface(),
	)
}
// invokeSideEffectTaskFn deserializes the side effect task (caching the result
// on the ChasmTask), builds a ComponentRef whose validation runs lazily when
// the handler first accesses the component, and invokes taskFn (the execute or
// discard handler) without holding the execution lock.
//
// The cached DeserializedTask is cleared on panic (re-raised) and on most
// errors; NotFound is kept so a retried task does not re-deserialize for a
// permanently missing component.
func (n *Node) invokeSideEffectTaskFn(
	ctx context.Context,
	registrableTask *RegistrableTask,
	executionKey ExecutionKey,
	chasmTask *tasks.ChasmTask,
	validate func(NodeBackend, Context, Component) error,
	taskFn func(context.Context, ComponentRef, TaskAttributes, any) error,
) (retErr error) {
	taskInfo := chasmTask.Info

	defer func() {
		if rec := recover(); rec != nil {
			chasmTask.DeserializedTask = reflect.Value{}
			panic(rec) //nolint:forbidigo
		}
		if retErr != nil && !errors.As(retErr, new(*serviceerror.NotFound)) {
			chasmTask.DeserializedTask = reflect.Value{}
		}
	}()

	if !chasmTask.DeserializedTask.IsValid() {
		var err error
		// TODO: Change physical side effect task to reference logical task and
		// then use deserializeTaskWithCache as well.
		chasmTask.DeserializedTask, err = deserializeTask(registrableTask, taskInfo.Data)
		if err != nil {
			return err
		}
	}
	taskValue := chasmTask.DeserializedTask

	taskAttributes := TaskAttributes{
		ScheduledTime: chasmTask.GetVisibilityTime(),
		Destination:   chasmTask.Destination,
	}

	ref := ComponentRef{
		ExecutionKey:          executionKey,
		archetypeID:           ArchetypeID(taskInfo.GetArchetypeId()),
		executionLastUpdateVT: taskInfo.ComponentLastUpdateVersionedTransition,
		componentPath:         taskInfo.Path,
		componentInitialVT:    taskInfo.ComponentInitialVersionedTransition,

		// Validate the Ref only once it is accessed by the task's handler.
		validationFn: makeValidationFn(registrableTask, validate, taskAttributes, taskValue),
	}

	ctx = newContextWithOperationIntent(ctx, OperationIntentProgress)

	// Convert a panic in the task handler into retErr instead of crashing.
	defer log.CapturePanic(n.logger, &retErr)

	return taskFn(ctx, ref, taskAttributes, taskValue.Interface())
}
+func makeValidationFn( + registrableTask *RegistrableTask, + validate func(NodeBackend, Context, Component) error, + taskAttributes TaskAttributes, + taskValue reflect.Value, +) func(NodeBackend, Context, Component, *Registry) error { + return func(backend NodeBackend, ctx Context, component Component, registry *Registry) error { + // Call the provided validation callback. + err := validate(backend, ctx, component) + if err != nil { + return err + } + + // Side effect's task validator is invoked inside the task handler, + // so the panic wrapper ExecuteSideEffectTask() will cover this case. + + // Call the TaskValidator. + valid, err := registrableTask.validateFn( + ctx, + component, + taskAttributes, + taskValue.Interface(), + registry, + ) + if err != nil { + return err + } + if !valid { + return errTaskNotValid + } + return nil + } +} diff --git a/chasm/tree_test.go b/chasm/tree_test.go new file mode 100644 index 00000000000..bce043594a3 --- /dev/null +++ b/chasm/tree_test.go @@ -0,0 +1,3890 @@ +package chasm + +import ( + "context" + "errors" + "fmt" + "reflect" + "slices" + "sort" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + enumsspb "go.temporal.io/server/api/enums/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/definition" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence/serialization" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/testing/protoassert" + "go.temporal.io/server/common/testing/protorequire" + "go.temporal.io/server/common/testing/testlogger" + "go.temporal.io/server/service/history/tasks" + "go.uber.org/mock/gomock" + 
"google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/timestamppb" +) + +type ( + nodeSuite struct { + suite.Suite + *require.Assertions + protorequire.ProtoAssertions + + controller *gomock.Controller + nodeBackend *MockNodeBackend + testLibrary *TestLibrary + + registry *Registry + timeSource *clock.EventTimeSource + nodePathEncoder NodePathEncoder + logger log.Logger + metricsHandler metrics.Handler + } +) + +func TestNodeSuite(t *testing.T) { + suite.Run(t, new(nodeSuite)) +} + +func (s *nodeSuite) SetupTest() { + s.initAssertions() + s.controller = gomock.NewController(s.T()) + s.nodeBackend = &MockNodeBackend{} + s.testLibrary = newTestLibrary(s.controller) + + s.logger = testlogger.NewTestLogger(s.T(), testlogger.FailOnAnyUnexpectedError) + s.metricsHandler = metrics.NoopMetricsHandler + s.registry = NewRegistry(s.logger) + err := s.registry.Register(s.testLibrary) + s.NoError(err) + err = s.registry.Register(&CoreLibrary{}) + s.NoError(err) + + s.timeSource = clock.NewEventTimeSource() + s.nodePathEncoder = &testNodePathEncoder{} +} + +func (s *nodeSuite) SetupSubTest() { + s.initAssertions() +} + +func (s *nodeSuite) initAssertions() { + // `s.Assertions` (as well as other test helpers which depends on `s.T()`) must be initialized on + // both test and subtest levels (but not suite level, where `s.T()` is `nil`). + // + // If these helpers are not reinitialized on subtest level, any failed `assert` in + // subtest will fail the entire test (not subtest) immediately without running other subtests. 
// TestNewTree verifies that a tree built from a flat map of encoded-path ->
// persistence nodes reconstructs the expected parent/child structure, by
// checking a preorder traversal against the expected node order.
func (s *nodeSuite) TestNewTree() {
	persistenceNodes := map[string]*persistencespb.ChasmNode{
		"": {
			Metadata: &persistencespb.ChasmNodeMetadata{
				InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1},
				Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{
					ComponentAttributes: &persistencespb.ChasmComponentAttributes{
						TypeId: testComponentTypeID,
					},
				},
			},
		},
		"child1": {
			Metadata: &persistencespb.ChasmNodeMetadata{
				InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2},
			},
		},
		"child2": {
			Metadata: &persistencespb.ChasmNodeMetadata{
				InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3},
			},
		},
		"child1/grandchild1": {
			Metadata: &persistencespb.ChasmNodeMetadata{
				InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4},
			},
		},
		"child2/grandchild1": {
			Metadata: &persistencespb.ChasmNodeMetadata{
				InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 5},
			},
		},
	}
	// Preorder: root, then each child followed by its own children.
	expectedPreorderNodes := []*persistencespb.ChasmNode{
		persistenceNodes[""],
		persistenceNodes["child1"],
		persistenceNodes["child1/grandchild1"],
		persistenceNodes["child2"],
		persistenceNodes["child2/grandchild1"],
	}

	root, err := s.newTestTree(persistenceNodes)
	s.NoError(err)
	s.NotNil(root)

	preorderNodes := s.preorderAndAssertParent(root, nil)
	s.Len(preorderNodes, 5)
	s.Equal(expectedPreorderNodes, preorderNodes)
}
called") +} + +func (s *nodeSuite) TestSerializeNode_ComponentAttributes() { + node := s.testComponentTree() + + s.Len(node.children, 2) + s.NotNil(node.children["SubComponent1"].value) + s.Len(node.children["SubComponent1"].children, 2) + s.NotNil(node.children["SubComponent1"].children["SubComponent11"].value) + s.Empty(node.children["SubComponent1"].children["SubComponent11"].children) + + // Serialize root component. + s.NotNil(node.serializedNode.GetMetadata().GetComponentAttributes()) + s.Nil(node.serializedNode.GetData()) + err := node.serialize() + s.NoError(err) + s.NotNil(node.serializedNode) + s.NotNil(node.serializedNode.GetData(), "node serialized value must have data after serialize is called") + s.Equal(testComponentTypeID, node.serializedNode.GetMetadata().GetComponentAttributes().GetTypeId(), "node serialized value must have type set") + s.Equal(valueStateSynced, node.valueState) + + // Serialize subcomponents (there are 2 subcomponents). + sc1Node := node.children["SubComponent1"] + s.NotNil(sc1Node.serializedNode.GetMetadata().GetComponentAttributes()) + s.Nil(sc1Node.serializedNode.GetData()) + for _, childNode := range node.children { + err = childNode.serialize() + s.NoError(err) + s.Equal(valueStateSynced, childNode.valueState) + } + s.NotNil(sc1Node.serializedNode.GetData(), "child node serialized value must have data after serialize is called") + s.Equal(testSubComponent1TypeID, sc1Node.serializedNode.GetMetadata().GetComponentAttributes().GetTypeId(), "node serialized value must have type set") + + // Check SubData too. 
	sd1Node := node.children["SubData1"]
	// NOTE(review): this re-checks the err left over from the serialize loop
	// above — it is redundant and can be dropped.
	s.NoError(err)
	s.NotNil(sd1Node.serializedNode.GetData(), "child node serialized value must have data after serialize is called")
}

// TestSerializeNode_ClearComponentData verifies that serializing a component
// whose proto data was set to nil clears the persisted data blob while
// keeping the component attributes/type intact.
func (s *nodeSuite) TestSerializeNode_ClearComponentData() {
	node := s.testComponentTree()

	node.value.(*TestComponent).ComponentData = nil

	err := node.serialize()
	s.NoError(err)
	s.NotNil(node.serializedNode, "node serialized value must be not nil after serialize is called")
	s.NotNil(node.serializedNode.GetMetadata().GetComponentAttributes(), "metadata must have component attributes")
	s.Nil(node.serializedNode.GetData(), "data field must cleared to nil")
	s.Equal(testComponentTypeID, node.serializedNode.GetMetadata().GetComponentAttributes().GetTypeId(), "type must present")
	s.Equal(valueStateSynced, node.valueState)
}

// TestSerializeNode_ClearSubDataField verifies that emptying a data Field on
// the component deletes the corresponding child node on structure sync.
func (s *nodeSuite) TestSerializeNode_ClearSubDataField() {
	node := s.testComponentTree()

	// NOTE(review): the repo's new forbidigo rule discourages
	// context.Background() in tests in favor of t.Context(); confirm before
	// changing it suite-wide.
	mutableContext := NewMutableContext(context.Background(), node)
	component, err := node.Component(mutableContext, ComponentRef{})
	s.NoError(err)
	testComponent := component.(*TestComponent)

	testComponent.SubData1 = NewEmptyField[*protoMessageType]()

	sd1Node := node.children["SubData1"]
	s.NotNil(sd1Node)

	err = node.syncSubComponents()
	s.NoError(err)
	s.False(node.needsPointerResolution)
	s.Len(node.mutation.DeletedNodes, 1)

	sd1Node = node.children["SubData1"]
	s.Nil(sd1Node)
}

// TestSetRootComponent_SetsArchetypeID verifies SetRootComponent switches the
// tree's archetype ID from the default workflow archetype to the root
// component's type.
func (s *nodeSuite) TestSetRootComponent_SetsArchetypeID() {
	rootNode := NewEmptyTree(s.registry, s.timeSource, s.nodeBackend, s.nodePathEncoder, s.logger, s.metricsHandler)
	s.Equal(WorkflowArchetypeID, rootNode.ArchetypeID())
	rootComponent := &TestComponent{
		MSPointer: NewMSPointer(s.nodeBackend),
	}
	s.NoError(rootNode.SetRootComponent(rootComponent))
	s.Equal(testComponentTypeID, rootNode.ArchetypeID())
	s.NotEqual(WorkflowArchetypeID, rootNode.ArchetypeID())
}

// TestInitSerializedNode_TypeData verifies initSerializedNode creates data
// attributes but no data blob before serialize() runs.
func (s *nodeSuite) TestInitSerializedNode_TypeData() {
	node := newNode(s.nodeBase(), nil, "")
	node.initSerializedNode(fieldTypeData)
	s.NotNil(node.serializedNode.GetMetadata().GetDataAttributes(), "node serializedNode must have attributes created")
	s.Nil(node.serializedNode.GetData(), "node serializedNode must not have data before serialize is called")
}

// TestSerializeNode_DataAttributes verifies a pure-data node serializes its
// proto value into the expected wire bytes.
func (s *nodeSuite) TestSerializeNode_DataAttributes() {
	component := &protoMessageType{
		CreateRequestId: "22",
	}

	node := newNode(s.nodeBase(), nil, "")
	node.initSerializedNode(fieldTypeData)
	node.value = component
	node.valueState = valueStateNeedSerialize

	err := node.serialize()
	s.NoError(err)
	s.NotNil(node.serializedNode.GetData(), "child node serialized value must have data after serialize is called")
	// Proto wire encoding of {CreateRequestId: "22"}: field 1, length 2, "22".
	s.Equal([]byte{0xa, 0x2, 0x32, 0x32}, node.serializedNode.GetData().GetData())
	s.Equal(valueStateSynced, node.valueState)
}

// TestCollectionAttributes exercises Map (collection) fields keyed by string
// and by int through a full lifecycle: serialize, deserialize, clear the map
// wholesale, delete a single item, and delete all items.
func (s *nodeSuite) TestCollectionAttributes() {
	runID1 := fmt.Sprintf("workflow_id_%d", 1)
	runID2 := fmt.Sprintf("workflow_id_%d", 2)
	sc1 := &TestSubComponent1{
		SubComponent1Data: &protoMessageType{
			RunId: runID1,
		},
	}
	sc2 := &TestSubComponent1{
		SubComponent1Data: &protoMessageType{
			RunId: runID2,
		},
	}

	type testCase struct {
		name          string
		initComponent func() *TestComponent
		mapField      string
	}
	cases := []testCase{
		{
			name: "of string key",
			initComponent: func() *TestComponent {
				return &TestComponent{
					SubComponents: Map[string, *TestSubComponent1]{
						"SubComponent1": NewComponentField(nil, sc1),
						"SubComponent2": NewComponentField(nil, sc2),
					},
				}
			},
			mapField: "SubComponents",
		},
		{
			name: "of int key",
			initComponent: func() *TestComponent {
				return &TestComponent{
					PendingActivities: Map[int, *TestSubComponent1]{
						1: NewComponentField(nil, sc1),
						2: NewComponentField(nil, sc2),
					},
				}
			},
			mapField: "PendingActivities",
		},
	}

	for _, tc := range cases {

		// Shared across the subtests below: the first subtest produces the
		// persisted form that the later subtests rehydrate.
		var persistedNodes map[string]*persistencespb.ChasmNode

		s.Run("Sync and serialize component with map "+tc.name, func() {
			var nilSerializedNodes map[string]*persistencespb.ChasmNode
			rootNode, err := s.newTestTree(nilSerializedNodes)
			s.NoError(err)

			rootComponent := tc.initComponent()
			rootNode.value = rootComponent
			rootNode.valueState = valueStateNeedSyncStructure

			mutations, err := rootNode.CloseTransaction()
			s.NoError(err)
			s.Len(mutations.UpdatedNodes, 4, "root, collection, and 2 collection items must be updated")
			s.Empty(mutations.DeletedNodes)

			switch tc.mapField {
			case "SubComponents":
				s.NotEmpty(rootNode.children[tc.mapField].children["SubComponent1"].serializedNode.GetData().GetData())
				s.NotEmpty(rootNode.children[tc.mapField].children["SubComponent2"].serializedNode.GetData().GetData())
			case "PendingActivities":
				// Int keys are encoded as their string form in node paths.
				s.NotEmpty(rootNode.children[tc.mapField].children["1"].serializedNode.GetData().GetData())
				s.NotEmpty(rootNode.children[tc.mapField].children["2"].serializedNode.GetData().GetData())
			}

			// Save it for use in other subtests.
			persistedNodes = common.CloneProtoMap(mutations.UpdatedNodes)
		})

		s.NotNil(persistedNodes)

		s.Run("Deserialize component with map "+tc.name, func() {
			rootNode, err := s.newTestTree(persistedNodes)
			s.NoError(err)

			err = rootNode.deserialize(reflect.TypeFor[*TestComponent]())
			s.NoError(err)

			rootComponent := rootNode.value.(*TestComponent)

			var sc1Field, sc2Field Field[*TestSubComponent1]
			switch tc.mapField {
			case "SubComponents":
				s.NotNil(rootComponent.SubComponents)
				s.Len(rootComponent.SubComponents, 2)
				sc1Field, sc2Field = rootComponent.SubComponents["SubComponent1"], rootComponent.SubComponents["SubComponent2"]
			case "PendingActivities":
				s.NotNil(rootComponent.PendingActivities)
				s.Len(rootComponent.PendingActivities, 2)
				sc1Field, sc2Field = rootComponent.PendingActivities[1], rootComponent.PendingActivities[2]
			}

			chasmContext := NewMutableContext(context.Background(), rootNode)
			sc1Des := sc1Field.Get(chasmContext)
			s.Equal(sc1.SubComponent1Data.GetRunId(), sc1Des.SubComponent1Data.GetRunId())

			sc2Des := sc2Field.Get(chasmContext)
			s.Equal(sc2.SubComponent1Data.GetRunId(), sc2Des.SubComponent1Data.GetRunId())
		})

		s.Run("Clear map "+tc.name+" by setting it to nil", func() {
			rootNode, err := s.newTestTree(persistedNodes)
			s.NoError(err)

			err = rootNode.deserialize(reflect.TypeFor[*TestComponent]())
			s.NoError(err)

			rootComponent := rootNode.value.(*TestComponent)

			rootNode.valueState = valueStateNeedSyncStructure
			switch tc.mapField {
			case "SubComponents":
				rootComponent.SubComponents = nil
			case "PendingActivities":
				rootComponent.PendingActivities = nil
			}

			mutation, err := rootNode.CloseTransaction()
			s.NoError(err)
			s.Len(mutation.UpdatedNodes, 1, "although root component is not updated, collection is tracked as part of component, therefore root must be updated")
			s.Len(mutation.DeletedNodes, 3, "collection and 2 collection items must be deleted")
		})

		s.Run("Delete single map "+tc.name+" item", func() {
			rootNode, err := s.newTestTree(persistedNodes)
			s.NoError(err)

			err = rootNode.deserialize(reflect.TypeFor[*TestComponent]())
			s.NoError(err)

			rootComponent := rootNode.value.(*TestComponent)

			// Delete collection item 1.
			rootNode.valueState = valueStateNeedSyncStructure
			switch tc.mapField {
			case "SubComponents":
				delete(rootComponent.SubComponents, "SubComponent1")
			case "PendingActivities":
				delete(rootComponent.PendingActivities, 1)
			}

			mutation, err := rootNode.CloseTransaction()
			s.NoError(err)
			s.Len(mutation.UpdatedNodes, 1, "although root component is not updated, collection is tracked as part of component, therefore root must be updated")
			s.Len(mutation.DeletedNodes, 1, "collection item 1 must be deleted")
		})

		s.Run("Clear map "+tc.name+" by deleting all items", func() {
			rootNode, err := s.newTestTree(persistedNodes)
			s.NoError(err)

			err = rootNode.deserialize(reflect.TypeFor[*TestComponent]())
			s.NoError(err)

			rootComponent := rootNode.value.(*TestComponent)

			// Delete both collection items.
			rootNode.valueState = valueStateNeedSyncStructure
			switch tc.mapField {
			case "SubComponents":
				delete(rootComponent.SubComponents, "SubComponent1")
				delete(rootComponent.SubComponents, "SubComponent2")
			case "PendingActivities":
				delete(rootComponent.PendingActivities, 1)
				delete(rootComponent.PendingActivities, 2)
			}

			// Now map is empty and must be deleted.
			mutation, err := rootNode.CloseTransaction()
			s.NoError(err)
			s.Len(mutation.UpdatedNodes, 1, "although root component is not updated, collection is tracked as part of component, therefore root must be updated")
			s.Len(mutation.DeletedNodes, 3, "collection and 2 items must be deleted")
		})
	}
}

// TestPointerAttributes exercises ancestor (deferred) pointer fields through
// serialize, deserialize, and clear.
func (s *nodeSuite) TestPointerAttributes() {
	var persistedNodes map[string]*persistencespb.ChasmNode

	sc11 := &TestSubComponent11{
		SubComponent11Data: &protoMessageType{
			RunId: fmt.Sprintf("workflow_id_%d", 11),
		},
	}

	sc1 := &TestSubComponent1{
		SubComponent1Data: &protoMessageType{
			RunId: fmt.Sprintf("workflow_id_%d", 1),
		},
		SubComponent11: NewComponentField(nil, sc11),
	}

	s.Run("Sync and serialize component with ancestor pointer", func() {
		var nilSerializedNodes map[string]*persistencespb.ChasmNode
		rootNode, err := s.newTestTree(nilSerializedNodes)
		s.NoError(err)

		ctx := NewMutableContext(context.Background(), rootNode)

		rootComponent := &TestComponent{
			MSPointer:                    NewMSPointer(s.nodeBackend),
			SubComponent1:                NewComponentField(nil, sc1),
			SubComponentInterfacePointer: NewComponentField[Component](nil, sc1),
		}

		// sc11 points to root (grandparent) -- an ancestor pointer.
		sc11.GrandparentPointer = ComponentPointerTo(ctx, rootComponent)

		s.NoError(rootNode.SetRootComponent(rootComponent))

		s.Equal(fieldTypeDeferredPointer, sc11.GrandparentPointer.Internal.ft)

		mutations, err := rootNode.CloseTransaction()
		s.NoError(err)
		s.Len(mutations.UpdatedNodes, 5, "root, SubComponent1, SubComponent11, GrandparentPointer, and SubComponentInterfacePointer must be updated")
		s.Empty(mutations.DeletedNodes)

		sc11Node := rootNode.children["SubComponent1"].children["SubComponent11"]
		// Empty node path == pointer target is the tree root.
		s.Equal(
			[]string{},
			sc11Node.children["GrandparentPointer"].serializedNode.GetMetadata().GetPointerAttributes().GetNodePath(),
		)

		// Save for use in other subtests.
+ persistedNodes = common.CloneProtoMap(mutations.UpdatedNodes) + }) + + s.NotNil(persistedNodes) + + s.Run("Deserialize ancestor pointer component", func() { + rootNode, err := s.newTestTree(persistedNodes) + s.NoError(err) + + mutableContext := NewMutableContext(context.Background(), rootNode) + component, err := rootNode.Component(mutableContext, ComponentRef{}) + s.NoError(err) + testComponent := component.(*TestComponent) + + s.NotNil(testComponent.MSPointer) + + chasmContext := NewMutableContext(context.Background(), rootNode) + sc1Des := testComponent.SubComponent1.Get(chasmContext) + s.NotNil(sc1Des) + sc11Des := sc1Des.SubComponent11.Get(chasmContext) + s.NotNil(sc11Des) + + rootViaPointer := sc11Des.GrandparentPointer.Get(chasmContext) + s.NotNil(rootViaPointer) + s.Equal(testComponent, rootViaPointer) + + ifacePtr := testComponent.SubComponentInterfacePointer.Get(chasmContext) + s.NotNil(ifacePtr) + + sc1ptr, ok := ifacePtr.(*TestSubComponent1) + s.True(ok) + s.ProtoEqual(sc1ptr.SubComponent1Data, sc1.SubComponent1Data) + }) + + s.Run("Clear ancestor pointer by setting it to the empty field", func() { + rootNode, err := s.newTestTree(persistedNodes) + s.NoError(err) + + mutableContext := NewMutableContext(context.Background(), rootNode) + component, err := rootNode.Component(mutableContext, ComponentRef{}) + s.NoError(err) + testComponent := component.(*TestComponent) + sc1Des := testComponent.SubComponent1.Get(mutableContext) + sc11Des := sc1Des.SubComponent11.Get(mutableContext) + + sc11Des.GrandparentPointer = NewEmptyField[*TestComponent]() + + mutation, err := rootNode.CloseTransaction() + s.NoError(err) + s.NotEmpty(mutation.UpdatedNodes) + s.Len(mutation.DeletedNodes, 1, "GrandparentPointer must be deleted") + }) +} + +func (s *nodeSuite) TestParentPointer_InMemory() { + node := s.testComponentTree() + + s.assertParentPointer(node) + + // Additionally also test parentPtr for components inside a map. 
+ + mutableContext := NewMutableContext(context.Background(), node) + component, err := node.Component(mutableContext, ComponentRef{}) + s.NoError(err) + testComponent := component.(*TestComponent) + + mapSubComponent1 := &TestSubComponent1{} + // Try using the testComponent we get from the ParentPtr for the mutation. + testComponent.SubComponents = Map[string, *TestSubComponent1]{ + "mapSubComponent1": NewComponentField(mutableContext, mapSubComponent1), + } + + s.Panics(func() { + _ = mapSubComponent1.ParentPtr.Get(mutableContext) + }) + + // Sync structure initializes the parent pointer + err = node.syncSubComponents() + s.NoError(err) + + testComponentFromPtr := mapSubComponent1.ParentPtr.Get(mutableContext) + // Asserting they actually point to the same testComponent object. + s.Same(testComponent, testComponentFromPtr) +} + +func (s *nodeSuite) TestParentPointer_FromDB() { + serializedNodes := testComponentSerializedNodes() + + node, err := s.newTestTree(serializedNodes) + s.NoError(err) + + s.assertParentPointer(node) +} + +func (s *nodeSuite) assertParentPointer(testComponentNode *Node) { + chasmContext := NewContext(context.Background(), testComponentNode) + component, err := testComponentNode.Component(chasmContext, ComponentRef{}) + s.NoError(err) + testComponent := component.(*TestComponent) + + _, found := testComponent.ParentPtr.TryGet(chasmContext) + s.False(found) + + subComponent1 := testComponent.SubComponent1.Get(chasmContext) + testComponentFromPtr := subComponent1.ParentPtr.Get(chasmContext) + // Asserting they actually point to the same testComponent object. + s.Same(testComponent, testComponentFromPtr) + + subComponent11 := subComponent1.SubComponent11.Get(chasmContext) + testSubComponent1FromPtr := subComponent11.ParentPtr.Get(chasmContext) + // Asserting they actually point to the same testSubComponent1 object. 
+ s.Same(subComponent1, testSubComponent1FromPtr) +} + +func (s *nodeSuite) TestSyncSubComponents_DeleteLeafNode() { + node := s.testComponentTree() + + mutableContext := NewMutableContext(context.Background(), node) + component, err := node.ComponentByPath(mutableContext, []string{"SubComponent1"}) + s.NoError(err) + + sc1 := component.(*TestSubComponent1) + sc1.SubComponent11 = NewEmptyField[*TestSubComponent11]() + s.NotNil(node.children["SubComponent1"].children["SubComponent11"]) + + err = node.syncSubComponents() + s.NoError(err) + s.False(node.needsPointerResolution) + + s.Len(node.mutation.DeletedNodes, 1) + s.NotNil(node.mutation.DeletedNodes["SubComponent1/SubComponent11"]) + s.Nil(node.children["SubComponent1"].children["SubComponent11"]) +} + +func (s *nodeSuite) TestSyncSubComponents_DeleteMiddleNode() { + node := s.testComponentTree() + + mutableContext := NewMutableContext(context.Background(), node) + component, err := node.Component(mutableContext, ComponentRef{}) + s.NoError(err) + testComponent := component.(*TestComponent) + + // Set subcomponent at middle node to nil. 
+ testComponent.SubComponent1 = NewEmptyField[*TestSubComponent1]() + s.NotNil(node.children["SubComponent1"]) + + err = node.syncSubComponents() + s.NoError(err) + s.False(node.needsPointerResolution) + + s.Len(node.mutation.DeletedNodes, 3) + s.NotNil(node.mutation.DeletedNodes["SubComponent1/SubComponent11"]) + s.NotNil(node.mutation.DeletedNodes["SubComponent1/SubData11"]) + s.NotNil(node.mutation.DeletedNodes["SubComponent1"]) + + s.Nil(node.children["SubComponent1"]) +} + +func (s *nodeSuite) TestDeserializeNode_EmptyPersistence() { + var serializedNodes map[string]*persistencespb.ChasmNode + + node, err := s.newTestTree(serializedNodes) + s.NoError(err) + s.Nil(node.value) + s.NotNil(node.serializedNode) + + err = node.deserialize(reflect.TypeFor[*TestComponent]()) + s.NoError(err) + s.NotNil(node.value) + s.IsType(&TestComponent{}, node.value) + tc := node.value.(*TestComponent) + s.Equal(valueStateSynced, node.valueState) + s.Nil(tc.SubComponent1.Internal.node) + s.Nil(tc.SubComponent1.Internal.value()) + + // nil component data should decode into zero value + s.NotNil(tc.ComponentData) + s.ProtoEqual(&protoMessageType{}, tc.ComponentData) +} + +func (s *nodeSuite) TestDeserializeNode_ComponentAttributes() { + serializedNodes := testComponentSerializedNodes() + + // Root component will be deserialized as part of the initialization process, + // for initializing search attributes and memo. 
+ node, err := s.newTestTree(serializedNodes) + s.NoError(err) + s.NotNil(node.serializedNode) + s.NotNil(node.value) + s.IsType(&TestComponent{}, node.value) + tc := node.value.(*TestComponent) + s.Equal(tc.SubComponent1.Internal.node, node.children["SubComponent1"]) + s.Equal(tc.ComponentData.CreateRequestId, "component-data") + s.Equal(valueStateSynced, node.valueState) + + s.Nil(tc.SubComponent1.Internal.value()) + s.Equal(valueStateNeedDeserialize, tc.SubComponent1.Internal.node.valueState) + err = tc.SubComponent1.Internal.node.deserialize(reflect.TypeFor[*TestSubComponent1]()) + s.NoError(err) + s.NotNil(tc.SubComponent1.Internal.node.value) + s.IsType(&TestSubComponent1{}, tc.SubComponent1.Internal.node.value) + s.Equal("sub-component1-data", tc.SubComponent1.Internal.node.value.(*TestSubComponent1).SubComponent1Data.CreateRequestId) + s.Equal(valueStateSynced, tc.SubComponent1.Internal.node.valueState) +} + +func (s *nodeSuite) TestDeserializeNode_DataAttributes() { + serializedNodes := testComponentSerializedNodes() + + // Root component will be deserialized as part of the initialization process, + // for initializing search attributes and memo. 
+ node, err := s.newTestTree(serializedNodes) + s.NoError(err) + s.NotNil(node.serializedNode) + s.NotNil(node.value) + s.Equal(valueStateSynced, node.valueState) + + s.IsType(&TestComponent{}, node.value) + tc := node.value.(*TestComponent) + + s.Equal(tc.SubData1.Internal.node, node.children["SubData1"]) + + s.Nil(tc.SubData1.Internal.value()) + err = tc.SubData1.Internal.node.deserialize(reflect.TypeFor[*protoMessageType]()) + s.NoError(err) + s.NotNil(tc.SubData1.Internal.node.value) + s.Equal(valueStateSynced, tc.SubData1.Internal.node.valueState) + s.IsType(&protoMessageType{}, tc.SubData1.Internal.node.value) + s.Equal("sub-data1", tc.SubData1.Internal.node.value.(*protoMessageType).CreateRequestId) +} + +func (s *nodeSuite) TestFieldInterface() { + type testComponent struct { + UnimplementedComponent + Data *protoMessageType + SubComponent1 Field[TestSubComponent] + } + + serializedNodes := testComponentSerializedNodes() + node, err := s.newTestTree(serializedNodes) + s.NoError(err) + + err = node.deserialize(reflect.TypeFor[*testComponent]()) + s.NoError(err) + s.NotNil(node.value) + s.IsType(&testComponent{}, node.value) + tc := node.value.(*testComponent) + + chasmContext := NewMutableContext(context.Background(), node) + sc1 := tc.SubComponent1.Get(chasmContext) + s.NotNil(sc1) + s.Equal("sub-component1-data", sc1.GetData()) +} + +func (s *nodeSuite) TestGenerateSerializedNodes() { + s.T().Skip("This test is used to generate serialized nodes for other tests.") + + node := s.testComponentTree() + + err := node.serialize() + s.NoError(err) + serializedNodes := map[string]*persistencespb.ChasmNode{} + serializedNodes[""] = node.serializedNode + + for childName, childNode := range node.children { + err = childNode.serialize() + s.NoError(err) + serializedNodes[childName] = childNode.serializedNode + } + + for childName, childNode := range node.children["SubComponent1"].children { + err = childNode.serialize() + s.NoError(err) + 
serializedNodes["SubComponent1/"+childName] = childNode.serializedNode + } + + generateMapInit(serializedNodes, "serializedNodes") +} + +func (s *nodeSuite) TestNodeSnapshot() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "child1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4}, + }, + }, + "child2": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + }, + }, + "child1/grandchild1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + }, + }, + "child2/grandchild1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 5}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 5}, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + s.NotNil(root) + + // Test snapshot with nil exclusiveMinVT, which should return all nodes + snapshot := root.Snapshot(nil) + s.Equal(persistenceNodes, snapshot.Nodes) + + // Test snapshot with non-nil exclusiveMinVT, which should return only nodes with 
higher + // LastUpdateVersionedTransition than the exclusiveMinVT + expectedNodePaths := []string{"child1", "child2/grandchild1"} + expectedNodes := make(map[string]*persistencespb.ChasmNode) + for _, path := range expectedNodePaths { + expectedNodes[path] = persistenceNodes[path] + } + snapshot = root.Snapshot(&persistencespb.VersionedTransition{TransitionCount: 3}) + s.Equal(expectedNodes, snapshot.Nodes) +} + +func (s *nodeSuite) TestApplyMutation() { + mustEncode := func(m proto.Message) *commonpb.DataBlob { + taskBlob, err := serialization.ProtoEncode(m) + s.NoError(err) + return taskBlob + } + + now := s.timeSource.Now() + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + // This task is not updated, so it's deserialized version will + // NOT be cleared below as part of the updateNode process. + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Second)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusNone, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("root-task-data-1"), + }), + }, + { + // Task will be deleted, so deserialized version of this task should also be deleted from cache. 
+ TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Second)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusNone, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("root-task-data-2"), + }), + }, + }, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + }, + }, + "SubComponent1/SubComponent11": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent11TypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + // Node is deleted, so deserialized version of this task should be deleted from cache. 
+ TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusNone, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("SubComponent11-task-data"), + }), + }, + }, + }, + }, + }, + }, + "SubComponent1/SubComponent11/SubComponent11Data": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4}, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + s.Len(root.currentSA, 3) + s.NotNil(root.currentMemo) + initialMemo, ok := root.currentMemo.(*protoMessageType) + s.True(ok) + s.ProtoEqual(&protoMessageType{}, initialMemo) + + // Manually deserialize some tasks to populate the taskValueCache + _, err = root.deserializeComponentTask(root.serializedNode.Metadata.GetComponentAttributes().PureTasks[0]) + s.NoError(err) + _, err = root.deserializeComponentTask(root.serializedNode.Metadata.GetComponentAttributes().PureTasks[1]) + s.NoError(err) + _, err = root.deserializeComponentTask(root.children["SubComponent1"].children["SubComponent11"].serializedNode.Metadata.GetComponentAttributes().PureTasks[0]) + s.NoError(err) + s.Len(root.taskValueCache, 3) + + // This decoded value should be reset after applying the mutation + root.children["SubComponent1"].value = "some-random-decoded-value" + + // Prepare mutation: update root and "SubComponent1" node, delete "SubComponent1/SubComponent11", and add "newchild". 
+ + updatedRoot := &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 30}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 30}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Second)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusNone, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("root-task-data-1"), + }), + }, + }, + }, + }, + }, + Data: mustEncode( + &protoMessageType{ + StartTime: timestamppb.New(now), + }), + } + updatedSC1 := &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 20}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 20}, + }, + } + newSC2 := &persistencespb.ChasmNode{ + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 100}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 100}, + }, + } + mutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": updatedRoot, + "SubComponent1": updatedSC1, + "SubComponent2": newSC2, + }, + DeletedNodes: map[string]struct{}{ + "SubComponent1/SubComponent11": {}, // this should remove the entire "SubComponent11" subtree + "SubComponent1/non-exist-child": {}, + }, + } + err = root.ApplyMutation(mutation) + s.NoError(err) + + // Validate root node got updated. 
+ s.Equal(updatedRoot, root.serializedNode) + s.NotNil(root.value) + s.Len(root.currentSA, 3) + s.Len(root.currentSA, 3) + s.Contains(root.currentSA, "TemporalDatetime01") + s.True(root.currentSA["TemporalDatetime01"].(VisibilityValueTime).Equal(VisibilityValueTime(now))) + + // Validate memo content. + s.NotNil(root.currentMemo) + decodedMemo, ok := root.currentMemo.(*protoMessageType) + s.True(ok, "currentMemo should be of type *protoMessageType") + s.True(decodedMemo.StartTime.AsTime().Equal(now)) + + // Validate the "child" node got updated. + nodeSC1, ok := root.children["SubComponent1"] + s.True(ok) + s.Equal(updatedSC1, nodeSC1.serializedNode) + s.Nil(nodeSC1.value) // value should be reset after mutation + + // Validate the "newchild" node is added. + nodeSC2, ok := root.children["SubComponent2"] + s.True(ok) + s.Equal(newSC2, nodeSC2.serializedNode) + + // Validate the "grandchild" node is deleted. + s.Empty(nodeSC1.children) + + // Validate that nodeBase.mutation reflects the applied mutation. + // Only updates on existing nodes are recorded; new nodes are inserted without a mutation record. 
+ expectedMutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": updatedRoot, + "SubComponent1": updatedSC1, + "SubComponent2": newSC2, + }, + DeletedNodes: map[string]struct{}{ + "SubComponent1/SubComponent11": {}, + "SubComponent1/SubComponent11/SubComponent11Data": {}, + }, + } + s.Equal(expectedMutation, root.mutation) + + s.Len(root.taskValueCache, 1) +} + +func (s *nodeSuite) TestApplyMutation_DeleteUpdateSamePath() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + // First apply a mutation to delete "SubComponent1" node. + err = root.ApplyMutation(NodesMutation{ + DeletedNodes: map[string]struct{}{ + "SubComponent1": {}, + }, + }) + s.NoError(err) + s.Empty(root.mutation.UpdatedNodes) + s.Len(root.mutation.DeletedNodes, 1) + + // Then apply another mutation to update "SubComponent1" node. + // This simulates the applyMutation logic in mutable state where the logic + // first applies a deletion only mutation for recorded chasm node tombstones, + // and then applies an update only mutation for updated nodes. 
+ + mutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 20}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 20}, + }, + }, + }, + } + err = root.ApplyMutation(mutation) + s.NoError(err) + s.Len(root.mutation.UpdatedNodes, 1) + // NOTE: require.Empty takes no expected-length argument; the previous extra "1" was + // silently consumed as msgAndArgs. The deletion record must be gone after the update. + s.Empty(root.mutation.DeletedNodes) + +} + +func (s *nodeSuite) TestApplySnapshot() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + }, + }, + "SubComponent1/SubComponent11": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + }, + }, + "SubComponent1/SubComponent11/SubComponent11Data": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 4}, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + // Set a decoded value that should be reset after applying the snapshot. 
+ root.children["SubComponent1"].value = "decoded-value" + + // Prepare an incoming snapshot representing the target state: + // - The "SubComponent1" node is updated (LastUpdateTransition becomes 20), + // - the "SubComponent1/SubComponent11" node is removed, + // - a new node "SubComponent2" is added. + + now := timestamppb.Now() + updatedRootData, err := serialization.ProtoEncode(&protoMessageType{ + StartTime: now, + }) + s.NoError(err) + incomingSnapshot := NodesSnapshot{ + Nodes: map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 10}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + Data: updatedRootData, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 20}, + }, + }, + "SubComponent2": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 100}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 100}, + }, + }, + }, + } + err = root.ApplySnapshot(incomingSnapshot) + s.NoError(err) + + s.Equal(incomingSnapshot, root.Snapshot(nil)) + s.Nil(root.children["SubComponent1"].value) // value should be reset after snapshot + + // Validate that nodeBase.mutation reflects the applied snapshot. 
+ expectedMutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": incomingSnapshot.Nodes[""], + "SubComponent1": incomingSnapshot.Nodes["SubComponent1"], + "SubComponent2": incomingSnapshot.Nodes["SubComponent2"], + }, + DeletedNodes: map[string]struct{}{ + "SubComponent1/SubComponent11": {}, + "SubComponent1/SubComponent11/SubComponent11Data": {}, + }, + } + s.Equal(expectedMutation, root.mutation) + + // Validate visibility search attributes and memo are updated as well. + s.Len(root.currentSA, 3) + s.Contains(root.currentSA, "TemporalDatetime01") + s.True(root.currentSA["TemporalDatetime01"].(VisibilityValueTime).Equal(VisibilityValueTime(now.AsTime()))) +} + +func (s *nodeSuite) TestApplySnapshot_EmptySnapshot() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + // Apply an empty snapshot to simulate the case where + // chasm is disabled in source cluster or chasm tree is empty in + // source cluster. + err = root.ApplySnapshot(NodesSnapshot{}) + s.NoError(err) + + // Validate that nodeBase.mutation reflects the applied snapshot. 
+ expectedMutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{}, + DeletedNodes: map[string]struct{}{ + "SubComponent1": {}, // NOTE: root component can't be deleted. + }, + } + s.Equal(expectedMutation, root.mutation) +} + +func (s *nodeSuite) TestApplyMutation_OutOfOrder() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + // Test the case where child node is applied before parent node. + err = root.ApplyMutation(NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "SubComponent1/SubComponent11": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 20}, + }, + }, + }, + }) + s.NoError(err) + + err = root.ApplyMutation(NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + 
LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + }, + }, + }, + }) + s.NoError(err) + + snapshot := root.Snapshot(nil) + s.Len(snapshot.Nodes, 3) + s.Len(root.mutation.UpdatedNodes, 3) +} + +func (s *nodeSuite) TestRefreshTasks() { + now := s.timeSource.Now() + pureTaskScheduledTime := now.Add(time.Second).UTC() + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(pureTaskScheduledTime), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + }, + }, + }, + }, + "SubComponent2": { + Metadata: &persistencespb.ChasmNodeMetadata{ + 
InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent2TypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + }, + }, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + err = root.RefreshTasks() + s.NoError(err) + + s.True(root.IsDirty()) + s.False(root.IsStateDirty()) + + mutation, err := root.CloseTransaction() + s.NoError(err) + s.Len(mutation.UpdatedNodes, 2) // TaskStatus for the root node is not reset, so no need to persist it. + s.Equal(2, s.nodeBackend.NumTasksAdded()) + s.Equal(pureTaskScheduledTime, s.nodeBackend.LastDeletePureTaskCall()) +} + +func (s *nodeSuite) TestCarryOverTaskStatus() { + now := s.timeSource.Now() + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: 
&persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(2 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(3 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + "data": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{ + DataAttributes: &persistencespb.ChasmDataAttributes{}, + }, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + mutations := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + Attributes: 
&persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Second)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(2 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(3 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + "data": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{ + DataAttributes: &persistencespb.ChasmDataAttributes{}, + }, + }, + }, + }, + } + + expectedNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + 
InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Second)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(2 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(3 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + "data": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 3}, + Attributes: &persistencespb.ChasmNodeMetadata_DataAttributes{ + DataAttributes: 
&persistencespb.ChasmDataAttributes{}, + }, + }, + }, + } + + err = root.ApplyMutation(mutations) + s.NoError(err) + + s.Equal(expectedNodes, root.Snapshot(nil).Nodes) +} + +func (s *nodeSuite) TestValidateAccess() { + nodePath := []string{"SubComponent1", "SubComponent11"} + + // Because access checks are performed on ancestor nodes and not the target node, + // test case properties are applied to the root node. + testCases := []struct { + name string + valid bool + intent OperationIntent + componentStatus enumspb.WorkflowExecutionStatus // TestComponent borrows the WorkflowExecutionStatus struct + executionStatus enumspb.WorkflowExecutionStatus + executionState enumsspb.WorkflowExecutionState + terminated bool + + setup func(*Node, Context) error + }{ + { + name: "access check applies only to ancestors (terminated)", + valid: true, + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + terminated: false, + setup: func(target *Node, ctx Context) error { + // Set the terminated flag on the target node instead of an ancestor + target.terminated = true + return nil + }, + }, + { + name: "access check applies only to ancestors (closed)", + valid: true, + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + terminated: false, + setup: func(target *Node, ctx Context) error { + if err := target.prepareComponentValue(ctx); err != nil { + return err + } + targetComponent, _ := target.value.(*TestSubComponent11) + targetComponent.SubComponent11Data.Status = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED + return nil + }, + }, + { + name: "read-only always succeeds", + intent: OperationIntentObserve, + componentStatus: 
enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + terminated: true, + valid: true, + }, + { + name: "valid write access", + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + terminated: false, + valid: true, + }, + { + name: "invalid write access (parent closed)", + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + terminated: false, + valid: false, + }, + { + name: "invalid write access (component terminated)", + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, + terminated: true, // terminated in current transaction + valid: false, + }, + { + name: "invalid write access (component terminated and reload)", + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + terminated: false, // terminated in previous transaction and mutable state reloaded + valid: false, + }, + { + name: "detached node skips parent validation", + valid: true, + intent: OperationIntentProgress, + componentStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, + executionStatus: enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, // root is closed + executionState: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + terminated: false, + setup: func(target *Node, _ Context) error { + // Set the parent node 
(SubComponent1) as detached. + // When validateParentAccess is called on a detached node, it skips + // ancestor validation entirely. + target.parent.serializedNode.GetMetadata().GetComponentAttributes().Detached = true + return nil + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + root, err := s.newTestTree(testComponentSerializedNodes()) + s.NoError(err) + + ctx := NewContext( + newContextWithOperationIntent(context.Background(), tc.intent), + root, + ) + + // Set fields on root node + err = root.prepareComponentValue(ctx) + s.NoError(err) + root.terminated = tc.terminated + component, ok := root.value.(*TestComponent) + if ok { + component.ComponentData.Status = tc.componentStatus + } + + // Find target node + node, ok := root.findNode(nodePath) + s.True(ok) + err = node.prepareComponentValue(ctx) + s.NoError(err) + + if tc.setup != nil { + s.NoError(tc.setup(node, ctx)) + } + + s.nodeBackend.HandleGetExecutionState = func() *persistencespb.WorkflowExecutionState { + return &persistencespb.WorkflowExecutionState{ + State: tc.executionState, + Status: tc.executionStatus, + } + } + + // Validation begins on the target node, checking ancestors only. + err = node.validateAccess(ctx, false) + if tc.valid { + s.NoError(err) + } else { + s.Error(err) + s.ErrorIs(errAccessCheckFailed, err) + } + }) + } + +} + +func (s *nodeSuite) TestGetComponent_DetachedNodeBypassesParentValidation() { + // Test that a detached node can be accessed even when its parent is closed. + root, err := s.newTestTree(testComponentSerializedNodes()) + s.NoError(err) + + targetPath := []string{"SubComponent1", "SubComponent11"} + targetNode, ok := root.findNode(targetPath) + s.True(ok) + + // Mark the target node as detached. + targetNode.serializedNode.GetMetadata().GetComponentAttributes().Detached = true + + // Close the root node (set lifecycle to COMPLETED). 
+ ctx := NewMutableContext( + newContextWithOperationIntent(context.Background(), OperationIntentProgress), + root, + ) + err = root.prepareComponentValue(ctx) + s.NoError(err) + rootComponent, ok := root.value.(*TestComponent) + s.True(ok) + rootComponent.ComponentData.Status = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED + + // GetComponent on the detached node should succeed despite root being closed. + ref := ComponentRef{ + componentPath: targetPath, + } + component, err := root.Component(ctx, ref) + s.NoError(err) + s.NotNil(component) +} + +func (s *nodeSuite) TestGetComponent_ClosedTargetSucceeds() { + // Test that a closed target component can still be accessed via Component() + // because we only check ancestor lifecycle, not the target's lifecycle. + root, err := s.newTestTree(testComponentSerializedNodes()) + s.NoError(err) + + targetPath := []string{"SubComponent1", "SubComponent11"} + targetNode, ok := root.findNode(targetPath) + s.True(ok) + + ctx := NewMutableContext( + newContextWithOperationIntent(context.Background(), OperationIntentProgress), + root, + ) + + // Close the target node's lifecycle (set to COMPLETED). + err = targetNode.prepareComponentValue(ctx) + s.NoError(err) + targetComponent, ok := targetNode.value.(*TestSubComponent11) + s.True(ok) + targetComponent.SubComponent11Data.Status = enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED + s.True(targetComponent.LifecycleState(ctx).IsClosed()) + + // GetComponent on the closed target should succeed because we only check ancestors. 
+ ref := ComponentRef{ + componentPath: targetPath, + } + component, err := root.Component(ctx, ref) + s.NoError(err) + s.NotNil(component) +} + +func (s *nodeSuite) TestGetComponent() { + errValidation := errors.New("some random validation error") + + expectedTestComponent := &TestComponent{} + setTestComponentFields(expectedTestComponent, s.nodeBackend) + assertTestComponent := func(component Component) { + testComponent, ok := component.(*TestComponent) + s.True(ok) + protoassert.ProtoEqual(s.T(), expectedTestComponent.ComponentData, testComponent.ComponentData) + + // TODO: Can we assert other fields? + // Right now the chasm Field generated by setTestComponentFields() doesn't have a backing node. + } + + testCases := []struct { + name string + chasmContextFn func(root *Node) Context + ref ComponentRef + expectedErr error + nodeDirty bool + assertComponent func(Component) + }{ + { + name: "path not found", + chasmContextFn: func(root *Node) Context { + return NewContext(context.Background(), root) + }, + ref: ComponentRef{ + componentPath: []string{"unknownComponent"}, + }, + expectedErr: errComponentNotFound, + }, + { + name: "initialVT mismatch", + chasmContextFn: func(root *Node) Context { + return NewMutableContext(context.Background(), root) + }, + ref: ComponentRef{ + componentPath: []string{"SubComponent1", "SubComponent11"}, + // should be (1, 1) but we set it to (2, 2) + componentInitialVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + }, + }, + expectedErr: errComponentNotFound, + }, + { + name: "validation failure", + chasmContextFn: func(root *Node) Context { + return NewMutableContext(context.Background(), root) + }, + ref: ComponentRef{ + componentPath: []string{"SubComponent1"}, + componentInitialVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + validationFn: func(_ NodeBackend, _ Context, _ Component, _ *Registry) error { + return errValidation + }, 
+ }, + expectedErr: errValidation, + }, + { + name: "success readonly access", + chasmContextFn: func(root *Node) Context { + return NewContext(context.Background(), root) + }, + ref: ComponentRef{ + componentPath: []string{}, // root + componentInitialVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + validationFn: func(_ NodeBackend, _ Context, _ Component, _ *Registry) error { + return nil + }, + }, + expectedErr: nil, + assertComponent: assertTestComponent, + }, + { + name: "success mutable access", + chasmContextFn: func(root *Node) Context { + return NewMutableContext(context.Background(), root) + }, + ref: ComponentRef{ + componentPath: []string{}, // root + }, + expectedErr: nil, + nodeDirty: true, + assertComponent: assertTestComponent, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + root, err := s.newTestTree(testComponentSerializedNodes()) + s.NoError(err) + + component, err := root.Component(tc.chasmContextFn(root), tc.ref) + s.Equal(tc.expectedErr, err) + + node, ok := root.findNode(tc.ref.componentPath) + if tc.expectedErr == nil { + s.True(ok) + tc.assertComponent(component) + } + + if ok { + if tc.nodeDirty { + s.Greater(node.valueState, valueStateSynced) + } else { + s.LessOrEqual(node.valueState, valueStateSynced) + } + } + }) + } +} + +func (s *nodeSuite) TestRef() { + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + executionKey := ExecutionKey{ + NamespaceID: workflowKey.NamespaceID, + BusinessID: workflowKey.WorkflowID, + RunID: workflowKey.RunID, + } + currentVT := &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 2, + TransitionCount: 2, + } + s.nodeBackend = &MockNodeBackend{ + HandleCurrentVersionedTransition: func() *persistencespb.VersionedTransition { + return currentVT + }, + HandleGetWorkflowKey: func() definition.WorkflowKey { + return workflowKey + 
}, + } + + root, err := s.newTestTree(testComponentSerializedNodes()) + s.NoError(err) + + chasmContext := NewContext(context.Background(), root) + rootComponent, err := root.Component(chasmContext, NewComponentRef[*TestComponent](executionKey)) + s.NoError(err) + testComponent, ok := rootComponent.(*TestComponent) + s.True(ok) + + rc, ok := s.registry.ComponentFor(testComponent) + s.True(ok) + archetypeID := rc.componentID + + subComponent1 := testComponent.SubComponent1.Get(chasmContext) + subComponent11 := subComponent1.SubComponent11.Get(chasmContext) + + testCases := []struct { + name string + component Component + expectErr bool + expectedPath []string + expectedInitalVT *persistencespb.VersionedTransition + }{ + { + name: "root", + component: testComponent, + expectErr: false, + expectedPath: nil, // same as []string{} + expectedInitalVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + }, + { + name: "subComponent1", + component: subComponent1, + expectErr: false, + expectedPath: []string{"SubComponent1"}, + expectedInitalVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + }, + { + name: "subComponent11", + component: subComponent11, + expectErr: false, + expectedPath: []string{"SubComponent1", "SubComponent11"}, + expectedInitalVT: &persistencespb.VersionedTransition{ + NamespaceFailoverVersion: 1, + TransitionCount: 1, + }, + }, + { + name: "unknown", + component: &TestComponent{}, // a new instance of TestComponent + expectErr: true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + + encodedRef, err := root.Ref(tc.component) + if tc.expectErr { + s.Error(err) + return + } + + s.NoError(err) + expectedRef := ComponentRef{ + ExecutionKey: executionKey, + archetypeID: archetypeID, + componentPath: tc.expectedPath, + + // Proto fields are validated separately with ProtoEqual. 
+ // executionLastUpdateVT: currentVT, + // componentInitialVT: tc.expectedInitalVT, + } + + actualRef, err := DeserializeComponentRef(encodedRef) + s.NoError(err) + s.ProtoEqual(currentVT, actualRef.executionLastUpdateVT) + s.ProtoEqual(tc.expectedInitalVT, actualRef.componentInitialVT) + + actualRef.executionLastUpdateVT = nil + actualRef.componentInitialVT = nil + s.Equal(expectedRef, actualRef) + }) + } +} + +func (s *nodeSuite) TestSerializeDeserializeTask() { + payload := &commonpb.Payload{ + Data: []byte("some-random-data"), + } + expectedBlob, err := serialization.ProtoEncode(payload) + s.NoError(err) + + testCases := []struct { + name string + task any + expectedData []byte + equalFn func(t1, t2 any) + }{ + { + name: "ProtoTask", + task: &TestSideEffectTask{ + Data: []byte("some-random-data"), + }, + expectedData: expectedBlob.GetData(), + equalFn: func(t1, t2 any) { + protorequire.ProtoEqual(s.T(), t1.(*TestSideEffectTask), t2.(*TestSideEffectTask)) + }, + }, + { + name: "EmptyTask", + task: TestOutboundSideEffectTask{}, + expectedData: nil, + equalFn: func(t1, t2 any) { + s.IsType(TestOutboundSideEffectTask{}, t1) + s.IsType(TestOutboundSideEffectTask{}, t2) + s.Equal(t1, t2) + }, + }, + { + name: "StructWithProtoField", + task: &TestPureTask{ + Payload: payload, + }, + expectedData: expectedBlob.GetData(), + equalFn: func(t1, t2 any) { + protorequire.ProtoEqual(s.T(), t1.(*TestPureTask).Payload, t2.(*TestPureTask).Payload) + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + rt, ok := s.registry.taskFor(tc.task) + s.True(ok) + + blob, err := serializeTask(rt, reflect.ValueOf(tc.task)) + s.NoError(err) + + s.NotNil(blob) + s.Equal(enumspb.ENCODING_TYPE_PROTO3, blob.GetEncodingType()) + s.Equal(tc.expectedData, blob.GetData()) + + deserializedTaskValue, err := deserializeTask(rt, blob) + s.NoError(err) + tc.equalFn(tc.task, deserializedTaskValue.Interface()) + }) + } +} + +func (s *nodeSuite) TestCloseTransaction_Success() { + node 
:= s.testComponentTree() + chasmCtx := NewMutableContext(context.Background(), node) + tc, err := node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + tc.(*TestComponent).SubData1 = NewEmptyField[*protoMessageType]() + tc.(*TestComponent).ComponentData = &protoMessageType{CreateRequestId: primitives.NewUUID().String()} + + mutations, err := node.CloseTransaction() + s.NoError(err) + s.Len(mutations.UpdatedNodes, 4) + s.Contains(mutations.UpdatedNodes, "", "root component must be in UpdatedNodes") + s.Contains(mutations.UpdatedNodes, "SubComponent1", "SubComponent1 component must be in UpdatedNodes") + s.Contains(mutations.UpdatedNodes, "SubComponent1/SubComponent11", "SubComponent1/SubComponent11 component must be in UpdatedNodes") + s.Contains(mutations.UpdatedNodes, "SubComponent1/SubData11", "SubComponent1/SubData11 component must be in UpdatedNodes") + s.Len(mutations.DeletedNodes, 1) + s.Contains(mutations.DeletedNodes, "SubData1", "SubData1 was removed and must be in DeletedNodes") + + sc1 := tc.(*TestComponent).SubComponent1.Get(chasmCtx) + s.NotNil(sc1) + + mutations, err = node.CloseTransaction() + s.NoError(err) + s.Len(mutations.UpdatedNodes, 1) + s.Contains(mutations.UpdatedNodes, "SubComponent1", "SubComponent1 component must be in UpdatedNodes") + s.Empty(mutations.DeletedNodes) +} + +func (s *nodeSuite) TestCloseTransaction_EmptyNode() { + var nilSerializedNodes map[string]*persistencespb.ChasmNode + // Create an empty tree. 
+ node, err := s.newTestTree(nilSerializedNodes) + s.NoError(err) + s.Nil(node.value) + + mutations, err := node.CloseTransaction() + s.NoError(err) + s.Empty(mutations.UpdatedNodes, "there should be no updated nodes because tree was initialized with empty serialized nodes") + s.Empty(mutations.DeletedNodes, "there should be no deleted nodes because tree was initialized with empty serialized nodes") +} + +func (s *nodeSuite) TestCloseTransaction_LifecycleChange() { + node := s.testComponentTree() + + chasmCtx := NewMutableContext(context.Background(), node) + _, err := node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + _, err = node.CloseTransaction() + s.NoError(err) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, s.nodeBackend.LastUpdateWorkflowStatus()) + + // Test force terminate case + _, err = node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + node.terminated = true + _, err = node.CloseTransaction() + s.NoError(err) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, s.nodeBackend.LastUpdateWorkflowStatus()) + + node.terminated = false + tc, err := node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + tc.(*TestComponent).Complete(chasmCtx) + _, err = node.CloseTransaction() + s.NoError(err) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, s.nodeBackend.LastUpdateWorkflowStatus()) + + tc, err = node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + tc.(*TestComponent).Fail(chasmCtx) + _, err = node.CloseTransaction() + s.NoError(err) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, s.nodeBackend.LastUpdateWorkflowState()) + 
s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, s.nodeBackend.LastUpdateWorkflowStatus()) +} + +func (s *nodeSuite) TestCloseTransaction_ForceUpdateVisibility_RootLifecycleChanged() { + node := s.testComponentTree() + + chasmCtx := NewMutableContext(context.Background(), node) + testComponent, err := node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + + nextTransitionCount := int64(1) + s.nodeBackend.HandleGetCurrentVersion = func() int64 { return 1 } + s.nodeBackend.HandleNextTransitionCount = func() int64 { return nextTransitionCount } + s.nodeBackend.HandleUpdateWorkflowStateStatus = func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) { + return true, nil + } + + // Init visibility component + testComponent.(*TestComponent).Visibility = NewComponentField(chasmCtx, NewVisibility(chasmCtx)) + mutation, err := node.CloseTransaction() + s.NoError(err) + pVisibilityNode, ok := mutation.UpdatedNodes["Visibility"] + s.True(ok) + s.Len(pVisibilityNode.GetMetadata().GetComponentAttributes().SideEffectTasks, 1) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, s.nodeBackend.UpdateCalls[0].State) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, s.nodeBackend.UpdateCalls[0].Status) + + // Change ComponentData which is used as Memo. Even though lifecycle didn't change, + // visibility should be updated because memo changed. 
+ nextTransitionCount = 2 + testComponent, err = node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + testComponent.(*TestComponent).ComponentData = &protoMessageType{ + CreateRequestId: "some-updated-component-data", + } + s.nodeBackend.HandleUpdateWorkflowStateStatus = func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) { + return false, nil + } + mutation, err = node.CloseTransaction() + s.NoError(err) + pVisibilityNode, ok = mutation.UpdatedNodes["Visibility"] + s.True(ok, "visibility should be updated when memo changes") + s.Len(pVisibilityNode.GetMetadata().GetComponentAttributes().SideEffectTasks, 1) + + // Close the run, visibility should be force updated + // even if not explicitly updated. + nextTransitionCount = 3 + testComponent, err = node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + testComponent.(*TestComponent).Complete(chasmCtx) + s.nodeBackend.HandleUpdateWorkflowStateStatus = func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) { + return true, nil + } + mutation, err = node.CloseTransaction() + s.NoError(err) + pVisibilityNode, ok = mutation.UpdatedNodes["Visibility"] + s.True(ok) + s.Len(pVisibilityNode.GetMetadata().GetComponentAttributes().SideEffectTasks, 1) +} + +func (s *nodeSuite) TestCloseTransaction_ForceUpdateVisibility_RootSAMemoChanged() { + node := s.testComponentTree() + chasmCtx := NewMutableContext(context.Background(), node) + testComponent, err := node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + + nextTransitionCount := int64(1) + s.nodeBackend.HandleNextTransitionCount = func() int64 { + return nextTransitionCount + } + + // Init visibility component + testComponent.(*TestComponent).Visibility = NewComponentField(chasmCtx, NewVisibility(chasmCtx)) + s.nodeBackend.HandleUpdateWorkflowStateStatus = func(state enumsspb.WorkflowExecutionState, 
status enumspb.WorkflowExecutionStatus) (bool, error) { + return true, nil + } + mutation, err := node.CloseTransaction() + s.NoError(err) + pVisibilityNode, ok := mutation.UpdatedNodes["Visibility"] + s.True(ok) + s.Len(pVisibilityNode.GetMetadata().GetComponentAttributes().SideEffectTasks, 1) + + // Update root component state, which results in a change to the search attributes and memo. + // CHASM framework should automatically detect the change and generate a visibility task. + nextTransitionCount = 2 + testComponent, err = node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + testComponent.(*TestComponent).ComponentData = &protoMessageType{ + StartTime: timestamppb.Now(), + } + s.nodeBackend.HandleUpdateWorkflowStateStatus = func(state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus) (bool, error) { + return false, nil + } + mutation, err = node.CloseTransaction() + s.NoError(err) + pVisibilityNode, ok = mutation.UpdatedNodes["Visibility"] + s.True(ok) + s.Len(pVisibilityNode.GetMetadata().GetComponentAttributes().SideEffectTasks, 1) +} + +func (s *nodeSuite) TestCloseTransaction_InvalidateComponentTasks() { + payload := &commonpb.Payload{ + Data: []byte("some-random-data"), + } + taskBlob, err := serialization.ProtoEncode(payload) + s.NoError(err) + + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + 
VersionedTransitionOffset: 1, + Data: taskBlob, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testOutboundSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + Data: &commonpb.DataBlob{ + Data: nil, + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + }, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + Data: taskBlob, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 4, + Data: taskBlob, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 5, + Data: taskBlob, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + nextTransitionCount := int64(2) + s.nodeBackend.HandleNextTransitionCount = func() int64 { return nextTransitionCount } + + // The idea is to mark the node as dirty by accessing it with a mutable 
context. + mutableContext := NewMutableContext(context.Background(), root) + _, err = root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + + s.testLibrary.mockSideEffectTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(2) + s.testLibrary.mockOutboundSideEffectTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(2) + + mutation, err := root.CloseTransaction() + s.NoError(err) + + s.Equal(tasks.MaximumKey.FireTime, s.nodeBackend.LastDeletePureTaskCall()) + + s.Len(mutation.UpdatedNodes, 2) + for _, updatedNode := range mutation.UpdatedNodes { + s.Equal(nextTransitionCount, updatedNode.GetMetadata().GetLastUpdateVersionedTransition().TransitionCount) + } + s.Empty(mutation.DeletedNodes) + + componentAttr := root.serializedNode.Metadata.GetComponentAttributes() + s.Empty(componentAttr.PureTasks) + s.Len(componentAttr.SideEffectTasks, 1) + s.Equal(testOutboundSideEffectTaskTypeID, componentAttr.SideEffectTasks[0].GetTypeId()) + + componentAttr = root.children["SubComponent1"].serializedNode.Metadata.GetComponentAttributes() + s.Empty(componentAttr.PureTasks) + s.Empty(componentAttr.SideEffectTasks) +} + +// TestCloseTransaction_PausedStateInvalidatesTasks verifies that all logical tasks are +// invalidated when a component (or one of its non-detached ancestors) is paused, without +// invoking the task-specific validator. 
+func (s *nodeSuite) TestCloseTransaction_PausedStateInvalidatesTasks() { + payload := &commonpb.Payload{ + Data: []byte("some-random-data"), + } + taskBlob, err := serialization.ProtoEncode(payload) + s.NoError(err) + + makeTask := func(typeID uint32, offset int64) *persistencespb.ChasmComponentAttributes_Task { + return &persistencespb.ChasmComponentAttributes_Task{ + TypeId: typeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: offset, + Data: taskBlob, + PhysicalTaskStatus: physicalTaskStatusCreated, + } + } + + s.Run("paused component invalidates its own tasks without calling task validator", func() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{makeTask(testSideEffectTaskTypeID, 1)}, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{makeTask(testPureTaskTypeID, 2)}, + }, + }, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + nextTransitionCount := int64(2) + s.nodeBackend.HandleNextTransitionCount = func() int64 { return nextTransitionCount } + + // Pause the root component. + mutableContext := NewMutableContext(context.Background(), root) + tc, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + tc.(*TestComponent).Pause(mutableContext) + + // Task-specific validators must NOT be called - paused state short-circuits them. 
+ // (no EXPECT calls on mock handlers) + + mutation, err := root.CloseTransaction() + s.NoError(err) + + componentAttr := root.serializedNode.Metadata.GetComponentAttributes() + s.Empty(componentAttr.SideEffectTasks, "paused component should have no side-effect tasks") + s.Empty(componentAttr.PureTasks, "paused component should have no pure tasks") + + // Node must be marked updated so the invalidation is persisted. + s.Len(mutation.UpdatedNodes, 1) + }) + + s.Run("paused parent invalidates non-detached sub-component tasks", func() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{makeTask(testSideEffectTaskTypeID, 1)}, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{makeTask(testPureTaskTypeID, 2)}, + }, + }, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + nextTransitionCount := int64(2) + s.nodeBackend.HandleNextTransitionCount = func() int64 { return nextTransitionCount } + + // Pause the root - its non-detached sub-component's tasks should also be invalidated. 
+ mutableContext := NewMutableContext(context.Background(), root) + tc, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + tc.(*TestComponent).Pause(mutableContext) + + mutation, err := root.CloseTransaction() + s.NoError(err) + + subAttr := root.children["SubComponent1"].serializedNode.Metadata.GetComponentAttributes() + s.Empty(subAttr.SideEffectTasks, "non-detached sub-component tasks should be invalidated when parent is paused") + s.Empty(subAttr.PureTasks) + s.Len(mutation.UpdatedNodes, 2) // root (paused) + SubComponent1 (task cleanup) + }) + + s.Run("detached sub-component tasks are NOT invalidated by parent pause", func() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + Detached: true, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{makeTask(testSideEffectTaskTypeID, 1)}, + }, + }, + }, + }, + } + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + nextTransitionCount := int64(2) + s.nodeBackend.HandleNextTransitionCount = func() int64 { return nextTransitionCount } + + // Pause the root. 
+ mutableContext := NewMutableContext(context.Background(), root) + tc, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + tc.(*TestComponent).Pause(mutableContext) + + // The detached sub-component's validator IS called (it decides independently). + s.testLibrary.mockSideEffectTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + + mutation, err := root.CloseTransaction() + s.NoError(err) + + subAttr := root.children["SubComponent1"].serializedNode.Metadata.GetComponentAttributes() + s.Len(subAttr.SideEffectTasks, 1, "detached sub-component tasks should survive parent pause") + _ = mutation + }) + + s.Run("write access accepted on paused component", func() { + // Requirement: for now accept chasm engine requests on paused component. + root, err := s.newTestTree(testComponentSerializedNodes()) + s.NoError(err) + + ctx := NewContext( + newContextWithOperationIntent(context.Background(), OperationIntentProgress), + root, + ) + + // Pause the root. + err = root.prepareComponentValue(ctx) + s.NoError(err) + root.value.(*TestComponent).Pause(NewMutableContext(context.Background(), root)) + + // validateAccess should still succeed - paused does NOT block writes. + subNode, ok := root.findNode([]string{"SubComponent1"}) + s.True(ok) + err = subNode.validateAccess(ctx, false) + s.NoError(err, "write access to sub-component of paused parent should be accepted") + }) +} + +func (s *nodeSuite) TestCloseTransaction_LifecycleChange_PausedRootKeepsRunning() { + // When the root component is paused, the execution state should remain RUNNING + // because paused is an OPEN lifecycle state. 
+ node := s.testComponentTree() + + chasmCtx := NewMutableContext(context.Background(), node) + rootComp, err := node.Component(chasmCtx, ComponentRef{componentPath: rootPath}) + s.NoError(err) + rootComp.(*TestComponent).Pause(chasmCtx) + + _, err = node.CloseTransaction() + s.NoError(err) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, s.nodeBackend.LastUpdateWorkflowStatus()) +} + +func (s *nodeSuite) TestCloseTransaction_NewComponentTasks() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + }, + }, + }, + }, + "SubComponent2": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent2TypeID, + }, + }, + }, + }, + } + + s.nodeBackend.HandleNextTransitionCount = func() int64 { + return 2 + } + + root, err := 
s.newTestTree(persistenceNodes) + s.NoError(err) + + mutableContext := NewMutableContext(context.Background(), root) + c, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + + // Add a valid side effect task. + s.testLibrary.mockSideEffectTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + testComponent := c.(*TestComponent) + mutableContext.AddTask(testComponent, TaskAttributes{}, &TestSideEffectTask{ + Data: []byte("some-random-data"), + }) + + // Add an invalid outbound side effect task. + // the invalid task should not be created. + s.testLibrary.mockOutboundSideEffectTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(1) + mutableContext.AddTask( + testComponent, + TaskAttributes{Destination: "destination"}, + TestOutboundSideEffectTask{}, + ) + + // Add a valid pure task. + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + mutableContext.AddTask( + testComponent, + TaskAttributes{ScheduledTime: s.timeSource.Now()}, + &TestPureTask{ + Payload: &commonpb.Payload{ + Data: []byte("valid-pure-task"), + }, + }, + ) + + // Add an invalid pure task. + // the invalid task should not be created. + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil).Times(1) + mutableContext.AddTask( + testComponent, + TaskAttributes{ScheduledTime: s.timeSource.Now()}, + &TestPureTask{ + Payload: &commonpb.Payload{ + Data: []byte("invalid-pure-task"), + }, + }, + ) + + // Add a valid outbound side effect task to a sub-component. + s.testLibrary.mockOutboundSideEffectTaskHandler.EXPECT(). 
+ Validate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(true, nil).Times(1) + subComponent2 := testComponent.SubComponent2.Get(mutableContext) + mutableContext.AddTask( + subComponent2, + TaskAttributes{Destination: "destination"}, + TestOutboundSideEffectTask{}, + ) + + mutation, err := root.CloseTransaction() + s.NoError(err) + + s.Equal(s.timeSource.Now().UTC(), s.nodeBackend.LastDeletePureTaskCall()) + + rootAttr := mutation.UpdatedNodes[""].GetMetadata().GetComponentAttributes() + s.Len(rootAttr.SideEffectTasks, 1) // Only one valid side effect task. + newSideEffectTask := rootAttr.SideEffectTasks[0] + newSideEffectTask.Data = nil // This is tested by TestSerializeTask() + s.ProtoEqual(&persistencespb.ChasmComponentAttributes_Task{ + TypeId: testSideEffectTaskTypeID, + ScheduledTime: timestamppb.New(time.Time{}), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, newSideEffectTask) + s.Len(s.nodeBackend.TasksByCategory[tasks.CategoryTransfer], 1) + chasmTask := s.nodeBackend.TasksByCategory[tasks.CategoryTransfer][0].(*tasks.ChasmTask) + s.ProtoEqual(&persistencespb.ChasmTaskInfo{ + ComponentInitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + ComponentLastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Path: rootPath, + TypeId: testSideEffectTaskTypeID, + Data: chasmTask.Info.GetData(), // This is tested by TestSerializeTask() + ArchetypeId: testComponentTypeID, + TaskVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + TaskVersionedTransitionOffset: 1, + }, chasmTask.Info) + + s.Len(rootAttr.PureTasks, 1) // Only one valid pure task. 
+ newPureTask := rootAttr.PureTasks[0] + newPureTask.Data = nil // This is tested by TestSerializeTask() + s.ProtoEqual(&persistencespb.ChasmComponentAttributes_Task{ + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(s.timeSource.Now()), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, newPureTask) + s.Len(s.nodeBackend.TasksByCategory[tasks.CategoryTimer], 1) + chasmPureTask := s.nodeBackend.TasksByCategory[tasks.CategoryTimer][0].(*tasks.ChasmTaskPure) + s.Equal(tasks.CategoryTimer, chasmPureTask.GetCategory()) + s.True(chasmPureTask.VisibilityTimestamp.Equal(s.timeSource.Now())) + + subComponent2Attr := mutation.UpdatedNodes["SubComponent2"].GetMetadata().GetComponentAttributes() + newOutboundSideEffectTask := subComponent2Attr.SideEffectTasks[0] + newOutboundSideEffectTask.Data = nil // This is tested by TestSerializeTask() + s.ProtoEqual(&persistencespb.ChasmComponentAttributes_Task{ + TypeId: testOutboundSideEffectTaskTypeID, + Destination: "destination", + ScheduledTime: timestamppb.New(time.Time{}), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, newOutboundSideEffectTask) + s.Len(s.nodeBackend.TasksByCategory[tasks.CategoryOutbound], 1) + chasmTask = s.nodeBackend.TasksByCategory[tasks.CategoryOutbound][0].(*tasks.ChasmTask) + s.ProtoEqual(&persistencespb.ChasmTaskInfo{ + ComponentInitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + ComponentLastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Path: []string{"SubComponent2"}, + TypeId: testOutboundSideEffectTaskTypeID, + Data: chasmTask.Info.GetData(), // This is tested by TestSerializeTask() + ArchetypeId: testComponentTypeID, + TaskVersionedTransition: 
&persistencespb.VersionedTransition{TransitionCount: 2}, + TaskVersionedTransitionOffset: 3, + }, chasmTask.Info) +} + +func (s *nodeSuite) TestCloseTransaction_ApplyMutation_SideEffectTasks() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + }, + }, + }, + }, + } + + incomingMutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + SideEffectTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + { + TypeId: testSideEffectTaskTypeID, + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + { + TypeId: testSideEffectTaskTypeID, + Destination: "destination", + 
ScheduledTime: timestamppb.New(TaskScheduledTimeImmediate), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + { + TypeId: testSideEffectTaskTypeID, + ScheduledTime: timestamppb.New(s.timeSource.Now().Add(time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + err = root.ApplyMutation(incomingMutation) + s.NoError(err) + + expectedCategories := []tasks.Category{tasks.CategoryTimer, tasks.CategoryOutbound, tasks.CategoryTransfer} + _, err = root.CloseTransaction() + for _, category := range expectedCategories { + for _, task := range s.nodeBackend.TasksByCategory[category] { + s.IsType(&tasks.ChasmTask{}, task) + s.Equal(category, task.GetCategory()) + } + } + + s.NoError(err) +} + +func (s *nodeSuite) TestCloseTransaction_ApplyMutation_PureTasks() { + now := s.timeSource.Now().UTC() + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Second)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + }, + }, + }, + }, + }, + }, + "SubComponent1": { + Metadata: 
&persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + } + + incomingMutation := NodesMutation{ + UpdatedNodes: map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + LastUpdateVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now.Add(2 * time.Minute)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 2}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusNone, + }, + }, + }, + }, + }, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + + err = root.ApplyMutation(incomingMutation) + s.NoError(err) + + mutation, err := root.CloseTransaction() + s.NoError(err) + + s.Equal(now.Add(time.Minute), s.nodeBackend.LastDeletePureTaskCall()) + + // Although only root is mutated in ApplyMutation, we generated a pure task for the child node, + // and need to persist that as well. 
+ s.Len(mutation.UpdatedNodes, 2) + + s.Len(s.nodeBackend.TasksByCategory[tasks.CategoryTimer], 1) + task := s.nodeBackend.TasksByCategory[tasks.CategoryTimer][0] + s.IsType(&tasks.ChasmTaskPure{}, task) + s.True(now.Add(time.Minute).Equal(task.GetKey().FireTime)) +} + +func (s *nodeSuite) TestTerminate() { + node := s.testComponentTree() + + // First closeTransaction once to make the tree clean. + _, err := node.CloseTransaction() + s.NoError(err) + + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, s.nodeBackend.LastUpdateWorkflowStatus()) + + // Then terminate the node and verify only that node will be in the mutation. + err = node.Terminate(TerminateComponentRequest{}) + s.NoError(err) + s.True(node.terminated) + + mutations, err := node.CloseTransaction() + s.NoError(err) + s.Len(mutations.UpdatedNodes, 1) + s.Empty(mutations.DeletedNodes) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, s.nodeBackend.LastUpdateWorkflowStatus()) + + // Test updating a terminated node will NOT change the state & status in mutable state. + // Here we simulate mutable state reload case since the terminate flag is not persisted. 
+ s.nodeBackend.HandleGetExecutionState = func() *persistencespb.WorkflowExecutionState { + return &persistencespb.WorkflowExecutionState{ + State: enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, + Status: enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, + } + } + + snapshot := node.Snapshot(nil) + node, err = s.newTestTree(snapshot.Nodes) + s.NoError(err) + + mutableContext := NewMutableContext(context.Background(), node) + _, err = node.Component(mutableContext, ComponentRef{}) + s.NoError(err) + + mutations, err = node.CloseTransaction() + s.NoError(err) + s.Len(mutations.UpdatedNodes, 1) + s.Equal(enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, s.nodeBackend.LastUpdateWorkflowState()) + s.Equal(enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, s.nodeBackend.LastUpdateWorkflowStatus()) +} + +func (s *nodeSuite) preorderAndAssertParent( + n *Node, + parent *Node, +) []*persistencespb.ChasmNode { + s.Equal(parent, n.parent) + + var nodes []*persistencespb.ChasmNode + nodes = append(nodes, n.serializedNode) + + childNames := make([]string, 0, len(n.children)) + for childName := range n.children { + childNames = append(childNames, childName) + } + sort.Strings(childNames) + + for _, childName := range childNames { + nodes = append(nodes, s.preorderAndAssertParent(n.children[childName], n)...) 
+ } + + return nodes +} + +type testNodePathEncoder struct{} + +var _ NodePathEncoder = (*testNodePathEncoder)(nil) + +func (e *testNodePathEncoder) Encode( + _ *Node, + path []string, +) (string, error) { + return strings.Join(path, "/"), nil +} + +func (e *testNodePathEncoder) Decode( + encodedPath string, +) ([]string, error) { + if encodedPath == "" { + return rootPath, nil + } + return strings.Split(encodedPath, "/"), nil +} + +func (s *nodeSuite) nodeBase() *nodeBase { + return &nodeBase{ + registry: s.registry, + timeSource: s.timeSource, + backend: s.nodeBackend, + pathEncoder: s.nodePathEncoder, + + mutation: NodesMutation{ + UpdatedNodes: make(map[string]*persistencespb.ChasmNode), + DeletedNodes: make(map[string]struct{}), + }, + systemMutation: NodesMutation{ + UpdatedNodes: make(map[string]*persistencespb.ChasmNode), + DeletedNodes: make(map[string]struct{}), + }, + newTasks: make(map[any][]taskWithAttributes), + taskValueCache: make(map[*commonpb.DataBlob]reflect.Value), + } +} + +// Helper method to create a test tree for TestComponent. +func (s *nodeSuite) testComponentTree() *Node { + s.nodeBackend.HandleNextTransitionCount = func() int64 { return 1 } + s.nodeBackend.HandleGetCurrentVersion = func() int64 { return 1 } + + var nilSerializedNodes map[string]*persistencespb.ChasmNode + // Create an empty tree. + node, err := s.newTestTree(nilSerializedNodes) + s.NoError(err) + s.Nil(node.value) + + // Get an empty top-level component from the empty tree. + err = node.deserialize(reflect.TypeFor[*TestComponent]()) + s.NoError(err) + s.NotNil(node.value) + s.IsType(&TestComponent{}, node.value) + s.Equal(valueStateSynced, node.valueState) + + tc, err := node.Component(NewMutableContext(context.Background(), node), ComponentRef{componentPath: rootPath}) + s.NoError(err) + s.Equal(valueStateNeedSyncStructure, node.valueState) + // Create subcomponents by assigning fields to TestComponent instance. 
+ setTestComponentFields(tc.(*TestComponent), s.nodeBackend) + + // Sync tree with subcomponents of TestComponent. + err = node.syncSubComponents() + s.False(node.needsPointerResolution) + s.NoError(err) + s.Empty(node.mutation.DeletedNodes) + + return node // maybe tc too +} + +func (s *nodeSuite) TestExecuteImmediatePureTask() { + root := s.testComponentTree() + + mutations, err := root.CloseTransaction() + s.NoError(err) + + // Start a clean transaction. + + mutableContext := NewMutableContext(context.Background(), root) + component, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + testComponent := component.(*TestComponent) + + taskAttributes := TaskAttributes{ScheduledTime: TaskScheduledTimeImmediate} + mutableContext.AddTask( + testComponent, + taskAttributes, + &TestPureTask{ + Payload: &commonpb.Payload{Data: []byte("root-task-payload")}, + }, + ) + + sc1 := testComponent.SubComponent1.Get(mutableContext) + + mutableContext.AddTask( + sc1, + taskAttributes, + &TestPureTask{ + Payload: &commonpb.Payload{Data: []byte("sc1-task-payload")}, + }, + ) + + // One valid task, one invalid task + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Eq(taskAttributes), gomock.Any()).Return(false, nil).Times(1) + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Eq(taskAttributes), gomock.Any()).Return(true, nil).Times(1) + s.testLibrary.mockPureTaskHandler.EXPECT(). + Execute( + gomock.AssignableToTypeOf(&mutableCtx{}), + gomock.Any(), + gomock.Eq(taskAttributes), + gomock.Any(), + ).Return(nil).Times(1) + + mutations, err = root.CloseTransaction() + s.NoError(err) + s.Len(mutations.UpdatedNodes, 2, "root and subcomponent1 should be updated") + s.Empty(mutations.DeletedNodes) + + // immedidate pure tasks will be executed inline and no physical chasm pure task will be generated. 
+ s.Equal(tasks.MaximumKey.FireTime, s.nodeBackend.LastDeletePureTaskCall()) +} + +func (s *nodeSuite) TestEachPureTask() { + now := s.timeSource.Now() + + mustEncode := func(m proto.Message) *commonpb.DataBlob { + taskBlob, err := serialization.ProtoEncode(m) + s.NoError(err) + return taskBlob + } + + // Set up a tree with expired and unexpired pure tasks. + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + // Expired + TypeId: testPureTaskTypeID, + ScheduledTime: timestamppb.New(now), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 1, + PhysicalTaskStatus: physicalTaskStatusCreated, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("some-random-data-root"), + }), + }, + }, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + // Not expired yet. 
+ ScheduledTime: timestamppb.New(now.Add(time.Hour)), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 2, + PhysicalTaskStatus: physicalTaskStatusCreated, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("some-random-data-sc1"), + }), + }, + }, + }, + }, + }, + }, + "SubComponent1/SubComponent11": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent11TypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + // Expired, and physical task not created + ScheduledTime: timestamppb.New(now), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 3, + PhysicalTaskStatus: physicalTaskStatusNone, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("some-random-data-sc11-1"), + }), + }, + { + TypeId: testPureTaskTypeID, + // Expired, but when processing this task, delete the SubComponent11 itself. + ScheduledTime: timestamppb.New(now), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 4, + PhysicalTaskStatus: physicalTaskStatusCreated, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("some-random-data-sc11-2"), + }), + }, + { + TypeId: testPureTaskTypeID, + // Expired, but should not be executed because previous task deletes SubComponent1 + // (this node's parent). 
+ ScheduledTime: timestamppb.New(now), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 5, + PhysicalTaskStatus: physicalTaskStatusCreated, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("some-random-data-sc11-3"), + }), + }, + }, + }, + }, + }, + }, + "SubComponent2": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent2TypeID, + PureTasks: []*persistencespb.ChasmComponentAttributes_Task{ + { + TypeId: testPureTaskTypeID, + // Expired. However, this task won't be executed because the node is deleted + // when processing the pure task from the root component. + ScheduledTime: timestamppb.New(now), + VersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + VersionedTransitionOffset: 6, + PhysicalTaskStatus: physicalTaskStatusCreated, + Data: mustEncode(&commonpb.Payload{ + Data: []byte("some-random-data-sc2"), + }), + }, + }, + }, + }, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + s.NotNil(root) + + processedTaskData := [][]byte{} + err = root.EachPureTask(now.Add(time.Minute), func(handler NodePureTask, taskAttributes TaskAttributes, task any) (bool, error) { + s.NotNil(handler) + s.NotNil(taskAttributes) + + testPureTask, ok := task.(*TestPureTask) + s.True(ok) + + processedTaskData = append(processedTaskData, testPureTask.Payload.Data) + + // When processing root component task, delete SubComponent2 to verify its task is not executed. 
+ if slices.Equal( + testPureTask.Payload.Data, + []byte("some-random-data-root"), + ) { + mutableContext := NewMutableContext(context.Background(), root) + rootComponent, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + + rootComponent.(*TestComponent).SubComponent2 = NewEmptyField[*TestSubComponent2]() + } + + // When processing task for SubComponent11, delete its parent SubComponent1 so that the remaining task is not executed. + if slices.Equal( + testPureTask.Payload.Data, + []byte("some-random-data-sc11-2"), + ) { + mutableContext := NewMutableContext(context.Background(), root) + rootComponent, err := root.Component(mutableContext, ComponentRef{}) + s.NoError(err) + + rootComponent.(*TestComponent).SubComponent1 = NewEmptyField[*TestSubComponent1]() + } + + return true, nil + }) + s.NoError(err) + s.Equal([][]byte{ + []byte("some-random-data-root"), + []byte("some-random-data-sc11-1"), + []byte("some-random-data-sc11-2"), + }, processedTaskData) + s.Len(root.taskValueCache, 1) // only one task from root component +} + +func (s *nodeSuite) TestExecutePureTask() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + } + + taskAttributes := TaskAttributes{} + pureTask := &TestPureTask{ + Payload: &commonpb.Payload{ + Data: []byte("some-random-data"), + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + s.NotNil(root) + ctx := context.Background() + + expectExecute := func(result error) { + s.testLibrary.mockPureTaskHandler.EXPECT(). 
+ Execute( + gomock.AssignableToTypeOf(&mutableCtx{}), + gomock.AssignableToTypeOf(&TestComponent{}), + gomock.Eq(taskAttributes), + gomock.Eq(pureTask), + ).Return(result).Times(1) + } + + expectValidate := func(retValue bool, errValue error) { + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Eq(taskAttributes), gomock.Any()).Return(retValue, errValue).Times(1) + } + + // Succeed task execution and validation (happy case). + root.setValueState(valueStateSynced) + expectExecute(nil) + expectValidate(true, nil) + executed, err := root.ExecutePureTask(ctx, taskAttributes, pureTask) + s.NoError(err) + s.True(executed) + s.Equal(valueStateNeedSyncStructure, root.valueState) + + expectedErr := errors.New("dummy") + + // Succeed validation, fail execution. + root.setValueState(valueStateSynced) + expectExecute(expectedErr) + expectValidate(true, nil) + _, err = root.ExecutePureTask(ctx, taskAttributes, pureTask) + s.ErrorIs(expectedErr, err) + s.Equal(valueStateNeedSyncStructure, root.valueState) + + // Fail task validation (no execution occurs). + root.setValueState(valueStateSynced) + expectValidate(false, nil) + executed, err = root.ExecutePureTask(ctx, taskAttributes, pureTask) + s.NoError(err) + s.False(executed) + s.Equal(valueStateSynced, root.valueState) // task not executed, so node is clean + + // Error during task validation (no execution occurs). 
+ root.setValueState(valueStateSynced) + expectValidate(false, expectedErr) + _, err = root.ExecutePureTask(ctx, taskAttributes, pureTask) + s.ErrorIs(expectedErr, err) + s.Equal(valueStateSynced, root.valueState) // task not executed, so node is clean +} + +func (s *nodeSuite) TestValidatePureTask() { + taskAttributes := TaskAttributes{} + pureTask := &TestPureTask{ + Payload: &commonpb.Payload{ + Data: []byte("some-random-data"), + }, + } + + root := s.testComponentTree() + _, err := root.CloseTransaction() + s.NoError(err) + + ctx := context.Background() + expectValidate := func(retValue bool, errValue error) { + s.testLibrary.mockPureTaskHandler.EXPECT(). + Validate(gomock.Any(), gomock.Any(), gomock.Eq(taskAttributes), gomock.Any()).Return(retValue, errValue).Times(1) + } + + // Succeed task validation (happy case). + expectValidate(true, nil) + valid, err := root.ValidatePureTask(ctx, taskAttributes, pureTask) + s.NoError(err) + s.True(valid) + s.Equal(valueStateSynced, root.valueState) // node is always clean for task validation + + // Invalid task (validation returns false). + expectValidate(false, nil) + valid, err = root.ValidatePureTask(ctx, taskAttributes, pureTask) + s.NoError(err) + s.False(valid) + s.Equal(valueStateSynced, root.valueState) // node is always clean for task validation + + // Error during task validation (no execution occurs). + expectedErr := errors.New("dummy") + expectValidate(false, expectedErr) + _, err = root.ValidatePureTask(ctx, taskAttributes, pureTask) + s.ErrorIs(expectedErr, err) + s.Equal(valueStateSynced, root.valueState) // node is always clean for task validation + + // Close the root component. + mutableCtx := NewMutableContext(ctx, root) + rootComponent, err := root.ComponentByPath(mutableCtx, rootPath) + s.NoError(err) + rootComponent.(*TestComponent).Complete(mutableCtx) + _, err = root.CloseTransaction() + s.NoError(err) + + // Invalid task for sub-component due to access rule. 
+ subComponent1, ok := root.children["SubComponent1"] + s.True(ok) + valid, err = subComponent1.ValidatePureTask(ctx, taskAttributes, pureTask) + s.NoError(err) + s.False(valid) + s.Equal(valueStateSynced, subComponent1.valueState) // node is always clean for task validation +} + +func (s *nodeSuite) TestExecuteSideEffectTask() { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + }, + }, + }, + }, + } + + taskInfo := &persistencespb.ChasmTaskInfo{ + ComponentInitialVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: 1, + }, + ComponentLastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: 1, + }, + Path: []string{"SubComponent1"}, + TypeId: testSideEffectTaskTypeID, + ArchetypeId: testComponentTypeID, + Data: &commonpb.DataBlob{ + Data: nil, + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + }, + } + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + chasmTask := &tasks.ChasmTask{ + WorkflowKey: workflowKey, + VisibilityTimestamp: s.timeSource.Now(), + TaskID: 123, + Category: tasks.CategoryOutbound, + Destination: "destination", + Info: taskInfo, + } + executionKey := ExecutionKey{ + NamespaceID: chasmTask.NamespaceID, + BusinessID: chasmTask.WorkflowID, + RunID: 
chasmTask.RunID, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + s.NotNil(root) + + mockEngine := NewMockEngine(s.controller) + ctx := NewEngineContext(context.Background(), mockEngine) + + chasmContext := NewMutableContext(ctx, root) + var backendValidtionFnCalled bool + // This won't be called until access time. + dummyValidationFn := func(_ NodeBackend, _ Context, _ Component) error { + backendValidtionFnCalled = true + return nil + } + expectValidate := func(valid bool, validationErr error) { + backendValidtionFnCalled = false + s.testLibrary.mockSideEffectTaskHandler.EXPECT().Validate( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(valid, validationErr).Times(1) + } + expectExecute := func(result error) { + s.testLibrary.mockSideEffectTaskHandler.EXPECT(). + Execute( + gomock.Any(), + gomock.Any(), + gomock.Eq(TaskAttributes{ + chasmTask.GetVisibilityTime(), + chasmTask.Destination, + }), + gomock.Any(), + ).DoAndReturn( + func(_ context.Context, ref ComponentRef, _ TaskAttributes, _ *TestSideEffectTask) error { + s.NotNil(ref.validationFn) + s.Equal(taskInfo.GetArchetypeId(), uint32(ref.archetypeID)) + + // Accessing the Component should trigger the validationFn. + component, err := root.Component(chasmContext, ref) + if err != nil { + return err + } + s.IsType(&TestSubComponent1{}, component) + return result + }).Times(1) + } + + // Succeed task execution. + expectValidate(true, nil) + expectExecute(nil) + err = root.ExecuteSideEffectTask(ctx, executionKey, chasmTask, dummyValidationFn) + s.NoError(err) + s.True(backendValidtionFnCalled) + s.True(chasmTask.DeserializedTask.IsValid()) + + // Invalid task. + expectValidate(false, nil) + expectExecute(nil) + err = root.ExecuteSideEffectTask(ctx, executionKey, chasmTask, dummyValidationFn) + s.Error(err) + s.IsType(&serviceerror.NotFound{}, err) + s.True(chasmTask.DeserializedTask.IsValid()) + + // Failed to validate task. 
+ validationErr := errors.New("validation error") + expectValidate(false, validationErr) + expectExecute(nil) + err = root.ExecuteSideEffectTask(ctx, executionKey, chasmTask, dummyValidationFn) + s.ErrorIs(validationErr, err) + s.False(chasmTask.DeserializedTask.IsValid()) + + // Fail task execution. + expectValidate(true, nil) + executionErr := errors.New("execution error") + expectExecute(executionErr) + err = root.ExecuteSideEffectTask(ctx, executionKey, chasmTask, dummyValidationFn) + s.ErrorIs(executionErr, err) + s.True(backendValidtionFnCalled) + s.False(chasmTask.DeserializedTask.IsValid()) +} + +func (s *nodeSuite) TestExecuteSideEffectDiscardTask() { + setup := func() (*Node, *tasks.ChasmTask, ExecutionKey, context.Context, Context) { + persistenceNodes := map[string]*persistencespb.ChasmNode{ + "": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testComponentTypeID, + }, + }, + }, + }, + "SubComponent1": { + Metadata: &persistencespb.ChasmNodeMetadata{ + InitialVersionedTransition: &persistencespb.VersionedTransition{TransitionCount: 1}, + Attributes: &persistencespb.ChasmNodeMetadata_ComponentAttributes{ + ComponentAttributes: &persistencespb.ChasmComponentAttributes{ + TypeId: testSubComponent1TypeID, + }, + }, + }, + }, + } + + root, err := s.newTestTree(persistenceNodes) + s.NoError(err) + s.NotNil(root) + + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + chasmTask := &tasks.ChasmTask{ + WorkflowKey: workflowKey, + VisibilityTimestamp: s.timeSource.Now(), + TaskID: 123, + Category: tasks.CategoryOutbound, + Destination: "destination", + Info: &persistencespb.ChasmTaskInfo{ + ComponentInitialVersionedTransition: 
&persistencespb.VersionedTransition{ + TransitionCount: 1, + }, + ComponentLastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: 1, + }, + Path: []string{"SubComponent1"}, + TypeId: testDiscardableSideEffectTaskTypeID, + ArchetypeId: testComponentTypeID, + Data: &commonpb.DataBlob{ + Data: nil, + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + }, + }, + } + executionKey := ExecutionKey{ + NamespaceID: chasmTask.NamespaceID, + BusinessID: chasmTask.WorkflowID, + RunID: chasmTask.RunID, + } + + mockEngine := NewMockEngine(s.controller) + ctx := NewEngineContext(context.Background(), mockEngine) + chasmContext := NewMutableContext(ctx, root) + + return root, chasmTask, executionKey, ctx, chasmContext + } + + s.Run("Success", func() { + root, chasmTask, executionKey, ctx, chasmContext := setup() + + var validationFnCalled bool + dummyValidationFn := func(_ NodeBackend, _ Context, _ Component) error { + validationFnCalled = true + return nil + } + + s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Validate( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(true, nil).Times(1) + s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Discard( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).DoAndReturn(func( + _ context.Context, ref ComponentRef, _ TaskAttributes, _ *TestDiscardableSideEffectTask, + ) error { + s.NotNil(ref.validationFn) + s.Equal(chasmTask.Info.GetArchetypeId(), uint32(ref.archetypeID)) + component, err := root.Component(chasmContext, ref) + if err != nil { + return err + } + s.IsType(&TestSubComponent1{}, component) + return nil + }).Times(1) + + err := root.ExecuteSideEffectDiscardTask(ctx, executionKey, chasmTask, dummyValidationFn) + s.NoError(err) + s.True(validationFnCalled) + s.True(chasmTask.DeserializedTask.IsValid()) + }) + + s.Run("InvalidTask", func() { + root, chasmTask, executionKey, ctx, chasmContext := setup() + + 
s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Validate( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(false, nil).Times(1) + s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Discard( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).DoAndReturn(func( + _ context.Context, ref ComponentRef, _ TaskAttributes, _ *TestDiscardableSideEffectTask, + ) error { + _, err := root.Component(chasmContext, ref) + return err + }).Times(1) + + err := root.ExecuteSideEffectDiscardTask(ctx, executionKey, chasmTask, func(_ NodeBackend, _ Context, _ Component) error { return nil }) + s.ErrorAs(err, new(*serviceerror.NotFound)) + }) + + s.Run("ValidationError", func() { + root, chasmTask, executionKey, ctx, chasmContext := setup() + + validationErr := errors.New("validation error") + s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Validate( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(false, validationErr).Times(1) + s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Discard( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).DoAndReturn(func( + _ context.Context, ref ComponentRef, _ TaskAttributes, _ *TestDiscardableSideEffectTask, + ) error { + _, err := root.Component(chasmContext, ref) + return err + }).Times(1) + + err := root.ExecuteSideEffectDiscardTask( + ctx, executionKey, chasmTask, func(_ NodeBackend, _ Context, _ Component) error { return nil }) + s.ErrorIs(err, validationErr) + }) + + s.Run("DiscardHandlerError", func() { + root, chasmTask, executionKey, ctx, chasmContext := setup() + + var validationFnCalled bool + dummyValidationFn := func(_ NodeBackend, _ Context, _ Component) error { + validationFnCalled = true + return nil + } + + s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Validate( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(true, nil).Times(1) + discardErr := errors.New("discard error") + 
s.testLibrary.mockDiscardableSideEffectHandler.EXPECT().Discard( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).DoAndReturn(func( + _ context.Context, ref ComponentRef, _ TaskAttributes, _ *TestDiscardableSideEffectTask, + ) error { + s.NotNil(ref.validationFn) + if _, err := root.Component(chasmContext, ref); err != nil { + return err + } + return discardErr + }).Times(1) + + err := root.ExecuteSideEffectDiscardTask(ctx, executionKey, chasmTask, dummyValidationFn) + s.ErrorIs(err, discardErr) + s.True(validationFnCalled) + }) +} + +func (s *nodeSuite) TestValidateSideEffectTask() { + taskInfo := &persistencespb.ChasmTaskInfo{ + ComponentInitialVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: 1, + NamespaceFailoverVersion: 1, + }, + ComponentLastUpdateVersionedTransition: &persistencespb.VersionedTransition{ + TransitionCount: 1, + NamespaceFailoverVersion: 1, + }, + Path: rootPath, + TypeId: testSideEffectTaskTypeID, + Data: &commonpb.DataBlob{ + Data: nil, + EncodingType: enumspb.ENCODING_TYPE_PROTO3, + }, + } + workflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + chasmTask := &tasks.ChasmTask{ + WorkflowKey: workflowKey, + VisibilityTimestamp: s.timeSource.Now(), + TaskID: 123, + Category: tasks.CategoryTransfer, + Info: taskInfo, + } + + root := s.testComponentTree() + + mockEngine := NewMockEngine(s.controller) + ctx := NewEngineContext(context.Background(), mockEngine) + + expectValidate := func(componentType any, retValue bool, errValue error) { + s.testLibrary.mockSideEffectTaskHandler.EXPECT(). 
+ Validate( + gomock.AssignableToTypeOf((*immutableCtx)(nil)), + gomock.AssignableToTypeOf(componentType), + gomock.Eq(TaskAttributes{ + ScheduledTime: chasmTask.GetVisibilityTime(), + Destination: chasmTask.Destination, + }), + gomock.AssignableToTypeOf(&TestSideEffectTask{}), + ).Return(retValue, errValue).Times(1) + } + + // Succeed validation as valid. + expectValidate((*TestComponent)(nil), true, nil) + isValid, err := root.ValidateSideEffectTask(ctx, chasmTask) + s.True(isValid) + s.NoError(err) + s.True(chasmTask.DeserializedTask.IsValid()) + + // Succeed validation as invalid. + expectValidate((*TestComponent)(nil), false, nil) + isValid, err = root.ValidateSideEffectTask(ctx, chasmTask) + s.False(isValid) + s.NoError(err) + s.True(chasmTask.DeserializedTask.IsValid()) + + // Fail validation. + expectedErr := errors.New("validation failed") + expectValidate((*TestComponent)(nil), false, expectedErr) + isValid, err = root.ValidateSideEffectTask(ctx, chasmTask) + s.False(isValid) + s.ErrorIs(expectedErr, err) + s.False(chasmTask.DeserializedTask.IsValid()) + + // Succeed validation as valid for a sub component. + childTaskInfo := taskInfo + childTaskInfo.Path = []string{"SubComponent1"} + childWorkflowKey := definition.NewWorkflowKey( + primitives.NewUUID().String(), + primitives.NewUUID().String(), + primitives.NewUUID().String(), + ) + childChasmTask := &tasks.ChasmTask{ + WorkflowKey: childWorkflowKey, + VisibilityTimestamp: s.timeSource.Now(), + TaskID: 124, + Category: tasks.CategoryTransfer, + Info: childTaskInfo, + } + expectValidate((*TestSubComponent1)(nil), true, nil) + isValid, err = root.ValidateSideEffectTask(ctx, childChasmTask) + s.True(isValid) + s.NoError(err) + s.True(childChasmTask.DeserializedTask.IsValid()) + + // Succeed validation as invalid since parent is closed. 
+ mutableCtx := NewMutableContext(ctx, root) + rootComponent, err := root.ComponentByPath(mutableCtx, rootPath) + s.NoError(err) + rootComponent.(*TestComponent).Complete(mutableCtx) + // Note there's also no mock for task validator here in this case. + // Access rule is checked first. + isValid, err = root.ValidateSideEffectTask(ctx, childChasmTask) + s.False(isValid) + s.NoError(err) + s.True(childChasmTask.DeserializedTask.IsValid()) +} + +func (s *nodeSuite) TestAndAllChildren_PathIndependence() { + // Build a tree deep enough to trigger Go's slice capacity doubling. + // append grows cap: 0→1→2→4. At depth 3, the path slice has len=3, cap=4, + // so a 4th append reuses the backing array. If node P at depth 3 has siblings + // S1 and S2 at depth 4, the second sibling's append overwrites S1's path. + // + // Tree: root → A → B → C → {S1, S2} + root := &Node{ + nodeName: "", + children: map[string]*Node{ + "A": {nodeName: "A", children: map[string]*Node{ + "B": {nodeName: "B", children: map[string]*Node{ + "C": {nodeName: "C", children: map[string]*Node{ + "S1": {nodeName: "S1", children: map[string]*Node{}}, + "S2": {nodeName: "S2", children: map[string]*Node{}}, + }}, + }}, + }}, + }, + } + + // Store raw path slices (not copies!) so we can detect mutation. + collected := make(map[string][]string) + for path, node := range root.andAllChildren() { + collected[node.nodeName] = path + } + + // Verify S1/S2 do not have a corrupted path + // because append reused the backing array at depth 3→4. 
+ s.Equal([]string{"A", "B", "C", "S1"}, collected["S1"]) + s.Equal([]string{"A", "B", "C", "S2"}, collected["S2"]) +} + +func (s *nodeSuite) newTestTree( + serializedNodes map[string]*persistencespb.ChasmNode, +) (*Node, error) { + if len(serializedNodes) == 0 { + return NewEmptyTree(s.registry, s.timeSource, s.nodeBackend, s.nodePathEncoder, s.logger, s.metricsHandler), nil + } + return NewTreeFromDB(serializedNodes, s.registry, s.timeSource, s.nodeBackend, s.nodePathEncoder, s.logger, s.metricsHandler) +} diff --git a/chasm/visibility.go b/chasm/visibility.go new file mode 100644 index 00000000000..50f74530ddc --- /dev/null +++ b/chasm/visibility.go @@ -0,0 +1,362 @@ +package chasm + +import ( + "context" + "fmt" + "strings" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/searchattribute/sadefs" + "google.golang.org/protobuf/proto" +) + +const ( + UserMemoKey = "__user__" + ChasmMemoKey = "__chasm__" + + visibilityComponentType = "core.vis" + visibilityTaskType = "core.visTask" +) + +var ( + visibilityComponentTypeID = GenerateTypeID(visibilityComponentType) + visibilityTaskTypeID = GenerateTypeID(visibilityTaskType) +) + +// VisibilitySearchAttributesProvider if implemented by the root Component, +// allows the CHASM framework to automatically determine, at the end of +// a transaction, if a visibility task needs to be generated to update the +// visibility record with the returned search attributes. 
+type VisibilitySearchAttributesProvider interface { + SearchAttributes(Context) []SearchAttributeKeyValue +} + +// VisibilityMemoProvider if implemented by the root Component, +// allows the CHASM framework to automatically determine, at the end of +// a transaction, if a visibility task needs to be generated to update the +// visibility record with the returned memo. +type VisibilityMemoProvider interface { + Memo(Context) proto.Message +} + +// VisibilitySearchAttributesMapper is a mapper for CHASM search attributes. +type VisibilitySearchAttributesMapper struct { + // map from CHASM and predefined search attribute aliases to field names. + aliasToField map[string]string + fieldToAlias map[string]string + saTypeMap map[string]enumspb.IndexedValueType + + // map from system search attribute aliases to field names. + systemAliasToField map[string]string +} + +// Alias returns the alias for a given field. +func (v *VisibilitySearchAttributesMapper) Alias(field string) (string, error) { + if v == nil { + return "", serviceerror.NewInvalidArgument("visibility search attributes mapper not defined") + } + alias, ok := v.fieldToAlias[field] + if !ok { + return "", serviceerror.NewInvalidArgument(fmt.Sprintf("visibility search attributes mapper has no registered field %q", field)) + } + return alias, nil +} + +// Field returns the field for a given alias. +func (v *VisibilitySearchAttributesMapper) Field(alias string) (string, error) { + if v == nil { + return "", serviceerror.NewInvalidArgument("visibility search attributes mapper not defined") + } + if field, ok := v.aliasToField[alias]; ok { + return field, nil + } + if field, ok := v.resolveSystemAlias(alias); ok { + return field, nil + } + return "", serviceerror.NewInvalidArgument(fmt.Sprintf("visibility search attributes mapper has no registered alias %q", alias)) +} + +// resolveSystemAlias resolves a system search attribute alias to its field name. 
+// It handles the `Temporal` prefix variations (e.g., "ScheduleId" and "TemporalScheduleId"). +func (v *VisibilitySearchAttributesMapper) resolveSystemAlias(alias string) (string, bool) { + if v.systemAliasToField == nil { + return "", false + } + if field, ok := v.systemAliasToField[alias]; ok { + return field, true + } + // Try without the `Temporal` prefix. + if strings.HasPrefix(alias, sadefs.ReservedPrefix) { + withoutPrefix := alias[len(sadefs.ReservedPrefix):] + if field, ok := v.systemAliasToField[withoutPrefix]; ok { + return field, true + } + } else { + // Try with the `Temporal` prefix. + withPrefix := sadefs.ReservedPrefix + alias + if field, ok := v.systemAliasToField[withPrefix]; ok { + return field, true + } + } + return "", false +} + +// SATypeMap returns the type map for the CHASM search attributes. +func (v *VisibilitySearchAttributesMapper) SATypeMap() map[string]enumspb.IndexedValueType { + if v == nil { + return nil + } + return v.saTypeMap +} + +// ValueType returns the type of a CHASM search attribute field. +// Returns an error if the field is not found in the type map. +func (v *VisibilitySearchAttributesMapper) ValueType(fieldName string) (enumspb.IndexedValueType, error) { + if v == nil { + return enumspb.INDEXED_VALUE_TYPE_UNSPECIFIED, serviceerror.NewInvalidArgument("visibility search attributes mapper not defined") + } + typ, ok := v.saTypeMap[fieldName] + if !ok { + return enumspb.INDEXED_VALUE_TYPE_UNSPECIFIED, serviceerror.NewInvalidArgumentf("visibility search attributes mapper has no registered field %q", fieldName) + } + return typ, nil +} + +type Visibility struct { + UnimplementedComponent + + Data *persistencespb.ChasmVisibilityData + + // Do NOT access those fields directly. + // Use the provided getters and setters instead. 
+ SA Field[*commonpb.SearchAttributes] + Memo Field[*commonpb.Memo] +} + +func NewVisibility( + mutableContext MutableContext, +) *Visibility { + visibility := &Visibility{ + Data: &persistencespb.ChasmVisibilityData{ + TransitionCount: 0, + }, + } + + visibility.generateTask(mutableContext) + return visibility +} + +func NewVisibilityWithData( + mutableContext MutableContext, + customSearchAttributes map[string]*commonpb.Payload, + customMemo map[string]*commonpb.Payload, +) *Visibility { + visibility := &Visibility{ + Data: &persistencespb.ChasmVisibilityData{ + TransitionCount: 0, + }, + } + + // Filter out nil/empty payload values for search attributes. + filteredSA := payload.MergeMapOfPayload(nil, customSearchAttributes) + if len(filteredSA) != 0 { + visibility.SA = NewDataField( + mutableContext, + &commonpb.SearchAttributes{IndexedFields: filteredSA}, + ) + } + if len(customMemo) != 0 { + visibility.Memo = NewDataField( + mutableContext, + &commonpb.Memo{Fields: customMemo}, + ) + } + + visibility.generateTask(mutableContext) + return visibility +} + +func (v *Visibility) LifecycleState(_ Context) LifecycleState { + return LifecycleStateRunning +} + +// CustomSearchAttributes returns the stored custom search attribute fields. +// Nil is returned if there are none. +// +// Returned map is NOT a deep copy of the underlying data, so do NOT modify it +// directly, use Merge/ReplaceCustomSearchAttributes methods instead. +func (v *Visibility) CustomSearchAttributes( + chasmContext Context, +) map[string]*commonpb.Payload { + sa, _ := v.SA.TryGet(chasmContext) + // nil check handled by the proto getter. + return sa.GetIndexedFields() +} + +// MergeCustomSearchAttributes merges the provided custom search attribute fields into the existing ones. +// - If a key in `customSearchAttributes` already exists, +// the value in `customSearchAttributes` replaces the existing value. 
+// - If a key in `customSearchAttributes` has nil or empty slice payload value, +// the key is deleted from the existing search attributes if it exists. +// If all search attributes are removed, the underlying search attributes node is deleted. +// - If `customSearchAttributes` is empty, this is a no-op. +func (v *Visibility) MergeCustomSearchAttributes( + mutableContext MutableContext, + customSearchAttributes map[string]*commonpb.Payload, +) { + if len(customSearchAttributes) == 0 { + return + } + + currentSA, ok := v.SA.TryGet(mutableContext) + if !ok { + currentSA = &commonpb.SearchAttributes{} + v.SA = NewDataField(mutableContext, currentSA) + } + + currentSA.IndexedFields = payload.MergeMapOfPayload( + currentSA.GetIndexedFields(), + customSearchAttributes, + ) + if len(currentSA.IndexedFields) == 0 { + v.SA = NewEmptyField[*commonpb.SearchAttributes]() + } + + v.generateTask(mutableContext) +} + +// ReplaceCustomSearchAttributes replaces the existing custom search attribute fields with the provided ones. +// Nil/empty payload values are filtered. +// If `customSearchAttributes` is empty or all values are nil after filtering, the underlying search attributes node is deleted. +func (v *Visibility) ReplaceCustomSearchAttributes( + mutableContext MutableContext, + customSearchAttributes map[string]*commonpb.Payload, +) { + // Filter out nil/empty payload values. + filteredSA := payload.MergeMapOfPayload(nil, customSearchAttributes) + + if len(filteredSA) == 0 { + _, ok := v.SA.TryGet(mutableContext) + if !ok { + // Already empty, no-op + return + } + + v.SA = NewEmptyField[*commonpb.SearchAttributes]() + } else { + v.SA = NewDataField( + mutableContext, + &commonpb.SearchAttributes{IndexedFields: filteredSA}, + ) + } + + v.generateTask(mutableContext) +} + +// CustomMemo returns the stored custom memo fields. +// Nil is returned if there are none. 
+// +// Returned map is NOT a deep copy of the underlying data, so do NOT modify it +// directly, use Merge/ReplaceCustomMemo methods instead. +func (v *Visibility) CustomMemo( + chasmContext Context, +) map[string]*commonpb.Payload { + memo, _ := v.Memo.TryGet(chasmContext) + // nil check handled by the proto getter. + return memo.GetFields() +} + +// MergeCustomMemo merges the provided custom memo fields into the existing ones. +// - If a key in `customMemo` already exists, +// the value in `customMemo` replaces the existing value. +// - If a key in `customMemo` has nil or empty slice payload value, +// the key is deleted from the existing memo if it exists. +// If all memo fields are removed, the underlying memo node is deleted. +// - If `customMemo` is empty, this is a no-op. +func (v *Visibility) MergeCustomMemo( + mutableContext MutableContext, + customMemo map[string]*commonpb.Payload, +) { + if len(customMemo) == 0 { + return + } + + currentMemo, ok := v.Memo.TryGet(mutableContext) + if !ok { + currentMemo = &commonpb.Memo{} + v.Memo = NewDataField(mutableContext, currentMemo) + } + + currentMemo.Fields = payload.MergeMapOfPayload( + currentMemo.GetFields(), + customMemo, + ) + if len(currentMemo.Fields) == 0 { + v.Memo = NewEmptyField[*commonpb.Memo]() + } + v.generateTask(mutableContext) +} + +// ReplaceCustomMemo replaces the existing custom memo fields with the provided ones. +// If `customMemo` is empty, the underlying memo node is deleted. 
+func (v *Visibility) ReplaceCustomMemo( + mutableContext MutableContext, + customMemo map[string]*commonpb.Payload, +) { + if len(customMemo) == 0 { + _, ok := v.Memo.TryGet(mutableContext) + if !ok { + // Already empty, no-op + return + } + + v.Memo = NewEmptyField[*commonpb.Memo]() + } else { + v.Memo = NewDataField( + mutableContext, + &commonpb.Memo{Fields: customMemo}, + ) + } + + v.generateTask(mutableContext) +} + +func (v *Visibility) generateTask( + mutableContext MutableContext, +) { + v.Data.TransitionCount++ + mutableContext.AddTask( + v, + TaskAttributes{}, + &persistencespb.ChasmVisibilityTaskData{TransitionCount: v.Data.TransitionCount}, + ) +} + +type visibilityTaskHandler struct { + SideEffectTaskHandlerBase[*persistencespb.ChasmVisibilityTaskData] +} + +var defaultVisibilityTaskHandler = &visibilityTaskHandler{} + +func (v *visibilityTaskHandler) Validate( + _ Context, + component *Visibility, + _ TaskAttributes, + task *persistencespb.ChasmVisibilityTaskData, +) (bool, error) { + return task.TransitionCount == component.Data.TransitionCount, nil +} + +func (v *visibilityTaskHandler) Execute( + _ context.Context, + _ ComponentRef, + _ TaskAttributes, + _ *persistencespb.ChasmVisibilityTaskData, +) error { + //nolint:forbidigo + panic("chasm visibilityTaskHandler should not be called directly") +} diff --git a/chasm/visibility_manager.go b/chasm/visibility_manager.go new file mode 100644 index 00000000000..fb36fd8e8b1 --- /dev/null +++ b/chasm/visibility_manager.go @@ -0,0 +1,180 @@ +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination visibility_manager_mock.go + +package chasm + +import ( + "context" + "reflect" + "time" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/api/visibilityservice/v1" + "go.temporal.io/server/common/payload" + "google.golang.org/protobuf/proto" +) + +type VisibilityManager interface { + ListExecutions( + context.Context, + reflect.Type, + 
*ListExecutionsRequest, + ) (*visibilityservice.ListChasmExecutionsResponse, error) + + CountExecutions( + context.Context, + reflect.Type, + *CountExecutionsRequest, + ) (*visibilityservice.CountChasmExecutionsResponse, error) +} + +type VisibilityExecutionInfo[M proto.Message] struct { + BusinessID string + RunID string + StartTime time.Time + CloseTime time.Time + HistoryLength int64 + HistorySizeBytes int64 + StateTransitionCount int64 + ChasmSearchAttributes SearchAttributesMap + CustomSearchAttributes map[string]*commonpb.Payload + Memo *commonpb.Memo + ChasmMemo M +} + +type ListExecutionsRequest struct { + NamespaceName string + Query string + PageSize int + NextPageToken []byte +} + +type ListExecutionsResponse[M proto.Message] struct { + Executions []*VisibilityExecutionInfo[M] + NextPageToken []byte +} + +type CountExecutionsRequest struct { + NamespaceName string + Query string +} + +type CountExecutionsResponse struct { + Count int64 + Groups []Group +} + +type Group struct { + Values []*commonpb.Payload + Count int64 +} + +// ListExecutions lists the executions of a CHASM archetype given an initial query. +// The query string can specify any combination of CHASM, custom, and predefined/system search attributes. +// The generic parameter C is the CHASM component type used for executions and search attribute filtering. +// The generic parameter M is the type of the memo payload to be unmarshaled from the execution. +// PageSize is required, must be greater than 0. +// NextPageToken is optional, set on subsequent requests to continue listing the next page of executions. +// Note: For CHASM executions, TemporalNamespaceDivision is the predefined search attribute +// that is used to identify the archetype of the execution. +// If the query string does not specify TemporalNamespaceDivision, the archetype C of the request will be used to filter the executions. 
+// If the initial query already specifies TemporalNamespaceDivision, the archetype C of the request will +// only be used to get the registered SearchAttributes. +func ListExecutions[C Component, M proto.Message]( + ctx context.Context, + request *ListExecutionsRequest, +) (*ListExecutionsResponse[M], error) { + archetypeType := reflect.TypeFor[C]() + response, err := visibilityManagerFromContext(ctx).ListExecutions(ctx, archetypeType, request) + if err != nil { + return nil, err + } + + // Convert response: decode ChasmSearchAttributes and ChasmMemo to type M + executions := make([]*VisibilityExecutionInfo[M], len(response.Executions)) + for i, execution := range response.Executions { + chasmSAs, err := newSearchAttributesMapFromProto(execution.ChasmSearchAttributes) + if err != nil { + return nil, err + } + + chasmMemoInterface := reflect.New(reflect.TypeFor[M]().Elem()).Interface() + chasmMemo, ok := chasmMemoInterface.(M) + if !ok { + return nil, serviceerror.NewInternalf("failed to cast chasm memo to type %s", reflect.TypeFor[M]().String()) + } + if err := payload.Decode(execution.ChasmMemo, chasmMemo); err != nil { + return nil, serviceerror.NewInternalf("failed to decode chasm memo: %v", err) + } + executions[i] = &VisibilityExecutionInfo[M]{ + BusinessID: execution.BusinessId, + RunID: execution.RunId, + StartTime: execution.StartTime.AsTime(), + CloseTime: execution.CloseTime.AsTime(), + HistoryLength: execution.HistoryLength, + HistorySizeBytes: execution.HistorySizeBytes, + StateTransitionCount: execution.StateTransitionCount, + ChasmSearchAttributes: chasmSAs, + CustomSearchAttributes: execution.CustomSearchAttributes.GetIndexedFields(), + Memo: execution.Memo, + ChasmMemo: chasmMemo, + } + } + + return &ListExecutionsResponse[M]{ + Executions: executions, + NextPageToken: response.NextPageToken, + }, nil +} + +// CountExecutions counts the executions of a CHASM archetype given an initial query. 
+// The generic parameter C is the CHASM component type used for executions and search attribute filtering. +// The query string can specify any combination of CHASM, custom, and predefined/system search attributes. +// Note: For CHASM executions, TemporalNamespaceDivision is the predefined search attribute +// that is used to identify the archetype of the execution. +// If the query string does not specify TemporalNamespaceDivision, the archetype C of the request will be used to count the executions. +// If the initial query already specifies TemporalNamespaceDivision, the archetype C of the request will +// only be used to get the registered SearchAttributes. +func CountExecutions[C Component]( + ctx context.Context, + request *CountExecutionsRequest, +) (*CountExecutionsResponse, error) { + archetypeType := reflect.TypeFor[C]() + visResponse, err := visibilityManagerFromContext(ctx).CountExecutions(ctx, archetypeType, request) + if err != nil { + return nil, err + } + + response := &CountExecutionsResponse{ + Count: visResponse.Count, + Groups: make([]Group, len(visResponse.Groups)), + } + for k, group := range visResponse.Groups { + response.Groups[k] = Group{ + Values: group.GroupValues, + Count: group.Count, + } + } + return response, nil +} + +type visibilityManagerCtxKeyType string + +const visibilityManagerCtxKey visibilityManagerCtxKeyType = "chasmVisibilityManager" + +func NewVisibilityManagerContext( + ctx context.Context, + engine VisibilityManager, +) context.Context { + return context.WithValue(ctx, visibilityManagerCtxKey, engine) +} + +func visibilityManagerFromContext( + ctx context.Context, +) VisibilityManager { + e, ok := ctx.Value(visibilityManagerCtxKey).(VisibilityManager) + if !ok { + return nil + } + return e +} diff --git a/chasm/visibility_manager_mock.go b/chasm/visibility_manager_mock.go new file mode 100644 index 00000000000..4a57c11377c --- /dev/null +++ b/chasm/visibility_manager_mock.go @@ -0,0 +1,72 @@ +// Code generated by 
MockGen. DO NOT EDIT. +// Source: visibility_manager.go +// +// Generated by this command: +// +// mockgen -package chasm -source visibility_manager.go -destination visibility_manager_mock.go +// + +// Package chasm is a generated GoMock package. +package chasm + +import ( + context "context" + reflect "reflect" + + visibilityservice "go.temporal.io/server/api/visibilityservice/v1" + gomock "go.uber.org/mock/gomock" +) + +// MockVisibilityManager is a mock of VisibilityManager interface. +type MockVisibilityManager struct { + ctrl *gomock.Controller + recorder *MockVisibilityManagerMockRecorder + isgomock struct{} +} + +// MockVisibilityManagerMockRecorder is the mock recorder for MockVisibilityManager. +type MockVisibilityManagerMockRecorder struct { + mock *MockVisibilityManager +} + +// NewMockVisibilityManager creates a new mock instance. +func NewMockVisibilityManager(ctrl *gomock.Controller) *MockVisibilityManager { + mock := &MockVisibilityManager{ctrl: ctrl} + mock.recorder = &MockVisibilityManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockVisibilityManager) EXPECT() *MockVisibilityManagerMockRecorder { + return m.recorder +} + +// CountExecutions mocks base method. +func (m *MockVisibilityManager) CountExecutions(arg0 context.Context, arg1 reflect.Type, arg2 *CountExecutionsRequest) (*visibilityservice.CountChasmExecutionsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CountExecutions", arg0, arg1, arg2) + ret0, _ := ret[0].(*visibilityservice.CountChasmExecutionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CountExecutions indicates an expected call of CountExecutions. 
+func (mr *MockVisibilityManagerMockRecorder) CountExecutions(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CountExecutions", reflect.TypeOf((*MockVisibilityManager)(nil).CountExecutions), arg0, arg1, arg2) +} + +// ListExecutions mocks base method. +func (m *MockVisibilityManager) ListExecutions(arg0 context.Context, arg1 reflect.Type, arg2 *ListExecutionsRequest) (*visibilityservice.ListChasmExecutionsResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListExecutions", arg0, arg1, arg2) + ret0, _ := ret[0].(*visibilityservice.ListChasmExecutionsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListExecutions indicates an expected call of ListExecutions. +func (mr *MockVisibilityManagerMockRecorder) ListExecutions(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListExecutions", reflect.TypeOf((*MockVisibilityManager)(nil).ListExecutions), arg0, arg1, arg2) +} diff --git a/chasm/visibility_task_test.go b/chasm/visibility_task_test.go new file mode 100644 index 00000000000..b991b4e2c4e --- /dev/null +++ b/chasm/visibility_task_test.go @@ -0,0 +1,26 @@ +package chasm + +import ( + "testing" + + "github.com/stretchr/testify/require" + persistencespb "go.temporal.io/server/api/persistence/v1" +) + +func TestTaskValidator(t *testing.T) { + ctx := &MockMutableContext{} + visibility := NewVisibility(ctx) + task := &persistencespb.ChasmVisibilityTaskData{ + TransitionCount: 3, + } + + visibility.Data.TransitionCount = 1 + valid, err := defaultVisibilityTaskHandler.Validate(ctx, visibility, TaskAttributes{}, task) + require.NoError(t, err) + require.False(t, valid) + + visibility.Data.TransitionCount = task.TransitionCount + valid, err = defaultVisibilityTaskHandler.Validate(ctx, visibility, TaskAttributes{}, task) + require.NoError(t, err) + require.True(t, valid) +} diff --git 
a/chasm/visibility_test.go b/chasm/visibility_test.go new file mode 100644 index 00000000000..4308791e60d --- /dev/null +++ b/chasm/visibility_test.go @@ -0,0 +1,351 @@ +package chasm + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + commonpb "go.temporal.io/api/common/v1" + persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/payload" + "go.temporal.io/server/common/testing/protorequire" +) + +type ( + visibilitySuite struct { + suite.Suite + *require.Assertions + + mockContext *MockContext + mockMutableContext *MockMutableContext + + registry *Registry + + visibility *Visibility + } +) + +func TestVisibilitySuite(t *testing.T) { + suite.Run(t, new(visibilitySuite)) +} + +func (s *visibilitySuite) SetupTest() { + s.initAssertions() + s.mockContext = &MockContext{} + s.mockMutableContext = &MockMutableContext{} + + s.registry = NewRegistry(log.NewTestLogger()) + err := s.registry.Register(&CoreLibrary{}) + s.NoError(err) + + s.visibility = NewVisibility(s.mockMutableContext) + s.Len(s.mockMutableContext.Tasks, 1) + s.mockMutableContext.Tasks = nil // Clear tasks added during creation +} + +func (s *visibilitySuite) SetupSubTest() { + s.initAssertions() +} + +func (s *visibilitySuite) initAssertions() { + s.Assertions = require.New(s.T()) +} + +func (s *visibilitySuite) TestComponentFqType() { + rc, ok := s.registry.ComponentFor(&Visibility{}) + s.True(ok) + s.Equal(visibilityComponentType, rc.FqType()) +} + +func (s *visibilitySuite) TestTaskFqType() { + rc, ok := s.registry.TaskFor(&persistencespb.ChasmVisibilityTaskData{}) + s.True(ok) + s.Equal(visibilityTaskType, rc.FqType()) +} + +func (s *visibilitySuite) TestLifeCycleState() { + s.Equal(LifecycleStateRunning, s.visibility.LifecycleState(s.mockMutableContext)) +} + +func (s *visibilitySuite) TestMergeCustomSearchAttributes() { + sa := 
s.visibility.CustomSearchAttributes(s.mockMutableContext) + s.Empty(sa) + + stringKey, stringVal := "stringKey", "stringValue" + intKey, intVal := "intKey", 42 + floatKey, floatVal := "floatKey", 3.14 + + // Add SA via Visibility struct method. + s.visibility.MergeCustomSearchAttributes( + s.mockMutableContext, + map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + intKey: s.mustEncode(intVal), + floatKey: s.mustEncode(floatVal), + }, + ) + s.Len(s.mockMutableContext.Tasks, 1) + s.assertTaskPayload(2, s.mockMutableContext.Tasks[0].Payload) + + sa = s.visibility.CustomSearchAttributes(s.mockMutableContext) + s.Len(sa, 3) + + var actualStringVal string + err := payload.Decode(sa[stringKey], &actualStringVal) + s.NoError(err) + s.Equal(stringVal, actualStringVal) + + var actualIntVal int + err = payload.Decode(sa[intKey], &actualIntVal) + s.NoError(err) + s.Equal(intVal, actualIntVal) + + var actualFloatVal float64 + err = payload.Decode(sa[floatKey], &actualFloatVal) + s.NoError(err) + s.Equal(floatVal, actualFloatVal) + + // Test remove search attributes by setting payload to nil. + s.visibility.MergeCustomSearchAttributes(s.mockMutableContext, map[string]*commonpb.Payload{ + intKey: s.mustEncode(intVal), + floatKey: nil, + }) + s.NoError(err) + s.Len(s.mockMutableContext.Tasks, 2) + s.assertTaskPayload(3, s.mockMutableContext.Tasks[1].Payload) + + sa = s.visibility.CustomSearchAttributes(s.mockMutableContext) + s.NoError(err) + s.Len(sa, 2, "intKey and stringKey should remain") + + // Test removing all search attributes also removes the node. 
+ s.visibility.MergeCustomSearchAttributes(s.mockMutableContext, map[string]*commonpb.Payload{ + stringKey: nil, + intKey: nil, + }) + s.Len(s.mockMutableContext.Tasks, 3) + s.assertTaskPayload(4, s.mockMutableContext.Tasks[2].Payload) + _, ok := s.visibility.SA.TryGet(s.mockContext) + s.False(ok) + s.Nil(s.visibility.CustomSearchAttributes(s.mockContext)) +} + +func (s *visibilitySuite) TestNewVisibilityWithData_FilterNilSearchAttributes() { + stringKey, stringVal := "stringKey", "stringValue" + // SA with 1 valid and 2 nil values - nil values should be filtered out + customSearchAttributes := map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + "nilKey1": nil, + "nilKey2": nil, + } + // Memo with 1 valid and 2 nil values - nil values should NOT be filtered out + customMemo := map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + "nilKey1": nil, + "nilKey2": nil, + } + visibility := NewVisibilityWithData(s.mockMutableContext, customSearchAttributes, customMemo) + // SA should have only 1 field (nil values filtered out) + s.Len(visibility.SA.Get(s.mockContext).IndexedFields, 1) + s.NotNil(visibility.SA.Get(s.mockContext).IndexedFields[stringKey]) + // Memo should have all 3 fields (nil values NOT filtered) + s.Len(visibility.Memo.Get(s.mockContext).Fields, 3) + s.NotNil(visibility.Memo.Get(s.mockContext).Fields[stringKey]) +} + +func (s *visibilitySuite) TestReplaceCustomSearchAttributes() { + stringKey, stringVal := "stringKey", "stringValue" + intKey, intVal := "intKey", 42 + floatKey, floatVal := "floatKey", 3.14 + byteKey, byteVal := "byteKey", []byte{0x01, 0x02, 0x03} + + // Set up some initial SA. 
+ s.visibility.ReplaceCustomSearchAttributes( + s.mockMutableContext, + map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + intKey: s.mustEncode(intVal), + floatKey: s.mustEncode(floatVal), + }, + ) + s.Len(s.mockMutableContext.Tasks, 1) + s.assertTaskPayload(2, s.mockMutableContext.Tasks[0].Payload) + + sa := s.visibility.CustomSearchAttributes(s.mockMutableContext) + s.Len(sa, 3) + + // Set to a new set of SA, non-existing keys should be removed. + s.visibility.ReplaceCustomSearchAttributes( + s.mockMutableContext, + map[string]*commonpb.Payload{ + floatKey: s.mustEncode(floatVal), + byteKey: s.mustEncode(byteVal), + }, + ) + s.Len(s.mockMutableContext.Tasks, 2) + s.assertTaskPayload(3, s.mockMutableContext.Tasks[1].Payload) + + sa = s.visibility.CustomSearchAttributes(s.mockMutableContext) + s.Len(sa, 2) + + // Setting to an empty map should remove the node. + s.visibility.ReplaceCustomSearchAttributes( + s.mockMutableContext, + map[string]*commonpb.Payload{}, + ) + s.Len(s.mockMutableContext.Tasks, 3) + s.assertTaskPayload(4, s.mockMutableContext.Tasks[2].Payload) + _, ok := s.visibility.SA.TryGet(s.mockContext) + s.False(ok) + s.Nil(s.visibility.CustomSearchAttributes(s.mockContext)) + + // Test that nil values are filtered out during replace. + s.visibility.ReplaceCustomSearchAttributes( + s.mockMutableContext, + map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + intKey: nil, // Should be filtered out + }, + ) + s.Len(s.mockMutableContext.Tasks, 4) + s.assertTaskPayload(5, s.mockMutableContext.Tasks[3].Payload) + + sa = s.visibility.CustomSearchAttributes(s.mockMutableContext) + s.Len(sa, 1, "nil values should be filtered out") + s.NotNil(sa[stringKey]) + s.Nil(sa[intKey]) + + // Test that replacing with all nil values removes the node. 
+ s.visibility.ReplaceCustomSearchAttributes( + s.mockMutableContext, + map[string]*commonpb.Payload{ + stringKey: nil, + intKey: nil, + }, + ) + s.Len(s.mockMutableContext.Tasks, 5) + s.assertTaskPayload(6, s.mockMutableContext.Tasks[4].Payload) + _, ok = s.visibility.SA.TryGet(s.mockContext) + s.False(ok) + s.Nil(s.visibility.CustomSearchAttributes(s.mockContext)) +} + +func (s *visibilitySuite) TestMergeCustomMemo() { + memo := s.visibility.CustomMemo(s.mockMutableContext) + s.Empty(memo) + + stringKey, stringVal := "stringKey", "stringValue" + intKey, intVal := "intKey", 42 + floatKey, floatVal := "floatKey", 3.14 + + // Add memo via Visibility struct method. + s.visibility.MergeCustomMemo(s.mockMutableContext, map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + intKey: s.mustEncode(intVal), + floatKey: s.mustEncode(floatVal), + }) + s.Len(s.mockMutableContext.Tasks, 1) + s.assertTaskPayload(2, s.mockMutableContext.Tasks[0].Payload) + + memo = s.visibility.CustomMemo(s.mockMutableContext) + s.Len(memo, 3) + + var actualStringVal string + err := payload.Decode(memo[stringKey], &actualStringVal) + s.NoError(err) + s.Equal(stringVal, actualStringVal) + + var actualIntVal int + err = payload.Decode(memo[intKey], &actualIntVal) + s.NoError(err) + s.Equal(intVal, actualIntVal) + + var actualFloatVal float64 + err = payload.Decode(memo[floatKey], &actualFloatVal) + s.NoError(err) + s.Equal(floatVal, actualFloatVal) + + // Test remove memo by setting payload to nil. + s.visibility.MergeCustomMemo(s.mockMutableContext, map[string]*commonpb.Payload{ + intKey: s.mustEncode(intVal), + floatKey: nil, + }) + s.Len(s.mockMutableContext.Tasks, 2) + s.assertTaskPayload(3, s.mockMutableContext.Tasks[1].Payload) + + memo = s.visibility.CustomMemo(s.mockMutableContext) + s.Len(memo, 2, "intKey and stringKey should remain") + + // Test removing all memo fields also removes the node. 
+ s.visibility.MergeCustomMemo(s.mockMutableContext, map[string]*commonpb.Payload{ + stringKey: nil, + intKey: nil, + }) + s.Len(s.mockMutableContext.Tasks, 3) + s.assertTaskPayload(4, s.mockMutableContext.Tasks[2].Payload) + _, ok := s.visibility.Memo.TryGet(s.mockContext) + s.False(ok) + s.Nil(s.visibility.CustomMemo(s.mockContext)) +} + +func (s *visibilitySuite) TestReplaceCustomMemo() { + stringKey, stringVal := "stringKey", "stringValue" + intKey, intVal := "intKey", 42 + floatKey, floatVal := "floatKey", 3.14 + byteKey, byteVal := "byteKey", []byte{0x01, 0x02, 0x03} + + // Set up some initial memo fields. + s.visibility.ReplaceCustomMemo( + s.mockMutableContext, + map[string]*commonpb.Payload{ + stringKey: s.mustEncode(stringVal), + intKey: s.mustEncode(intVal), + floatKey: s.mustEncode(floatVal), + }, + ) + s.Len(s.mockMutableContext.Tasks, 1) + s.assertTaskPayload(2, s.mockMutableContext.Tasks[0].Payload) + + memo := s.visibility.CustomMemo(s.mockMutableContext) + s.Len(memo, 3) + + // Set to a new set of memo fields, non-existing keys should be removed. + s.visibility.ReplaceCustomMemo( + s.mockMutableContext, + map[string]*commonpb.Payload{ + floatKey: s.mustEncode(floatVal), + byteKey: s.mustEncode(byteVal), + }, + ) + s.Len(s.mockMutableContext.Tasks, 2) + s.assertTaskPayload(3, s.mockMutableContext.Tasks[1].Payload) + + memo = s.visibility.CustomMemo(s.mockMutableContext) + s.Len(memo, 2) + + // Setting to an empty map should remove the node. 
+ s.visibility.ReplaceCustomMemo( + s.mockMutableContext, + map[string]*commonpb.Payload{}, + ) + s.Len(s.mockMutableContext.Tasks, 3) + s.assertTaskPayload(4, s.mockMutableContext.Tasks[2].Payload) + _, ok := s.visibility.Memo.TryGet(s.mockContext) + s.False(ok) + s.Nil(s.visibility.CustomMemo(s.mockContext)) +} + +func (s *visibilitySuite) assertTaskPayload(expectedCount int64, taskPayload any) { + protorequire.ProtoEqual( + s.T(), + &persistencespb.ChasmVisibilityTaskData{TransitionCount: expectedCount}, + taskPayload.(*persistencespb.ChasmVisibilityTaskData), + ) +} + +func (s *visibilitySuite) mustEncode(v any) *commonpb.Payload { + p, err := payload.Encode(v) + s.NoError(err) + return p +} diff --git a/chasm/visibility_value.go b/chasm/visibility_value.go new file mode 100644 index 00000000000..0b79d230ec5 --- /dev/null +++ b/chasm/visibility_value.go @@ -0,0 +1,165 @@ +package chasm + +import ( + "fmt" + "slices" + "time" + + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/searchattribute/sadefs" +) + +type VisibilityValue interface { + MustEncode() *commonpb.Payload + Equal(VisibilityValue) bool + Value() any +} + +type VisibilityValueInt64 int64 + +func (v VisibilityValueInt64) MustEncode() *commonpb.Payload { + return sadefs.MustEncodeValue(int64(v), enumspb.INDEXED_VALUE_TYPE_INT) +} + +func (v VisibilityValueInt64) Equal(other VisibilityValue) bool { + ov, ok := other.(VisibilityValueInt64) + if !ok { + return false + } + return v == ov +} + +func (v VisibilityValueInt64) Value() any { + return int64(v) +} + +type VisibilityValueKeyword string + +func (v VisibilityValueKeyword) MustEncode() *commonpb.Payload { + return sadefs.MustEncodeValue(string(v), enumspb.INDEXED_VALUE_TYPE_KEYWORD) +} + +func (v VisibilityValueKeyword) Equal(other VisibilityValue) bool { + ov, ok := other.(VisibilityValueKeyword) + if !ok { + return false + } + return v == ov +} + +func (v VisibilityValueKeyword) 
Value() any { + return string(v) +} + +type VisibilityValueBool bool + +func (v VisibilityValueBool) MustEncode() *commonpb.Payload { + return sadefs.MustEncodeValue(bool(v), enumspb.INDEXED_VALUE_TYPE_BOOL) +} + +func (v VisibilityValueBool) Equal(other VisibilityValue) bool { + ov, ok := other.(VisibilityValueBool) + if !ok { + return false + } + return v == ov +} + +func (v VisibilityValueBool) Value() any { + return bool(v) +} + +type VisibilityValueFloat64 float64 + +func (v VisibilityValueFloat64) MustEncode() *commonpb.Payload { + return sadefs.MustEncodeValue(float64(v), enumspb.INDEXED_VALUE_TYPE_DOUBLE) +} + +func (v VisibilityValueFloat64) Equal(other VisibilityValue) bool { + ov, ok := other.(VisibilityValueFloat64) + if !ok { + return false + } + return v == ov +} + +func (v VisibilityValueFloat64) Value() any { + return float64(v) +} + +type VisibilityValueTime time.Time + +func (v VisibilityValueTime) MustEncode() *commonpb.Payload { + return sadefs.MustEncodeValue(time.Time(v), enumspb.INDEXED_VALUE_TYPE_DATETIME) +} + +func (v VisibilityValueTime) Equal(other VisibilityValue) bool { + ov, ok := other.(VisibilityValueTime) + if !ok { + return false + } + return time.Time(v).Equal(time.Time(ov)) +} + +func (v VisibilityValueTime) Value() any { + return time.Time(v) +} + +type VisibilityValueStringSlice []string + +func (v VisibilityValueStringSlice) MustEncode() *commonpb.Payload { + return sadefs.MustEncodeValue([]string(v), enumspb.INDEXED_VALUE_TYPE_KEYWORD_LIST) +} + +func (v VisibilityValueStringSlice) Equal(other VisibilityValue) bool { + ov, ok := other.(VisibilityValueStringSlice) + if !ok { + return false + } + return slices.Equal(v, ov) +} + +func (v VisibilityValueStringSlice) Value() any { + return []string(v) +} + +func isVisibilityValueEqual(v1, v2 VisibilityValue) bool { + if v1 == nil && v2 == nil { + return true + } + if v1 == nil || v2 == nil { + return false + } + return v1.Equal(v2) +} + +// visibilityValueFromPayload decoded 
payload based on type set in its metadata. +func visibilityValueFromPayload(payload *commonpb.Payload) (VisibilityValue, error) { + value, err := sadefs.DecodeValue(payload, enumspb.INDEXED_VALUE_TYPE_UNSPECIFIED, false) + if err != nil { + return nil, err + } + + switch val := value.(type) { + case int64: + return VisibilityValueInt64(val), nil + case float64: + return VisibilityValueFloat64(val), nil + case bool: + return VisibilityValueBool(val), nil + case time.Time: + return VisibilityValueTime(val), nil + case string: + // Try to parse as datetime first + if parsedTime, err := time.Parse(time.RFC3339, val); err == nil { + return VisibilityValueTime(parsedTime), nil + } + return VisibilityValueKeyword(val), nil + case []string: + return VisibilityValueStringSlice(val), nil + default: + // this should never happen given that DecodeValue did not return an error + return nil, fmt.Errorf("unexpected search attribute value type %T", value) + } +} diff --git a/chasm/visibility_value_test.go b/chasm/visibility_value_test.go new file mode 100644 index 00000000000..08c187a32bc --- /dev/null +++ b/chasm/visibility_value_test.go @@ -0,0 +1,120 @@ +package chasm + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/common/payload" +) + +func TestVisibilityValue(t *testing.T) { + t.Run("Int64", func(t *testing.T) { + v := VisibilityValueInt64(9876543210) + p := v.MustEncode() + require.NotNil(t, p) + + var out int64 + err := payload.Decode(p, &out) + require.NoError(t, err) + require.Equal(t, int64(9876543210), out) + + require.True(t, v.Equal(VisibilityValueInt64(9876543210))) + require.False(t, v.Equal(VisibilityValueInt64(9876543211))) + }) + + t.Run("String", func(t *testing.T) { + v := VisibilityValueKeyword("hello, 世界") + p := v.MustEncode() + require.NotNil(t, p) + + var out string + err := payload.Decode(p, &out) + require.NoError(t, err) + require.Equal(t, "hello, 世界", out) + + require.True(t, 
v.Equal(VisibilityValueKeyword("hello, 世界"))) + require.False(t, v.Equal(VisibilityValueKeyword("hello"))) + require.False(t, v.Equal(VisibilityValueBool(true))) + }) + + t.Run("Bool", func(t *testing.T) { + v := VisibilityValueBool(true) + p := v.MustEncode() + require.NotNil(t, p) + + var out bool + err := payload.Decode(p, &out) + require.NoError(t, err) + require.Equal(t, true, out) + + require.True(t, v.Equal(VisibilityValueBool(true))) + require.False(t, v.Equal(VisibilityValueBool(false))) + require.False(t, v.Equal(VisibilityValueKeyword("true"))) + }) + + t.Run("Float64", func(t *testing.T) { + v := VisibilityValueFloat64(3.14159) + p := v.MustEncode() + require.NotNil(t, p) + + var out float64 + err := payload.Decode(p, &out) + require.NoError(t, err) + require.InDelta(t, 3.14159, out, 1e-9) + + require.True(t, v.Equal(VisibilityValueFloat64(3.14159))) + require.False(t, v.Equal(VisibilityValueFloat64(2.71828))) + }) + + t.Run("StringSlice", func(t *testing.T) { + v := VisibilityValueStringSlice([]string{"a", "b", "c"}) + p := v.MustEncode() + require.NotNil(t, p) + + var out []string + err := payload.Decode(p, &out) + require.NoError(t, err) + require.Equal(t, []string{"a", "b", "c"}, out) + + require.True(t, v.Equal(VisibilityValueStringSlice([]string{"a", "b", "c"}))) + require.False(t, v.Equal(VisibilityValueStringSlice([]string{"a", "c", "b"}))) + require.False(t, v.Equal(VisibilityValueStringSlice([]string{"a", "b"}))) + require.False(t, v.Equal(VisibilityValueKeyword("[a b c]"))) + }) + + // Time + t.Run("Time", func(t *testing.T) { + // Use a fixed UTC time for deterministic comparison + base := time.Date(2025, 9, 28, 12, 34, 56, 789000000, time.UTC) + v := VisibilityValueTime(base) + p := v.MustEncode() + require.NotNil(t, p) + + var out time.Time + err := payload.Decode(p, &out) + require.NoError(t, err) + require.True(t, base.Equal(out)) + + require.True(t, v.Equal(VisibilityValueTime(base))) + require.False(t, 
v.Equal(VisibilityValueTime(base.Add(time.Second)))) + require.False(t, v.Equal(VisibilityValueKeyword(base.String()))) + }) +} + +func TestIsVisibilityValueEqual(t *testing.T) { + // nil vs nil + require.True(t, isVisibilityValueEqual(nil, nil)) + + // one nil + require.False(t, isVisibilityValueEqual(VisibilityValueInt64(1), nil)) + require.False(t, isVisibilityValueEqual(nil, VisibilityValueInt64(1))) + + // equal values + require.True(t, isVisibilityValueEqual(VisibilityValueKeyword("x"), VisibilityValueKeyword("x"))) + require.True(t, isVisibilityValueEqual(VisibilityValueInt64(5), VisibilityValueInt64(5))) + + // not equal values + require.False(t, isVisibilityValueEqual(VisibilityValueInt64(5), VisibilityValueInt64(6))) + require.False(t, isVisibilityValueEqual(VisibilityValueInt64(5), VisibilityValueFloat64(5))) +} diff --git a/chasm/workflow.go b/chasm/workflow.go new file mode 100644 index 00000000000..7b6a1e2be72 --- /dev/null +++ b/chasm/workflow.go @@ -0,0 +1,11 @@ +package chasm + +const ( + WorkflowLibraryName = "workflow" + WorkflowComponentName = "workflow" +) + +var ( + WorkflowArchetype = FullyQualifiedName(WorkflowLibraryName, WorkflowComponentName) + WorkflowArchetypeID = GenerateTypeID(WorkflowArchetype) +) diff --git a/client/admin/client.go b/client/admin/client.go index 812fc4605f8..e6bb0b01bec 100644 --- a/client/admin/client.go +++ b/client/admin/client.go @@ -1,29 +1,5 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Generates all three generated files in this package: -//go:generate go run ../../cmd/tools/rpcwrappers -service admin +//go:generate go run ../../cmd/tools/genrpcwrappers -service admin package admin @@ -31,10 +7,9 @@ import ( "context" "time" - "google.golang.org/grpc" - "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/common/debug" + "google.golang.org/grpc" ) var _ adminservice.AdminServiceClient = (*clientImpl)(nil) diff --git a/client/admin/client_gen.go b/client/admin/client_gen.go index 2654b9598ad..5a6a1d16fb9 100644 --- a/client/admin/client_gen.go +++ b/client/admin/client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package admin @@ -83,6 +59,16 @@ func (c *clientImpl) CloseShard( return c.client.CloseShard(ctx, request, opts...) } +func (c *clientImpl) DeepHealthCheck( + ctx context.Context, + request *adminservice.DeepHealthCheckRequest, + opts ...grpc.CallOption, +) (*adminservice.DeepHealthCheckResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeepHealthCheck(ctx, request, opts...) +} + func (c *clientImpl) DeleteWorkflowExecution( ctx context.Context, request *adminservice.DeleteWorkflowExecutionRequest, @@ -133,6 +119,36 @@ func (c *clientImpl) DescribeMutableState( return c.client.DescribeMutableState(ctx, request, opts...) 
} +func (c *clientImpl) DescribeTaskQueuePartition( + ctx context.Context, + request *adminservice.DescribeTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*adminservice.DescribeTaskQueuePartitionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeTaskQueuePartition(ctx, request, opts...) +} + +func (c *clientImpl) ForceUnloadTaskQueuePartition( + ctx context.Context, + request *adminservice.ForceUnloadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*adminservice.ForceUnloadTaskQueuePartitionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ForceUnloadTaskQueuePartition(ctx, request, opts...) +} + +func (c *clientImpl) GenerateLastHistoryReplicationTasks( + ctx context.Context, + request *adminservice.GenerateLastHistoryReplicationTasksRequest, + opts ...grpc.CallOption, +) (*adminservice.GenerateLastHistoryReplicationTasksResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.GenerateLastHistoryReplicationTasks(ctx, request, opts...) +} + func (c *clientImpl) GetDLQMessages( ctx context.Context, request *adminservice.GetDLQMessagesRequest, @@ -223,6 +239,16 @@ func (c *clientImpl) GetTaskQueueTasks( return c.client.GetTaskQueueTasks(ctx, request, opts...) } +func (c *clientImpl) GetTaskQueueUserData( + ctx context.Context, + request *adminservice.GetTaskQueueUserDataRequest, + opts ...grpc.CallOption, +) (*adminservice.GetTaskQueueUserDataResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.GetTaskQueueUserData(ctx, request, opts...) +} + func (c *clientImpl) GetWorkflowExecutionRawHistory( ctx context.Context, request *adminservice.GetWorkflowExecutionRawHistoryRequest, @@ -313,6 +339,16 @@ func (c *clientImpl) MergeDLQTasks( return c.client.MergeDLQTasks(ctx, request, opts...) 
} +func (c *clientImpl) MigrateSchedule( + ctx context.Context, + request *adminservice.MigrateScheduleRequest, + opts ...grpc.CallOption, +) (*adminservice.MigrateScheduleResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.MigrateSchedule(ctx, request, opts...) +} + func (c *clientImpl) PurgeDLQMessages( ctx context.Context, request *adminservice.PurgeDLQMessagesRequest, @@ -402,3 +438,23 @@ func (c *clientImpl) ResendReplicationTasks( defer cancel() return c.client.ResendReplicationTasks(ctx, request, opts...) } + +func (c *clientImpl) StartAdminBatchOperation( + ctx context.Context, + request *adminservice.StartAdminBatchOperationRequest, + opts ...grpc.CallOption, +) (*adminservice.StartAdminBatchOperationResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.StartAdminBatchOperation(ctx, request, opts...) +} + +func (c *clientImpl) SyncWorkflowState( + ctx context.Context, + request *adminservice.SyncWorkflowStateRequest, + opts ...grpc.CallOption, +) (*adminservice.SyncWorkflowStateResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.SyncWorkflowState(ctx, request, opts...) +} diff --git a/client/admin/metric_client.go b/client/admin/metric_client.go index edfedda65c9..a53eb707667 100644 --- a/client/admin/metric_client.go +++ b/client/admin/metric_client.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package admin import ( @@ -29,13 +5,12 @@ import ( "time" "go.temporal.io/api/serviceerror" - "google.golang.org/grpc" - "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" + "google.golang.org/grpc" ) var _ adminservice.AdminServiceClient = (*metricClient)(nil) @@ -82,7 +57,8 @@ func (c *metricClient) finishMetricsRecording( *serviceerror.QueryFailed, *serviceerror.NamespaceNotFound, *serviceerror.WorkflowNotReady, - *serviceerror.WorkflowExecutionAlreadyStarted: + *serviceerror.WorkflowExecutionAlreadyStarted, + *serviceerror.ResourceExhausted: // noop - not interest and too many logs default: c.throttledLogger.Info("admin client encountered error", tag.Error(err), tag.ServiceErrorType(err)) diff --git a/client/admin/metric_client_gen.go b/client/admin/metric_client_gen.go index 4a049b3ddd4..7a0cf00737b 100644 --- a/client/admin/metric_client_gen.go +++ b/client/admin/metric_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package admin @@ -103,6 +79,20 @@ func (c *metricClient) CloseShard( return c.client.CloseShard(ctx, request, opts...) } +func (c *metricClient) DeepHealthCheck( + ctx context.Context, + request *adminservice.DeepHealthCheckRequest, + opts ...grpc.CallOption, +) (_ *adminservice.DeepHealthCheckResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientDeepHealthCheck") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeepHealthCheck(ctx, request, opts...) +} + func (c *metricClient) DeleteWorkflowExecution( ctx context.Context, request *adminservice.DeleteWorkflowExecutionRequest, @@ -173,6 +163,48 @@ func (c *metricClient) DescribeMutableState( return c.client.DescribeMutableState(ctx, request, opts...) } +func (c *metricClient) DescribeTaskQueuePartition( + ctx context.Context, + request *adminservice.DescribeTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (_ *adminservice.DescribeTaskQueuePartitionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientDescribeTaskQueuePartition") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeTaskQueuePartition(ctx, request, opts...) 
+} + +func (c *metricClient) ForceUnloadTaskQueuePartition( + ctx context.Context, + request *adminservice.ForceUnloadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (_ *adminservice.ForceUnloadTaskQueuePartitionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientForceUnloadTaskQueuePartition") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ForceUnloadTaskQueuePartition(ctx, request, opts...) +} + +func (c *metricClient) GenerateLastHistoryReplicationTasks( + ctx context.Context, + request *adminservice.GenerateLastHistoryReplicationTasksRequest, + opts ...grpc.CallOption, +) (_ *adminservice.GenerateLastHistoryReplicationTasksResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientGenerateLastHistoryReplicationTasks") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.GenerateLastHistoryReplicationTasks(ctx, request, opts...) +} + func (c *metricClient) GetDLQMessages( ctx context.Context, request *adminservice.GetDLQMessagesRequest, @@ -299,6 +331,20 @@ func (c *metricClient) GetTaskQueueTasks( return c.client.GetTaskQueueTasks(ctx, request, opts...) } +func (c *metricClient) GetTaskQueueUserData( + ctx context.Context, + request *adminservice.GetTaskQueueUserDataRequest, + opts ...grpc.CallOption, +) (_ *adminservice.GetTaskQueueUserDataResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientGetTaskQueueUserData") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.GetTaskQueueUserData(ctx, request, opts...) 
+} + func (c *metricClient) GetWorkflowExecutionRawHistory( ctx context.Context, request *adminservice.GetWorkflowExecutionRawHistoryRequest, @@ -425,6 +471,20 @@ func (c *metricClient) MergeDLQTasks( return c.client.MergeDLQTasks(ctx, request, opts...) } +func (c *metricClient) MigrateSchedule( + ctx context.Context, + request *adminservice.MigrateScheduleRequest, + opts ...grpc.CallOption, +) (_ *adminservice.MigrateScheduleResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientMigrateSchedule") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.MigrateSchedule(ctx, request, opts...) +} + func (c *metricClient) PurgeDLQMessages( ctx context.Context, request *adminservice.PurgeDLQMessagesRequest, @@ -550,3 +610,31 @@ func (c *metricClient) ResendReplicationTasks( return c.client.ResendReplicationTasks(ctx, request, opts...) } + +func (c *metricClient) StartAdminBatchOperation( + ctx context.Context, + request *adminservice.StartAdminBatchOperationRequest, + opts ...grpc.CallOption, +) (_ *adminservice.StartAdminBatchOperationResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientStartAdminBatchOperation") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.StartAdminBatchOperation(ctx, request, opts...) +} + +func (c *metricClient) SyncWorkflowState( + ctx context.Context, + request *adminservice.SyncWorkflowStateRequest, + opts ...grpc.CallOption, +) (_ *adminservice.SyncWorkflowStateResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "AdminClientSyncWorkflowState") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SyncWorkflowState(ctx, request, opts...) 
+} diff --git a/client/admin/retryable_client.go b/client/admin/retryable_client.go index 51c9b91a3ab..fababa48dea 100644 --- a/client/admin/retryable_client.go +++ b/client/admin/retryable_client.go @@ -1,36 +1,11 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package admin import ( "context" - "google.golang.org/grpc" - "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/common/backoff" + "google.golang.org/grpc" ) var _ adminservice.AdminServiceClient = (*retryableClient)(nil) diff --git a/client/admin/retryable_client_gen.go b/client/admin/retryable_client_gen.go index 018136132cf..ad12cd45fb9 100644 --- a/client/admin/retryable_client_gen.go +++ b/client/admin/retryable_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. 
package admin @@ -110,6 +86,21 @@ func (c *retryableClient) CloseShard( return resp, err } +func (c *retryableClient) DeepHealthCheck( + ctx context.Context, + request *adminservice.DeepHealthCheckRequest, + opts ...grpc.CallOption, +) (*adminservice.DeepHealthCheckResponse, error) { + var resp *adminservice.DeepHealthCheckResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeepHealthCheck(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DeleteWorkflowExecution( ctx context.Context, request *adminservice.DeleteWorkflowExecutionRequest, @@ -185,6 +176,51 @@ func (c *retryableClient) DescribeMutableState( return resp, err } +func (c *retryableClient) DescribeTaskQueuePartition( + ctx context.Context, + request *adminservice.DescribeTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*adminservice.DescribeTaskQueuePartitionResponse, error) { + var resp *adminservice.DescribeTaskQueuePartitionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeTaskQueuePartition(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) ForceUnloadTaskQueuePartition( + ctx context.Context, + request *adminservice.ForceUnloadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*adminservice.ForceUnloadTaskQueuePartitionResponse, error) { + var resp *adminservice.ForceUnloadTaskQueuePartitionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ForceUnloadTaskQueuePartition(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) GenerateLastHistoryReplicationTasks( + ctx context.Context, + request *adminservice.GenerateLastHistoryReplicationTasksRequest, + opts ...grpc.CallOption, +) (*adminservice.GenerateLastHistoryReplicationTasksResponse, error) { + var resp *adminservice.GenerateLastHistoryReplicationTasksResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.GenerateLastHistoryReplicationTasks(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetDLQMessages( ctx context.Context, request *adminservice.GetDLQMessagesRequest, @@ -320,6 +356,21 @@ func (c *retryableClient) GetTaskQueueTasks( return resp, err } +func (c *retryableClient) GetTaskQueueUserData( + ctx context.Context, + request *adminservice.GetTaskQueueUserDataRequest, + opts ...grpc.CallOption, +) (*adminservice.GetTaskQueueUserDataResponse, error) { + var resp *adminservice.GetTaskQueueUserDataResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.GetTaskQueueUserData(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetWorkflowExecutionRawHistory( ctx context.Context, request *adminservice.GetWorkflowExecutionRawHistoryRequest, @@ -455,6 +506,21 @@ func (c *retryableClient) MergeDLQTasks( return resp, err } +func (c *retryableClient) MigrateSchedule( + ctx context.Context, + request *adminservice.MigrateScheduleRequest, + opts ...grpc.CallOption, +) (*adminservice.MigrateScheduleResponse, error) { + var resp *adminservice.MigrateScheduleResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.MigrateSchedule(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PurgeDLQMessages( ctx context.Context, request *adminservice.PurgeDLQMessagesRequest, @@ -589,3 +655,33 @@ func (c *retryableClient) ResendReplicationTasks( err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } + +func (c *retryableClient) StartAdminBatchOperation( + ctx context.Context, + request *adminservice.StartAdminBatchOperationRequest, + opts ...grpc.CallOption, +) (*adminservice.StartAdminBatchOperationResponse, error) { + var resp *adminservice.StartAdminBatchOperationResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.StartAdminBatchOperation(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) SyncWorkflowState( + ctx context.Context, + request *adminservice.SyncWorkflowStateRequest, + opts ...grpc.CallOption, +) (*adminservice.SyncWorkflowStateResponse, error) { + var resp *adminservice.SyncWorkflowStateResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SyncWorkflowState(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} diff --git a/client/client_bean.go b/client/client_bean.go index 7f677cd7566..855243debe4 100644 --- a/client/client_bean.go +++ b/client/client_bean.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_bean_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination client_bean_mock.go package client @@ -33,8 +9,6 @@ import ( "go.temporal.io/api/serviceerror" "go.temporal.io/api/workflowservice/v1" - "google.golang.org/grpc" - "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" @@ -43,6 +17,7 @@ import ( "go.temporal.io/server/client/history" "go.temporal.io/server/client/matching" "go.temporal.io/server/common/cluster" + "google.golang.org/grpc" ) type ( @@ -52,7 +27,6 @@ type ( GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) GetFrontendClient() workflowservice.WorkflowServiceClient GetRemoteAdminClient(string) (adminservice.AdminServiceClient, error) - SetRemoteAdminClient(string, adminservice.AdminServiceClient) GetRemoteFrontendClient(string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) } @@ -108,27 +82,6 @@ func NewClientBean(factory Factory, clusterMetadata cluster.Metadata) (Bean, err WorkflowServiceClient: client, } - for clusterName, info := range clusterMetadata.GetAllClusterInfo() { - if !info.Enabled || clusterName == currentClusterName { - continue - } - adminClient = factory.NewRemoteAdminClientWithTimeout( - info.RPCAddress, - admin.DefaultTimeout, - admin.DefaultLargeTimeout, - ) - conn, client = factory.NewRemoteFrontendClientWithTimeout( - info.RPCAddress, - frontend.DefaultTimeout, - frontend.DefaultLongPollTimeout, - ) - adminClients[clusterName] = adminClient - frontendClients[clusterName] = frontendClient{ - connection: conn, - WorkflowServiceClient: client, - } - } - bean := &clientBeanImpl{ factory: factory, historyClient: historyClient, @@ -213,16 +166,6 @@ func (h *clientBeanImpl) GetRemoteAdminClient(cluster string) (adminservice.Admi return 
client, nil } -func (h *clientBeanImpl) SetRemoteAdminClient( - cluster string, - client adminservice.AdminServiceClient, -) { - h.adminClientsLock.Lock() - defer h.adminClientsLock.Unlock() - - h.adminClients[cluster] = client -} - func (h *clientBeanImpl) GetRemoteFrontendClient(clusterName string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) { h.frontendClientsLock.RLock() client, ok := h.frontendClients[clusterName] @@ -267,13 +210,6 @@ func (h *clientBeanImpl) GetRemoteFrontendClient(clusterName string) (grpc.Clien return client.connection, client, nil } -func (h *clientBeanImpl) setRemoteAdminClientLocked( - cluster string, - client adminservice.AdminServiceClient, -) { - h.adminClients[cluster] = client -} - func (h *clientBeanImpl) lazyInitMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) { h.Lock() defer h.Unlock() diff --git a/client/client_bean_mock.go b/client/client_bean_mock.go index 85979a3a471..00e938be97f 100644 --- a/client/client_bean_mock.go +++ b/client/client_bean_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: client_bean.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
+// mockgen -package client -source client_bean.go -destination client_bean_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: client_bean.go // Package client is a generated GoMock package. package client @@ -31,11 +12,11 @@ package client import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/api/workflowservice/v1" - v10 "go.temporal.io/server/api/adminservice/v1" - v11 "go.temporal.io/server/api/historyservice/v1" - v12 "go.temporal.io/server/api/matchingservice/v1" + workflowservice "go.temporal.io/api/workflowservice/v1" + adminservice "go.temporal.io/server/api/adminservice/v1" + historyservice "go.temporal.io/server/api/historyservice/v1" + matchingservice "go.temporal.io/server/api/matchingservice/v1" + gomock "go.uber.org/mock/gomock" grpc "google.golang.org/grpc" ) @@ -43,6 +24,7 @@ import ( type MockBean struct { ctrl *gomock.Controller recorder *MockBeanMockRecorder + isgomock struct{} } // MockBeanMockRecorder is the mock recorder for MockBean. @@ -63,10 +45,10 @@ func (m *MockBean) EXPECT() *MockBeanMockRecorder { } // GetFrontendClient mocks base method. 
-func (m *MockBean) GetFrontendClient() v1.WorkflowServiceClient { +func (m *MockBean) GetFrontendClient() workflowservice.WorkflowServiceClient { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetFrontendClient") - ret0, _ := ret[0].(v1.WorkflowServiceClient) + ret0, _ := ret[0].(workflowservice.WorkflowServiceClient) return ret0 } @@ -77,10 +59,10 @@ func (mr *MockBeanMockRecorder) GetFrontendClient() *gomock.Call { } // GetHistoryClient mocks base method. -func (m *MockBean) GetHistoryClient() v11.HistoryServiceClient { +func (m *MockBean) GetHistoryClient() historyservice.HistoryServiceClient { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetHistoryClient") - ret0, _ := ret[0].(v11.HistoryServiceClient) + ret0, _ := ret[0].(historyservice.HistoryServiceClient) return ret0 } @@ -91,59 +73,47 @@ func (mr *MockBeanMockRecorder) GetHistoryClient() *gomock.Call { } // GetMatchingClient mocks base method. -func (m *MockBean) GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (v12.MatchingServiceClient, error) { +func (m *MockBean) GetMatchingClient(namespaceIDToName NamespaceIDToNameFunc) (matchingservice.MatchingServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetMatchingClient", namespaceIDToName) - ret0, _ := ret[0].(v12.MatchingServiceClient) + ret0, _ := ret[0].(matchingservice.MatchingServiceClient) ret1, _ := ret[1].(error) return ret0, ret1 } // GetMatchingClient indicates an expected call of GetMatchingClient. -func (mr *MockBeanMockRecorder) GetMatchingClient(namespaceIDToName interface{}) *gomock.Call { +func (mr *MockBeanMockRecorder) GetMatchingClient(namespaceIDToName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMatchingClient", reflect.TypeOf((*MockBean)(nil).GetMatchingClient), namespaceIDToName) } // GetRemoteAdminClient mocks base method. 
-func (m *MockBean) GetRemoteAdminClient(arg0 string) (v10.AdminServiceClient, error) { +func (m *MockBean) GetRemoteAdminClient(arg0 string) (adminservice.AdminServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetRemoteAdminClient", arg0) - ret0, _ := ret[0].(v10.AdminServiceClient) + ret0, _ := ret[0].(adminservice.AdminServiceClient) ret1, _ := ret[1].(error) return ret0, ret1 } // GetRemoteAdminClient indicates an expected call of GetRemoteAdminClient. -func (mr *MockBeanMockRecorder) GetRemoteAdminClient(arg0 interface{}) *gomock.Call { +func (mr *MockBeanMockRecorder) GetRemoteAdminClient(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteAdminClient", reflect.TypeOf((*MockBean)(nil).GetRemoteAdminClient), arg0) } // GetRemoteFrontendClient mocks base method. -func (m *MockBean) GetRemoteFrontendClient(arg0 string) (grpc.ClientConnInterface, v1.WorkflowServiceClient, error) { +func (m *MockBean) GetRemoteFrontendClient(arg0 string) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetRemoteFrontendClient", arg0) ret0, _ := ret[0].(grpc.ClientConnInterface) - ret1, _ := ret[1].(v1.WorkflowServiceClient) + ret1, _ := ret[1].(workflowservice.WorkflowServiceClient) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // GetRemoteFrontendClient indicates an expected call of GetRemoteFrontendClient. -func (mr *MockBeanMockRecorder) GetRemoteFrontendClient(arg0 interface{}) *gomock.Call { +func (mr *MockBeanMockRecorder) GetRemoteFrontendClient(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRemoteFrontendClient", reflect.TypeOf((*MockBean)(nil).GetRemoteFrontendClient), arg0) } - -// SetRemoteAdminClient mocks base method. 
-func (m *MockBean) SetRemoteAdminClient(arg0 string, arg1 v10.AdminServiceClient) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SetRemoteAdminClient", arg0, arg1) -} - -// SetRemoteAdminClient indicates an expected call of SetRemoteAdminClient. -func (mr *MockBeanMockRecorder) SetRemoteAdminClient(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetRemoteAdminClient", reflect.TypeOf((*MockBean)(nil).SetRemoteAdminClient), arg0, arg1) -} diff --git a/client/client_factory_mock.go b/client/client_factory_mock.go index 6b379758c20..705fa442a40 100644 --- a/client/client_factory_mock.go +++ b/client/client_factory_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: clientfactory.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package client -source clientfactory.go -destination client_factory_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: clientfactory.go // Package client is a generated GoMock package. package client @@ -32,16 +13,17 @@ import ( reflect "reflect" time "time" - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/api/workflowservice/v1" - v10 "go.temporal.io/server/api/adminservice/v1" - v11 "go.temporal.io/server/api/historyservice/v1" - v12 "go.temporal.io/server/api/matchingservice/v1" + workflowservice "go.temporal.io/api/workflowservice/v1" + adminservice "go.temporal.io/server/api/adminservice/v1" + historyservice "go.temporal.io/server/api/historyservice/v1" + matchingservice "go.temporal.io/server/api/matchingservice/v1" common "go.temporal.io/server/common" dynamicconfig "go.temporal.io/server/common/dynamicconfig" log "go.temporal.io/server/common/log" membership "go.temporal.io/server/common/membership" metrics "go.temporal.io/server/common/metrics" + testhooks "go.temporal.io/server/common/testing/testhooks" + gomock "go.uber.org/mock/gomock" grpc "google.golang.org/grpc" ) @@ -49,6 +31,7 @@ import ( type MockFactory struct { ctrl *gomock.Controller recorder *MockFactoryMockRecorder + isgomock struct{} } // MockFactoryMockRecorder is the mock recorder for MockFactory. @@ -69,91 +52,91 @@ func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { } // NewHistoryClientWithTimeout mocks base method. 
-func (m *MockFactory) NewHistoryClientWithTimeout(timeout time.Duration) (v11.HistoryServiceClient, error) { +func (m *MockFactory) NewHistoryClientWithTimeout(timeout time.Duration) (historyservice.HistoryServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewHistoryClientWithTimeout", timeout) - ret0, _ := ret[0].(v11.HistoryServiceClient) + ret0, _ := ret[0].(historyservice.HistoryServiceClient) ret1, _ := ret[1].(error) return ret0, ret1 } // NewHistoryClientWithTimeout indicates an expected call of NewHistoryClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewHistoryClientWithTimeout(timeout interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) NewHistoryClientWithTimeout(timeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewHistoryClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewHistoryClientWithTimeout), timeout) } // NewLocalAdminClientWithTimeout mocks base method. -func (m *MockFactory) NewLocalAdminClientWithTimeout(timeout, largeTimeout time.Duration) (v10.AdminServiceClient, error) { +func (m *MockFactory) NewLocalAdminClientWithTimeout(timeout, largeTimeout time.Duration) (adminservice.AdminServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewLocalAdminClientWithTimeout", timeout, largeTimeout) - ret0, _ := ret[0].(v10.AdminServiceClient) + ret0, _ := ret[0].(adminservice.AdminServiceClient) ret1, _ := ret[1].(error) return ret0, ret1 } // NewLocalAdminClientWithTimeout indicates an expected call of NewLocalAdminClientWithTimeout. 
-func (mr *MockFactoryMockRecorder) NewLocalAdminClientWithTimeout(timeout, largeTimeout interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) NewLocalAdminClientWithTimeout(timeout, largeTimeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLocalAdminClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewLocalAdminClientWithTimeout), timeout, largeTimeout) } // NewLocalFrontendClientWithTimeout mocks base method. -func (m *MockFactory) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, v1.WorkflowServiceClient, error) { +func (m *MockFactory) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewLocalFrontendClientWithTimeout", timeout, longPollTimeout) ret0, _ := ret[0].(grpc.ClientConnInterface) - ret1, _ := ret[1].(v1.WorkflowServiceClient) + ret1, _ := ret[1].(workflowservice.WorkflowServiceClient) ret2, _ := ret[2].(error) return ret0, ret1, ret2 } // NewLocalFrontendClientWithTimeout indicates an expected call of NewLocalFrontendClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) NewLocalFrontendClientWithTimeout(timeout, longPollTimeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewLocalFrontendClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewLocalFrontendClientWithTimeout), timeout, longPollTimeout) } // NewMatchingClientWithTimeout mocks base method. 
-func (m *MockFactory) NewMatchingClientWithTimeout(namespaceIDToName NamespaceIDToNameFunc, timeout, longPollTimeout time.Duration) (v12.MatchingServiceClient, error) { +func (m *MockFactory) NewMatchingClientWithTimeout(namespaceIDToName NamespaceIDToNameFunc, timeout, longPollTimeout time.Duration) (matchingservice.MatchingServiceClient, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewMatchingClientWithTimeout", namespaceIDToName, timeout, longPollTimeout) - ret0, _ := ret[0].(v12.MatchingServiceClient) + ret0, _ := ret[0].(matchingservice.MatchingServiceClient) ret1, _ := ret[1].(error) return ret0, ret1 } // NewMatchingClientWithTimeout indicates an expected call of NewMatchingClientWithTimeout. -func (mr *MockFactoryMockRecorder) NewMatchingClientWithTimeout(namespaceIDToName, timeout, longPollTimeout interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) NewMatchingClientWithTimeout(namespaceIDToName, timeout, longPollTimeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewMatchingClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewMatchingClientWithTimeout), namespaceIDToName, timeout, longPollTimeout) } // NewRemoteAdminClientWithTimeout mocks base method. -func (m *MockFactory) NewRemoteAdminClientWithTimeout(rpcAddress string, timeout, largeTimeout time.Duration) v10.AdminServiceClient { +func (m *MockFactory) NewRemoteAdminClientWithTimeout(rpcAddress string, timeout, largeTimeout time.Duration) adminservice.AdminServiceClient { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewRemoteAdminClientWithTimeout", rpcAddress, timeout, largeTimeout) - ret0, _ := ret[0].(v10.AdminServiceClient) + ret0, _ := ret[0].(adminservice.AdminServiceClient) return ret0 } // NewRemoteAdminClientWithTimeout indicates an expected call of NewRemoteAdminClientWithTimeout. 
-func (mr *MockFactoryMockRecorder) NewRemoteAdminClientWithTimeout(rpcAddress, timeout, largeTimeout interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) NewRemoteAdminClientWithTimeout(rpcAddress, timeout, largeTimeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoteAdminClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewRemoteAdminClientWithTimeout), rpcAddress, timeout, largeTimeout) } // NewRemoteFrontendClientWithTimeout mocks base method. -func (m *MockFactory) NewRemoteFrontendClientWithTimeout(rpcAddress string, timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, v1.WorkflowServiceClient) { +func (m *MockFactory) NewRemoteFrontendClientWithTimeout(rpcAddress string, timeout, longPollTimeout time.Duration) (grpc.ClientConnInterface, workflowservice.WorkflowServiceClient) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewRemoteFrontendClientWithTimeout", rpcAddress, timeout, longPollTimeout) ret0, _ := ret[0].(grpc.ClientConnInterface) - ret1, _ := ret[1].(v1.WorkflowServiceClient) + ret1, _ := ret[1].(workflowservice.WorkflowServiceClient) return ret0, ret1 } // NewRemoteFrontendClientWithTimeout indicates an expected call of NewRemoteFrontendClientWithTimeout. 
-func (mr *MockFactoryMockRecorder) NewRemoteFrontendClientWithTimeout(rpcAddress, timeout, longPollTimeout interface{}) *gomock.Call { +func (mr *MockFactoryMockRecorder) NewRemoteFrontendClientWithTimeout(rpcAddress, timeout, longPollTimeout any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewRemoteFrontendClientWithTimeout", reflect.TypeOf((*MockFactory)(nil).NewRemoteFrontendClientWithTimeout), rpcAddress, timeout, longPollTimeout) } @@ -162,6 +145,7 @@ func (mr *MockFactoryMockRecorder) NewRemoteFrontendClientWithTimeout(rpcAddress type MockFactoryProvider struct { ctrl *gomock.Controller recorder *MockFactoryProviderMockRecorder + isgomock struct{} } // MockFactoryProviderMockRecorder is the mock recorder for MockFactoryProvider. @@ -182,15 +166,15 @@ func (m *MockFactoryProvider) EXPECT() *MockFactoryProviderMockRecorder { } // NewFactory mocks base method. -func (m *MockFactoryProvider) NewFactory(rpcFactory common.RPCFactory, monitor membership.Monitor, metricsHandler metrics.Handler, dc *dynamicconfig.Collection, numberOfHistoryShards int32, logger, throttledLogger log.Logger) Factory { +func (m *MockFactoryProvider) NewFactory(rpcFactory common.RPCFactory, monitor membership.Monitor, metricsHandler metrics.Handler, dc *dynamicconfig.Collection, testHooks testhooks.TestHooks, numberOfHistoryShards int32, logger, throttledLogger log.Logger) Factory { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "NewFactory", rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger) + ret := m.ctrl.Call(m, "NewFactory", rpcFactory, monitor, metricsHandler, dc, testHooks, numberOfHistoryShards, logger, throttledLogger) ret0, _ := ret[0].(Factory) return ret0 } // NewFactory indicates an expected call of NewFactory. 
-func (mr *MockFactoryProviderMockRecorder) NewFactory(rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger interface{}) *gomock.Call { +func (mr *MockFactoryProviderMockRecorder) NewFactory(rpcFactory, monitor, metricsHandler, dc, testHooks, numberOfHistoryShards, logger, throttledLogger any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFactory", reflect.TypeOf((*MockFactoryProvider)(nil).NewFactory), rpcFactory, monitor, metricsHandler, dc, numberOfHistoryShards, logger, throttledLogger) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewFactory", reflect.TypeOf((*MockFactoryProvider)(nil).NewFactory), rpcFactory, monitor, metricsHandler, dc, testHooks, numberOfHistoryShards, logger, throttledLogger) } diff --git a/client/clientfactory.go b/client/clientfactory.go index 69650a5b74c..d0aa5eb10f7 100644 --- a/client/clientfactory.go +++ b/client/clientfactory.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_factory_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination client_factory_mock.go package client @@ -30,8 +6,6 @@ import ( "time" "go.temporal.io/api/workflowservice/v1" - "google.golang.org/grpc" - "go.temporal.io/server/api/adminservice/v1" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/matchingservice/v1" @@ -46,6 +20,8 @@ import ( "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/testing/testhooks" + "google.golang.org/grpc" ) type ( @@ -66,6 +42,7 @@ type ( monitor membership.Monitor, metricsHandler metrics.Handler, dc *dynamicconfig.Collection, + testHooks testhooks.TestHooks, numberOfHistoryShards int32, logger log.Logger, throttledLogger log.Logger, @@ -80,6 +57,7 @@ type ( monitor membership.Monitor metricsHandler metrics.Handler dynConfig *dynamicconfig.Collection + testHooks testhooks.TestHooks numberOfHistoryShards int32 logger log.Logger throttledLogger log.Logger @@ -104,6 +82,7 @@ func (p *factoryProviderImpl) NewFactory( monitor membership.Monitor, metricsHandler metrics.Handler, dc *dynamicconfig.Collection, + testHooks testhooks.TestHooks, numberOfHistoryShards int32, logger log.Logger, throttledLogger log.Logger, @@ -113,6 +92,7 @@ func (p *factoryProviderImpl) NewFactory( monitor: monitor, metricsHandler: metricsHandler, dynConfig: dc, + testHooks: testHooks, numberOfHistoryShards: numberOfHistoryShards, logger: logger, throttledLogger: throttledLogger, @@ -124,7 +104,6 @@ func (cf *rpcClientFactory) 
NewHistoryClientWithTimeout(timeout time.Duration) ( if err != nil { return nil, err } - client := history.NewClient( cf.dynConfig, resolver, @@ -150,15 +129,18 @@ func (cf *rpcClientFactory) NewMatchingClientWithTimeout( } keyResolver := newServiceKeyResolver(resolver) - clientProvider := func(clientKey string) (interface{}, error) { - connection := cf.rpcFactory.CreateInternodeGRPCConnection(clientKey) + clientProvider := func(clientKey string) (any, error) { + connection := cf.rpcFactory.CreateMatchingGRPCConnection(clientKey) return matchingservice.NewMatchingServiceClient(connection), nil } client := matching.NewClient( timeout, longPollTimeout, common.NewClientCache(keyResolver, clientProvider), - matching.NewLoadBalancer(namespaceIDToName, cf.dynConfig), + cf.metricsHandler, + cf.logger, + matching.NewLoadBalancer(namespaceIDToName, cf.dynConfig, cf.testHooks), + dynamicconfig.MatchingSpreadRoutingBatchSize.Get(cf.dynConfig), ) if cf.metricsHandler != nil { @@ -236,12 +218,18 @@ func newServiceKeyResolver(resolver membership.ServiceResolver) *serviceKeyResol } } -func (r *serviceKeyResolverImpl) Lookup(key string) (string, error) { - host, err := r.resolver.Lookup(key) - if err != nil { - return "", err +// Lookup returns the address for a node within a batch. key contains the key (including batch +// number), and index is the index within the batch. If not using batches, index should be 0. +// Note that Lookup(key) and LookupN(key, n)[0] are equal. 
+func (r *serviceKeyResolverImpl) Lookup(key string, index int) (string, error) { + hosts := r.resolver.LookupN(key, index+1) + if len(hosts) == 0 { + return "", membership.ErrInsufficientHosts + } + if index >= len(hosts) { + index %= len(hosts) } - return host.GetAddress(), nil + return hosts[index].GetAddress(), nil } func (r *serviceKeyResolverImpl) GetAllAddresses() ([]string, error) { diff --git a/client/frontend/client.go b/client/frontend/client.go index d7a6476fa97..9bdf3febd89 100644 --- a/client/frontend/client.go +++ b/client/frontend/client.go @@ -1,29 +1,5 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- // Generates all three generated files in this package: -//go:generate go run ../../cmd/tools/rpcwrappers -service frontend +//go:generate go run ../../cmd/tools/genrpcwrappers -service frontend package frontend @@ -32,7 +8,6 @@ import ( "time" "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/server/common/debug" ) diff --git a/client/frontend/client_gen.go b/client/frontend/client_gen.go index 2c6deccf264..c72793f75b2 100644 --- a/client/frontend/client_gen.go +++ b/client/frontend/client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. 
package frontend @@ -33,6 +9,36 @@ import ( "google.golang.org/grpc" ) +func (c *clientImpl) CountActivityExecutions( + ctx context.Context, + request *workflowservice.CountActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountActivityExecutionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CountActivityExecutions(ctx, request, opts...) +} + +func (c *clientImpl) CountNexusOperationExecutions( + ctx context.Context, + request *workflowservice.CountNexusOperationExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountNexusOperationExecutionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CountNexusOperationExecutions(ctx, request, opts...) +} + +func (c *clientImpl) CountSchedules( + ctx context.Context, + request *workflowservice.CountSchedulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountSchedulesResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CountSchedules(ctx, request, opts...) +} + func (c *clientImpl) CountWorkflowExecutions( ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest, @@ -53,6 +59,56 @@ func (c *clientImpl) CreateSchedule( return c.client.CreateSchedule(ctx, request, opts...) } +func (c *clientImpl) CreateWorkerDeployment( + ctx context.Context, + request *workflowservice.CreateWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.CreateWorkerDeploymentResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CreateWorkerDeployment(ctx, request, opts...) 
+} + +func (c *clientImpl) CreateWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.CreateWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.CreateWorkerDeploymentVersionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CreateWorkerDeploymentVersion(ctx, request, opts...) +} + +func (c *clientImpl) CreateWorkflowRule( + ctx context.Context, + request *workflowservice.CreateWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.CreateWorkflowRuleResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.CreateWorkflowRule(ctx, request, opts...) +} + +func (c *clientImpl) DeleteActivityExecution( + ctx context.Context, + request *workflowservice.DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeleteActivityExecution(ctx, request, opts...) +} + +func (c *clientImpl) DeleteNexusOperationExecution( + ctx context.Context, + request *workflowservice.DeleteNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteNexusOperationExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeleteNexusOperationExecution(ctx, request, opts...) +} + func (c *clientImpl) DeleteSchedule( ctx context.Context, request *workflowservice.DeleteScheduleRequest, @@ -63,6 +119,26 @@ func (c *clientImpl) DeleteSchedule( return c.client.DeleteSchedule(ctx, request, opts...) } +func (c *clientImpl) DeleteWorkerDeployment( + ctx context.Context, + request *workflowservice.DeleteWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteWorkerDeploymentResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeleteWorkerDeployment(ctx, request, opts...) 
+} + +func (c *clientImpl) DeleteWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.DeleteWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteWorkerDeploymentVersionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeleteWorkerDeploymentVersion(ctx, request, opts...) +} + func (c *clientImpl) DeleteWorkflowExecution( ctx context.Context, request *workflowservice.DeleteWorkflowExecutionRequest, @@ -73,6 +149,16 @@ func (c *clientImpl) DeleteWorkflowExecution( return c.client.DeleteWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) DeleteWorkflowRule( + ctx context.Context, + request *workflowservice.DeleteWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteWorkflowRuleResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DeleteWorkflowRule(ctx, request, opts...) +} + func (c *clientImpl) DeprecateNamespace( ctx context.Context, request *workflowservice.DeprecateNamespaceRequest, @@ -83,6 +169,16 @@ func (c *clientImpl) DeprecateNamespace( return c.client.DeprecateNamespace(ctx, request, opts...) } +func (c *clientImpl) DescribeActivityExecution( + ctx context.Context, + request *workflowservice.DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeActivityExecution(ctx, request, opts...) +} + func (c *clientImpl) DescribeBatchOperation( ctx context.Context, request *workflowservice.DescribeBatchOperationRequest, @@ -93,6 +189,16 @@ func (c *clientImpl) DescribeBatchOperation( return c.client.DescribeBatchOperation(ctx, request, opts...) 
} +func (c *clientImpl) DescribeDeployment( + ctx context.Context, + request *workflowservice.DescribeDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeDeploymentResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeDeployment(ctx, request, opts...) +} + func (c *clientImpl) DescribeNamespace( ctx context.Context, request *workflowservice.DescribeNamespaceRequest, @@ -103,6 +209,16 @@ func (c *clientImpl) DescribeNamespace( return c.client.DescribeNamespace(ctx, request, opts...) } +func (c *clientImpl) DescribeNexusOperationExecution( + ctx context.Context, + request *workflowservice.DescribeNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeNexusOperationExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeNexusOperationExecution(ctx, request, opts...) +} + func (c *clientImpl) DescribeSchedule( ctx context.Context, request *workflowservice.DescribeScheduleRequest, @@ -123,6 +239,36 @@ func (c *clientImpl) DescribeTaskQueue( return c.client.DescribeTaskQueue(ctx, request, opts...) } +func (c *clientImpl) DescribeWorker( + ctx context.Context, + request *workflowservice.DescribeWorkerRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkerResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeWorker(ctx, request, opts...) +} + +func (c *clientImpl) DescribeWorkerDeployment( + ctx context.Context, + request *workflowservice.DescribeWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkerDeploymentResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeWorkerDeployment(ctx, request, opts...) 
+} + +func (c *clientImpl) DescribeWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.DescribeWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkerDeploymentVersionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeWorkerDeploymentVersion(ctx, request, opts...) +} + func (c *clientImpl) DescribeWorkflowExecution( ctx context.Context, request *workflowservice.DescribeWorkflowExecutionRequest, @@ -133,6 +279,36 @@ func (c *clientImpl) DescribeWorkflowExecution( return c.client.DescribeWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) DescribeWorkflowRule( + ctx context.Context, + request *workflowservice.DescribeWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkflowRuleResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.DescribeWorkflowRule(ctx, request, opts...) +} + +func (c *clientImpl) ExecuteMultiOperation( + ctx context.Context, + request *workflowservice.ExecuteMultiOperationRequest, + opts ...grpc.CallOption, +) (*workflowservice.ExecuteMultiOperationResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ExecuteMultiOperation(ctx, request, opts...) +} + +func (c *clientImpl) FetchWorkerConfig( + ctx context.Context, + request *workflowservice.FetchWorkerConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.FetchWorkerConfigResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.FetchWorkerConfig(ctx, request, opts...) +} + func (c *clientImpl) GetClusterInfo( ctx context.Context, request *workflowservice.GetClusterInfoRequest, @@ -143,6 +319,26 @@ func (c *clientImpl) GetClusterInfo( return c.client.GetClusterInfo(ctx, request, opts...) 
} +func (c *clientImpl) GetCurrentDeployment( + ctx context.Context, + request *workflowservice.GetCurrentDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.GetCurrentDeploymentResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.GetCurrentDeployment(ctx, request, opts...) +} + +func (c *clientImpl) GetDeploymentReachability( + ctx context.Context, + request *workflowservice.GetDeploymentReachabilityRequest, + opts ...grpc.CallOption, +) (*workflowservice.GetDeploymentReachabilityResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.GetDeploymentReachability(ctx, request, opts...) +} + func (c *clientImpl) GetSearchAttributes( ctx context.Context, request *workflowservice.GetSearchAttributesRequest, @@ -183,6 +379,16 @@ func (c *clientImpl) GetWorkerTaskReachability( return c.client.GetWorkerTaskReachability(ctx, request, opts...) } +func (c *clientImpl) GetWorkerVersioningRules( + ctx context.Context, + request *workflowservice.GetWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.GetWorkerVersioningRulesResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.GetWorkerVersioningRules(ctx, request, opts...) +} + func (c *clientImpl) GetWorkflowExecutionHistory( ctx context.Context, request *workflowservice.GetWorkflowExecutionHistoryRequest, @@ -203,6 +409,16 @@ func (c *clientImpl) GetWorkflowExecutionHistoryReverse( return c.client.GetWorkflowExecutionHistoryReverse(ctx, request, opts...) } +func (c *clientImpl) ListActivityExecutions( + ctx context.Context, + request *workflowservice.ListActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListActivityExecutionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListActivityExecutions(ctx, request, opts...) 
+} + func (c *clientImpl) ListArchivedWorkflowExecutions( ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest, @@ -233,6 +449,16 @@ func (c *clientImpl) ListClosedWorkflowExecutions( return c.client.ListClosedWorkflowExecutions(ctx, request, opts...) } +func (c *clientImpl) ListDeployments( + ctx context.Context, + request *workflowservice.ListDeploymentsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListDeploymentsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListDeployments(ctx, request, opts...) +} + func (c *clientImpl) ListNamespaces( ctx context.Context, request *workflowservice.ListNamespacesRequest, @@ -243,6 +469,16 @@ func (c *clientImpl) ListNamespaces( return c.client.ListNamespaces(ctx, request, opts...) } +func (c *clientImpl) ListNexusOperationExecutions( + ctx context.Context, + request *workflowservice.ListNexusOperationExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListNexusOperationExecutionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListNexusOperationExecutions(ctx, request, opts...) +} + func (c *clientImpl) ListOpenWorkflowExecutions( ctx context.Context, request *workflowservice.ListOpenWorkflowExecutionsRequest, @@ -283,6 +519,26 @@ func (c *clientImpl) ListTaskQueuePartitions( return c.client.ListTaskQueuePartitions(ctx, request, opts...) } +func (c *clientImpl) ListWorkerDeployments( + ctx context.Context, + request *workflowservice.ListWorkerDeploymentsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListWorkerDeploymentsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListWorkerDeployments(ctx, request, opts...) 
+} + +func (c *clientImpl) ListWorkers( + ctx context.Context, + request *workflowservice.ListWorkersRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListWorkersResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListWorkers(ctx, request, opts...) +} + func (c *clientImpl) ListWorkflowExecutions( ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest, @@ -293,6 +549,16 @@ func (c *clientImpl) ListWorkflowExecutions( return c.client.ListWorkflowExecutions(ctx, request, opts...) } +func (c *clientImpl) ListWorkflowRules( + ctx context.Context, + request *workflowservice.ListWorkflowRulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListWorkflowRulesResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ListWorkflowRules(ctx, request, opts...) +} + func (c *clientImpl) PatchSchedule( ctx context.Context, request *workflowservice.PatchScheduleRequest, @@ -303,6 +569,46 @@ func (c *clientImpl) PatchSchedule( return c.client.PatchSchedule(ctx, request, opts...) } +func (c *clientImpl) PauseActivity( + ctx context.Context, + request *workflowservice.PauseActivityRequest, + opts ...grpc.CallOption, +) (*workflowservice.PauseActivityResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.PauseActivity(ctx, request, opts...) +} + +func (c *clientImpl) PauseActivityExecution( + ctx context.Context, + request *workflowservice.PauseActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PauseActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.PauseActivityExecution(ctx, request, opts...) 
+} + +func (c *clientImpl) PauseWorkflowExecution( + ctx context.Context, + request *workflowservice.PauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PauseWorkflowExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.PauseWorkflowExecution(ctx, request, opts...) +} + +func (c *clientImpl) PollActivityExecution( + ctx context.Context, + request *workflowservice.PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PollActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.PollActivityExecution(ctx, request, opts...) +} + func (c *clientImpl) PollActivityTaskQueue( ctx context.Context, request *workflowservice.PollActivityTaskQueueRequest, @@ -313,6 +619,16 @@ func (c *clientImpl) PollActivityTaskQueue( return c.client.PollActivityTaskQueue(ctx, request, opts...) } +func (c *clientImpl) PollNexusOperationExecution( + ctx context.Context, + request *workflowservice.PollNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PollNexusOperationExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.PollNexusOperationExecution(ctx, request, opts...) +} + func (c *clientImpl) PollNexusTaskQueue( ctx context.Context, request *workflowservice.PollNexusTaskQueueRequest, @@ -373,6 +689,16 @@ func (c *clientImpl) RecordActivityTaskHeartbeatById( return c.client.RecordActivityTaskHeartbeatById(ctx, request, opts...) } +func (c *clientImpl) RecordWorkerHeartbeat( + ctx context.Context, + request *workflowservice.RecordWorkerHeartbeatRequest, + opts ...grpc.CallOption, +) (*workflowservice.RecordWorkerHeartbeatResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.RecordWorkerHeartbeat(ctx, request, opts...) 
+} + func (c *clientImpl) RegisterNamespace( ctx context.Context, request *workflowservice.RegisterNamespaceRequest, @@ -383,6 +709,26 @@ func (c *clientImpl) RegisterNamespace( return c.client.RegisterNamespace(ctx, request, opts...) } +func (c *clientImpl) RequestCancelActivityExecution( + ctx context.Context, + request *workflowservice.RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.RequestCancelActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.RequestCancelActivityExecution(ctx, request, opts...) +} + +func (c *clientImpl) RequestCancelNexusOperationExecution( + ctx context.Context, + request *workflowservice.RequestCancelNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.RequestCancelNexusOperationExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.RequestCancelNexusOperationExecution(ctx, request, opts...) +} + func (c *clientImpl) RequestCancelWorkflowExecution( ctx context.Context, request *workflowservice.RequestCancelWorkflowExecutionRequest, @@ -393,6 +739,26 @@ func (c *clientImpl) RequestCancelWorkflowExecution( return c.client.RequestCancelWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) ResetActivity( + ctx context.Context, + request *workflowservice.ResetActivityRequest, + opts ...grpc.CallOption, +) (*workflowservice.ResetActivityResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ResetActivity(ctx, request, opts...) +} + +func (c *clientImpl) ResetActivityExecution( + ctx context.Context, + request *workflowservice.ResetActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.ResetActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ResetActivityExecution(ctx, request, opts...) 
+} + func (c *clientImpl) ResetStickyTaskQueue( ctx context.Context, request *workflowservice.ResetStickyTaskQueueRequest, @@ -533,6 +899,56 @@ func (c *clientImpl) ScanWorkflowExecutions( return c.client.ScanWorkflowExecutions(ctx, request, opts...) } +func (c *clientImpl) SetCurrentDeployment( + ctx context.Context, + request *workflowservice.SetCurrentDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetCurrentDeploymentResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.SetCurrentDeployment(ctx, request, opts...) +} + +func (c *clientImpl) SetWorkerDeploymentCurrentVersion( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentCurrentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetWorkerDeploymentCurrentVersionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.SetWorkerDeploymentCurrentVersion(ctx, request, opts...) +} + +func (c *clientImpl) SetWorkerDeploymentManager( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentManagerRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetWorkerDeploymentManagerResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.SetWorkerDeploymentManager(ctx, request, opts...) +} + +func (c *clientImpl) SetWorkerDeploymentRampingVersion( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentRampingVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetWorkerDeploymentRampingVersionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.SetWorkerDeploymentRampingVersion(ctx, request, opts...) 
+} + +func (c *clientImpl) ShutdownWorker( + ctx context.Context, + request *workflowservice.ShutdownWorkerRequest, + opts ...grpc.CallOption, +) (*workflowservice.ShutdownWorkerResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ShutdownWorker(ctx, request, opts...) +} + func (c *clientImpl) SignalWithStartWorkflowExecution( ctx context.Context, request *workflowservice.SignalWithStartWorkflowExecutionRequest, @@ -553,6 +969,16 @@ func (c *clientImpl) SignalWorkflowExecution( return c.client.SignalWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) StartActivityExecution( + ctx context.Context, + request *workflowservice.StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.StartActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.StartActivityExecution(ctx, request, opts...) +} + func (c *clientImpl) StartBatchOperation( ctx context.Context, request *workflowservice.StartBatchOperationRequest, @@ -563,6 +989,16 @@ func (c *clientImpl) StartBatchOperation( return c.client.StartBatchOperation(ctx, request, opts...) } +func (c *clientImpl) StartNexusOperationExecution( + ctx context.Context, + request *workflowservice.StartNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.StartNexusOperationExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.StartNexusOperationExecution(ctx, request, opts...) +} + func (c *clientImpl) StartWorkflowExecution( ctx context.Context, request *workflowservice.StartWorkflowExecutionRequest, @@ -583,6 +1019,26 @@ func (c *clientImpl) StopBatchOperation( return c.client.StopBatchOperation(ctx, request, opts...) 
} +func (c *clientImpl) TerminateActivityExecution( + ctx context.Context, + request *workflowservice.TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.TerminateActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.TerminateActivityExecution(ctx, request, opts...) +} + +func (c *clientImpl) TerminateNexusOperationExecution( + ctx context.Context, + request *workflowservice.TerminateNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.TerminateNexusOperationExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.TerminateNexusOperationExecution(ctx, request, opts...) +} + func (c *clientImpl) TerminateWorkflowExecution( ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest, @@ -593,6 +1049,66 @@ func (c *clientImpl) TerminateWorkflowExecution( return c.client.TerminateWorkflowExecution(ctx, request, opts...) } +func (c *clientImpl) TriggerWorkflowRule( + ctx context.Context, + request *workflowservice.TriggerWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.TriggerWorkflowRuleResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.TriggerWorkflowRule(ctx, request, opts...) +} + +func (c *clientImpl) UnpauseActivity( + ctx context.Context, + request *workflowservice.UnpauseActivityRequest, + opts ...grpc.CallOption, +) (*workflowservice.UnpauseActivityResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UnpauseActivity(ctx, request, opts...) +} + +func (c *clientImpl) UnpauseActivityExecution( + ctx context.Context, + request *workflowservice.UnpauseActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.UnpauseActivityExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UnpauseActivityExecution(ctx, request, opts...) 
+} + +func (c *clientImpl) UnpauseWorkflowExecution( + ctx context.Context, + request *workflowservice.UnpauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.UnpauseWorkflowExecutionResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UnpauseWorkflowExecution(ctx, request, opts...) +} + +func (c *clientImpl) UpdateActivityExecutionOptions( + ctx context.Context, + request *workflowservice.UpdateActivityExecutionOptionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateActivityExecutionOptionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateActivityExecutionOptions(ctx, request, opts...) +} + +func (c *clientImpl) UpdateActivityOptions( + ctx context.Context, + request *workflowservice.UpdateActivityOptionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateActivityOptionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateActivityOptions(ctx, request, opts...) +} + func (c *clientImpl) UpdateNamespace( ctx context.Context, request *workflowservice.UpdateNamespaceRequest, @@ -613,6 +1129,16 @@ func (c *clientImpl) UpdateSchedule( return c.client.UpdateSchedule(ctx, request, opts...) } +func (c *clientImpl) UpdateTaskQueueConfig( + ctx context.Context, + request *workflowservice.UpdateTaskQueueConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateTaskQueueConfigResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateTaskQueueConfig(ctx, request, opts...) +} + func (c *clientImpl) UpdateWorkerBuildIdCompatibility( ctx context.Context, request *workflowservice.UpdateWorkerBuildIdCompatibilityRequest, @@ -623,6 +1149,46 @@ func (c *clientImpl) UpdateWorkerBuildIdCompatibility( return c.client.UpdateWorkerBuildIdCompatibility(ctx, request, opts...) 
} +func (c *clientImpl) UpdateWorkerConfig( + ctx context.Context, + request *workflowservice.UpdateWorkerConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerConfigResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateWorkerConfig(ctx, request, opts...) +} + +func (c *clientImpl) UpdateWorkerDeploymentVersionComputeConfig( + ctx context.Context, + request *workflowservice.UpdateWorkerDeploymentVersionComputeConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerDeploymentVersionComputeConfigResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateWorkerDeploymentVersionComputeConfig(ctx, request, opts...) +} + +func (c *clientImpl) UpdateWorkerDeploymentVersionMetadata( + ctx context.Context, + request *workflowservice.UpdateWorkerDeploymentVersionMetadataRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerDeploymentVersionMetadataResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateWorkerDeploymentVersionMetadata(ctx, request, opts...) +} + +func (c *clientImpl) UpdateWorkerVersioningRules( + ctx context.Context, + request *workflowservice.UpdateWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerVersioningRulesResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateWorkerVersioningRules(ctx, request, opts...) +} + func (c *clientImpl) UpdateWorkflowExecution( ctx context.Context, request *workflowservice.UpdateWorkflowExecutionRequest, @@ -632,3 +1198,23 @@ func (c *clientImpl) UpdateWorkflowExecution( defer cancel() return c.client.UpdateWorkflowExecution(ctx, request, opts...) 
} + +func (c *clientImpl) UpdateWorkflowExecutionOptions( + ctx context.Context, + request *workflowservice.UpdateWorkflowExecutionOptionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkflowExecutionOptionsResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.UpdateWorkflowExecutionOptions(ctx, request, opts...) +} + +func (c *clientImpl) ValidateWorkerDeploymentVersionComputeConfig( + ctx context.Context, + request *workflowservice.ValidateWorkerDeploymentVersionComputeConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.ValidateWorkerDeploymentVersionComputeConfigResponse, error) { + ctx, cancel := c.createContext(ctx) + defer cancel() + return c.client.ValidateWorkerDeploymentVersionComputeConfig(ctx, request, opts...) +} diff --git a/client/frontend/metric_client.go b/client/frontend/metric_client.go index d4a6dd3768c..f9c204a83c5 100644 --- a/client/frontend/metric_client.go +++ b/client/frontend/metric_client.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package frontend import ( @@ -30,7 +6,6 @@ import ( "go.temporal.io/api/serviceerror" "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -81,7 +56,8 @@ func (c *metricClient) finishMetricsRecording( *serviceerror.QueryFailed, *serviceerror.NamespaceNotFound, *serviceerror.WorkflowNotReady, - *serviceerror.WorkflowExecutionAlreadyStarted: + *serviceerror.WorkflowExecutionAlreadyStarted, + *serviceerror.ResourceExhausted: // noop - not interest and too many logs default: c.throttledLogger.Info("frontend client encountered error", tag.Error(err), tag.ServiceErrorType(err)) diff --git a/client/frontend/metric_client_gen.go b/client/frontend/metric_client_gen.go index 3d15e86a331..247c90a2ed9 100644 --- a/client/frontend/metric_client_gen.go +++ b/client/frontend/metric_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package frontend @@ -33,6 +9,48 @@ import ( "google.golang.org/grpc" ) +func (c *metricClient) CountActivityExecutions( + ctx context.Context, + request *workflowservice.CountActivityExecutionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CountActivityExecutionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCountActivityExecutions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CountActivityExecutions(ctx, request, opts...) +} + +func (c *metricClient) CountNexusOperationExecutions( + ctx context.Context, + request *workflowservice.CountNexusOperationExecutionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CountNexusOperationExecutionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCountNexusOperationExecutions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CountNexusOperationExecutions(ctx, request, opts...) 
+} + +func (c *metricClient) CountSchedules( + ctx context.Context, + request *workflowservice.CountSchedulesRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CountSchedulesResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCountSchedules") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CountSchedules(ctx, request, opts...) +} + func (c *metricClient) CountWorkflowExecutions( ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest, @@ -61,6 +79,76 @@ func (c *metricClient) CreateSchedule( return c.client.CreateSchedule(ctx, request, opts...) } +func (c *metricClient) CreateWorkerDeployment( + ctx context.Context, + request *workflowservice.CreateWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CreateWorkerDeploymentResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCreateWorkerDeployment") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CreateWorkerDeployment(ctx, request, opts...) +} + +func (c *metricClient) CreateWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.CreateWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CreateWorkerDeploymentVersionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCreateWorkerDeploymentVersion") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CreateWorkerDeploymentVersion(ctx, request, opts...) 
+} + +func (c *metricClient) CreateWorkflowRule( + ctx context.Context, + request *workflowservice.CreateWorkflowRuleRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.CreateWorkflowRuleResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientCreateWorkflowRule") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CreateWorkflowRule(ctx, request, opts...) +} + +func (c *metricClient) DeleteActivityExecution( + ctx context.Context, + request *workflowservice.DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DeleteActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDeleteActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteActivityExecution(ctx, request, opts...) +} + +func (c *metricClient) DeleteNexusOperationExecution( + ctx context.Context, + request *workflowservice.DeleteNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DeleteNexusOperationExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDeleteNexusOperationExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteNexusOperationExecution(ctx, request, opts...) +} + func (c *metricClient) DeleteSchedule( ctx context.Context, request *workflowservice.DeleteScheduleRequest, @@ -75,6 +163,34 @@ func (c *metricClient) DeleteSchedule( return c.client.DeleteSchedule(ctx, request, opts...) 
} +func (c *metricClient) DeleteWorkerDeployment( + ctx context.Context, + request *workflowservice.DeleteWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DeleteWorkerDeploymentResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDeleteWorkerDeployment") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteWorkerDeployment(ctx, request, opts...) +} + +func (c *metricClient) DeleteWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.DeleteWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DeleteWorkerDeploymentVersionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDeleteWorkerDeploymentVersion") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteWorkerDeploymentVersion(ctx, request, opts...) +} + func (c *metricClient) DeleteWorkflowExecution( ctx context.Context, request *workflowservice.DeleteWorkflowExecutionRequest, @@ -89,6 +205,20 @@ func (c *metricClient) DeleteWorkflowExecution( return c.client.DeleteWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) DeleteWorkflowRule( + ctx context.Context, + request *workflowservice.DeleteWorkflowRuleRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DeleteWorkflowRuleResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDeleteWorkflowRule") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteWorkflowRule(ctx, request, opts...) +} + func (c *metricClient) DeprecateNamespace( ctx context.Context, request *workflowservice.DeprecateNamespaceRequest, @@ -103,6 +233,20 @@ func (c *metricClient) DeprecateNamespace( return c.client.DeprecateNamespace(ctx, request, opts...) 
} +func (c *metricClient) DescribeActivityExecution( + ctx context.Context, + request *workflowservice.DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeActivityExecution(ctx, request, opts...) +} + func (c *metricClient) DescribeBatchOperation( ctx context.Context, request *workflowservice.DescribeBatchOperationRequest, @@ -117,6 +261,20 @@ func (c *metricClient) DescribeBatchOperation( return c.client.DescribeBatchOperation(ctx, request, opts...) } +func (c *metricClient) DescribeDeployment( + ctx context.Context, + request *workflowservice.DescribeDeploymentRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeDeploymentResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeDeployment") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeDeployment(ctx, request, opts...) +} + func (c *metricClient) DescribeNamespace( ctx context.Context, request *workflowservice.DescribeNamespaceRequest, @@ -131,6 +289,20 @@ func (c *metricClient) DescribeNamespace( return c.client.DescribeNamespace(ctx, request, opts...) } +func (c *metricClient) DescribeNexusOperationExecution( + ctx context.Context, + request *workflowservice.DescribeNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeNexusOperationExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeNexusOperationExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeNexusOperationExecution(ctx, request, opts...) 
+} + func (c *metricClient) DescribeSchedule( ctx context.Context, request *workflowservice.DescribeScheduleRequest, @@ -159,6 +331,48 @@ func (c *metricClient) DescribeTaskQueue( return c.client.DescribeTaskQueue(ctx, request, opts...) } +func (c *metricClient) DescribeWorker( + ctx context.Context, + request *workflowservice.DescribeWorkerRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeWorkerResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeWorker") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeWorker(ctx, request, opts...) +} + +func (c *metricClient) DescribeWorkerDeployment( + ctx context.Context, + request *workflowservice.DescribeWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeWorkerDeploymentResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeWorkerDeployment") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeWorkerDeployment(ctx, request, opts...) +} + +func (c *metricClient) DescribeWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.DescribeWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeWorkerDeploymentVersionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeWorkerDeploymentVersion") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeWorkerDeploymentVersion(ctx, request, opts...) +} + func (c *metricClient) DescribeWorkflowExecution( ctx context.Context, request *workflowservice.DescribeWorkflowExecutionRequest, @@ -173,6 +387,48 @@ func (c *metricClient) DescribeWorkflowExecution( return c.client.DescribeWorkflowExecution(ctx, request, opts...) 
} +func (c *metricClient) DescribeWorkflowRule( + ctx context.Context, + request *workflowservice.DescribeWorkflowRuleRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.DescribeWorkflowRuleResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientDescribeWorkflowRule") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeWorkflowRule(ctx, request, opts...) +} + +func (c *metricClient) ExecuteMultiOperation( + ctx context.Context, + request *workflowservice.ExecuteMultiOperationRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ExecuteMultiOperationResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientExecuteMultiOperation") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ExecuteMultiOperation(ctx, request, opts...) +} + +func (c *metricClient) FetchWorkerConfig( + ctx context.Context, + request *workflowservice.FetchWorkerConfigRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.FetchWorkerConfigResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientFetchWorkerConfig") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.FetchWorkerConfig(ctx, request, opts...) +} + func (c *metricClient) GetClusterInfo( ctx context.Context, request *workflowservice.GetClusterInfoRequest, @@ -187,6 +443,34 @@ func (c *metricClient) GetClusterInfo( return c.client.GetClusterInfo(ctx, request, opts...) 
} +func (c *metricClient) GetCurrentDeployment( + ctx context.Context, + request *workflowservice.GetCurrentDeploymentRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.GetCurrentDeploymentResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientGetCurrentDeployment") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.GetCurrentDeployment(ctx, request, opts...) +} + +func (c *metricClient) GetDeploymentReachability( + ctx context.Context, + request *workflowservice.GetDeploymentReachabilityRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.GetDeploymentReachabilityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientGetDeploymentReachability") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.GetDeploymentReachability(ctx, request, opts...) +} + func (c *metricClient) GetSearchAttributes( ctx context.Context, request *workflowservice.GetSearchAttributesRequest, @@ -243,6 +527,20 @@ func (c *metricClient) GetWorkerTaskReachability( return c.client.GetWorkerTaskReachability(ctx, request, opts...) } +func (c *metricClient) GetWorkerVersioningRules( + ctx context.Context, + request *workflowservice.GetWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.GetWorkerVersioningRulesResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientGetWorkerVersioningRules") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.GetWorkerVersioningRules(ctx, request, opts...) 
+} + func (c *metricClient) GetWorkflowExecutionHistory( ctx context.Context, request *workflowservice.GetWorkflowExecutionHistoryRequest, @@ -271,6 +569,20 @@ func (c *metricClient) GetWorkflowExecutionHistoryReverse( return c.client.GetWorkflowExecutionHistoryReverse(ctx, request, opts...) } +func (c *metricClient) ListActivityExecutions( + ctx context.Context, + request *workflowservice.ListActivityExecutionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListActivityExecutionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListActivityExecutions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListActivityExecutions(ctx, request, opts...) +} + func (c *metricClient) ListArchivedWorkflowExecutions( ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest, @@ -313,6 +625,20 @@ func (c *metricClient) ListClosedWorkflowExecutions( return c.client.ListClosedWorkflowExecutions(ctx, request, opts...) } +func (c *metricClient) ListDeployments( + ctx context.Context, + request *workflowservice.ListDeploymentsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListDeploymentsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListDeployments") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListDeployments(ctx, request, opts...) +} + func (c *metricClient) ListNamespaces( ctx context.Context, request *workflowservice.ListNamespacesRequest, @@ -327,6 +653,20 @@ func (c *metricClient) ListNamespaces( return c.client.ListNamespaces(ctx, request, opts...) 
} +func (c *metricClient) ListNexusOperationExecutions( + ctx context.Context, + request *workflowservice.ListNexusOperationExecutionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListNexusOperationExecutionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListNexusOperationExecutions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListNexusOperationExecutions(ctx, request, opts...) +} + func (c *metricClient) ListOpenWorkflowExecutions( ctx context.Context, request *workflowservice.ListOpenWorkflowExecutionsRequest, @@ -383,6 +723,34 @@ func (c *metricClient) ListTaskQueuePartitions( return c.client.ListTaskQueuePartitions(ctx, request, opts...) } +func (c *metricClient) ListWorkerDeployments( + ctx context.Context, + request *workflowservice.ListWorkerDeploymentsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListWorkerDeploymentsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListWorkerDeployments") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListWorkerDeployments(ctx, request, opts...) +} + +func (c *metricClient) ListWorkers( + ctx context.Context, + request *workflowservice.ListWorkersRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListWorkersResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListWorkers") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListWorkers(ctx, request, opts...) +} + func (c *metricClient) ListWorkflowExecutions( ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest, @@ -397,18 +765,88 @@ func (c *metricClient) ListWorkflowExecutions( return c.client.ListWorkflowExecutions(ctx, request, opts...) 
} +func (c *metricClient) ListWorkflowRules( + ctx context.Context, + request *workflowservice.ListWorkflowRulesRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ListWorkflowRulesResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientListWorkflowRules") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListWorkflowRules(ctx, request, opts...) +} + func (c *metricClient) PatchSchedule( ctx context.Context, - request *workflowservice.PatchScheduleRequest, + request *workflowservice.PatchScheduleRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.PatchScheduleResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPatchSchedule") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PatchSchedule(ctx, request, opts...) +} + +func (c *metricClient) PauseActivity( + ctx context.Context, + request *workflowservice.PauseActivityRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.PauseActivityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPauseActivity") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PauseActivity(ctx, request, opts...) +} + +func (c *metricClient) PauseActivityExecution( + ctx context.Context, + request *workflowservice.PauseActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.PauseActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPauseActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PauseActivityExecution(ctx, request, opts...) 
+} + +func (c *metricClient) PauseWorkflowExecution( + ctx context.Context, + request *workflowservice.PauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.PauseWorkflowExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPauseWorkflowExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PauseWorkflowExecution(ctx, request, opts...) +} + +func (c *metricClient) PollActivityExecution( + ctx context.Context, + request *workflowservice.PollActivityExecutionRequest, opts ...grpc.CallOption, -) (_ *workflowservice.PatchScheduleResponse, retError error) { +) (_ *workflowservice.PollActivityExecutionResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPatchSchedule") + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPollActivityExecution") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.PatchSchedule(ctx, request, opts...) + return c.client.PollActivityExecution(ctx, request, opts...) } func (c *metricClient) PollActivityTaskQueue( @@ -425,6 +863,20 @@ func (c *metricClient) PollActivityTaskQueue( return c.client.PollActivityTaskQueue(ctx, request, opts...) } +func (c *metricClient) PollNexusOperationExecution( + ctx context.Context, + request *workflowservice.PollNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.PollNexusOperationExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientPollNexusOperationExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PollNexusOperationExecution(ctx, request, opts...) 
+} + func (c *metricClient) PollNexusTaskQueue( ctx context.Context, request *workflowservice.PollNexusTaskQueueRequest, @@ -509,6 +961,20 @@ func (c *metricClient) RecordActivityTaskHeartbeatById( return c.client.RecordActivityTaskHeartbeatById(ctx, request, opts...) } +func (c *metricClient) RecordWorkerHeartbeat( + ctx context.Context, + request *workflowservice.RecordWorkerHeartbeatRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.RecordWorkerHeartbeatResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientRecordWorkerHeartbeat") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.RecordWorkerHeartbeat(ctx, request, opts...) +} + func (c *metricClient) RegisterNamespace( ctx context.Context, request *workflowservice.RegisterNamespaceRequest, @@ -523,6 +989,34 @@ func (c *metricClient) RegisterNamespace( return c.client.RegisterNamespace(ctx, request, opts...) } +func (c *metricClient) RequestCancelActivityExecution( + ctx context.Context, + request *workflowservice.RequestCancelActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.RequestCancelActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientRequestCancelActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.RequestCancelActivityExecution(ctx, request, opts...) 
+} + +func (c *metricClient) RequestCancelNexusOperationExecution( + ctx context.Context, + request *workflowservice.RequestCancelNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.RequestCancelNexusOperationExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientRequestCancelNexusOperationExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.RequestCancelNexusOperationExecution(ctx, request, opts...) +} + func (c *metricClient) RequestCancelWorkflowExecution( ctx context.Context, request *workflowservice.RequestCancelWorkflowExecutionRequest, @@ -537,6 +1031,34 @@ func (c *metricClient) RequestCancelWorkflowExecution( return c.client.RequestCancelWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) ResetActivity( + ctx context.Context, + request *workflowservice.ResetActivityRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ResetActivityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientResetActivity") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ResetActivity(ctx, request, opts...) +} + +func (c *metricClient) ResetActivityExecution( + ctx context.Context, + request *workflowservice.ResetActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ResetActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientResetActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ResetActivityExecution(ctx, request, opts...) 
+} + func (c *metricClient) ResetStickyTaskQueue( ctx context.Context, request *workflowservice.ResetStickyTaskQueueRequest, @@ -733,6 +1255,76 @@ func (c *metricClient) ScanWorkflowExecutions( return c.client.ScanWorkflowExecutions(ctx, request, opts...) } +func (c *metricClient) SetCurrentDeployment( + ctx context.Context, + request *workflowservice.SetCurrentDeploymentRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.SetCurrentDeploymentResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientSetCurrentDeployment") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SetCurrentDeployment(ctx, request, opts...) +} + +func (c *metricClient) SetWorkerDeploymentCurrentVersion( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentCurrentVersionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.SetWorkerDeploymentCurrentVersionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientSetWorkerDeploymentCurrentVersion") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SetWorkerDeploymentCurrentVersion(ctx, request, opts...) +} + +func (c *metricClient) SetWorkerDeploymentManager( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentManagerRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.SetWorkerDeploymentManagerResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientSetWorkerDeploymentManager") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SetWorkerDeploymentManager(ctx, request, opts...) 
+} + +func (c *metricClient) SetWorkerDeploymentRampingVersion( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentRampingVersionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.SetWorkerDeploymentRampingVersionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientSetWorkerDeploymentRampingVersion") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SetWorkerDeploymentRampingVersion(ctx, request, opts...) +} + +func (c *metricClient) ShutdownWorker( + ctx context.Context, + request *workflowservice.ShutdownWorkerRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ShutdownWorkerResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientShutdownWorker") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ShutdownWorker(ctx, request, opts...) +} + func (c *metricClient) SignalWithStartWorkflowExecution( ctx context.Context, request *workflowservice.SignalWithStartWorkflowExecutionRequest, @@ -761,6 +1353,20 @@ func (c *metricClient) SignalWorkflowExecution( return c.client.SignalWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) StartActivityExecution( + ctx context.Context, + request *workflowservice.StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.StartActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientStartActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.StartActivityExecution(ctx, request, opts...) 
+} + func (c *metricClient) StartBatchOperation( ctx context.Context, request *workflowservice.StartBatchOperationRequest, @@ -775,6 +1381,20 @@ func (c *metricClient) StartBatchOperation( return c.client.StartBatchOperation(ctx, request, opts...) } +func (c *metricClient) StartNexusOperationExecution( + ctx context.Context, + request *workflowservice.StartNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.StartNexusOperationExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientStartNexusOperationExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.StartNexusOperationExecution(ctx, request, opts...) +} + func (c *metricClient) StartWorkflowExecution( ctx context.Context, request *workflowservice.StartWorkflowExecutionRequest, @@ -803,6 +1423,34 @@ func (c *metricClient) StopBatchOperation( return c.client.StopBatchOperation(ctx, request, opts...) } +func (c *metricClient) TerminateActivityExecution( + ctx context.Context, + request *workflowservice.TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.TerminateActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientTerminateActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.TerminateActivityExecution(ctx, request, opts...) 
+} + +func (c *metricClient) TerminateNexusOperationExecution( + ctx context.Context, + request *workflowservice.TerminateNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.TerminateNexusOperationExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientTerminateNexusOperationExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.TerminateNexusOperationExecution(ctx, request, opts...) +} + func (c *metricClient) TerminateWorkflowExecution( ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest, @@ -817,6 +1465,90 @@ func (c *metricClient) TerminateWorkflowExecution( return c.client.TerminateWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) TriggerWorkflowRule( + ctx context.Context, + request *workflowservice.TriggerWorkflowRuleRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.TriggerWorkflowRuleResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientTriggerWorkflowRule") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.TriggerWorkflowRule(ctx, request, opts...) +} + +func (c *metricClient) UnpauseActivity( + ctx context.Context, + request *workflowservice.UnpauseActivityRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UnpauseActivityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUnpauseActivity") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UnpauseActivity(ctx, request, opts...) 
+} + +func (c *metricClient) UnpauseActivityExecution( + ctx context.Context, + request *workflowservice.UnpauseActivityExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UnpauseActivityExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUnpauseActivityExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UnpauseActivityExecution(ctx, request, opts...) +} + +func (c *metricClient) UnpauseWorkflowExecution( + ctx context.Context, + request *workflowservice.UnpauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UnpauseWorkflowExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUnpauseWorkflowExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UnpauseWorkflowExecution(ctx, request, opts...) +} + +func (c *metricClient) UpdateActivityExecutionOptions( + ctx context.Context, + request *workflowservice.UpdateActivityExecutionOptionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateActivityExecutionOptionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateActivityExecutionOptions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateActivityExecutionOptions(ctx, request, opts...) 
+} + +func (c *metricClient) UpdateActivityOptions( + ctx context.Context, + request *workflowservice.UpdateActivityOptionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateActivityOptionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateActivityOptions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateActivityOptions(ctx, request, opts...) +} + func (c *metricClient) UpdateNamespace( ctx context.Context, request *workflowservice.UpdateNamespaceRequest, @@ -845,6 +1577,20 @@ func (c *metricClient) UpdateSchedule( return c.client.UpdateSchedule(ctx, request, opts...) } +func (c *metricClient) UpdateTaskQueueConfig( + ctx context.Context, + request *workflowservice.UpdateTaskQueueConfigRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateTaskQueueConfigResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateTaskQueueConfig") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateTaskQueueConfig(ctx, request, opts...) +} + func (c *metricClient) UpdateWorkerBuildIdCompatibility( ctx context.Context, request *workflowservice.UpdateWorkerBuildIdCompatibilityRequest, @@ -859,6 +1605,62 @@ func (c *metricClient) UpdateWorkerBuildIdCompatibility( return c.client.UpdateWorkerBuildIdCompatibility(ctx, request, opts...) } +func (c *metricClient) UpdateWorkerConfig( + ctx context.Context, + request *workflowservice.UpdateWorkerConfigRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateWorkerConfigResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateWorkerConfig") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkerConfig(ctx, request, opts...) 
+} + +func (c *metricClient) UpdateWorkerDeploymentVersionComputeConfig( + ctx context.Context, + request *workflowservice.UpdateWorkerDeploymentVersionComputeConfigRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateWorkerDeploymentVersionComputeConfigResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateWorkerDeploymentVersionComputeConfig") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkerDeploymentVersionComputeConfig(ctx, request, opts...) +} + +func (c *metricClient) UpdateWorkerDeploymentVersionMetadata( + ctx context.Context, + request *workflowservice.UpdateWorkerDeploymentVersionMetadataRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateWorkerDeploymentVersionMetadataResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateWorkerDeploymentVersionMetadata") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkerDeploymentVersionMetadata(ctx, request, opts...) +} + +func (c *metricClient) UpdateWorkerVersioningRules( + ctx context.Context, + request *workflowservice.UpdateWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateWorkerVersioningRulesResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateWorkerVersioningRules") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkerVersioningRules(ctx, request, opts...) +} + func (c *metricClient) UpdateWorkflowExecution( ctx context.Context, request *workflowservice.UpdateWorkflowExecutionRequest, @@ -872,3 +1674,31 @@ func (c *metricClient) UpdateWorkflowExecution( return c.client.UpdateWorkflowExecution(ctx, request, opts...) 
} + +func (c *metricClient) UpdateWorkflowExecutionOptions( + ctx context.Context, + request *workflowservice.UpdateWorkflowExecutionOptionsRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.UpdateWorkflowExecutionOptionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientUpdateWorkflowExecutionOptions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkflowExecutionOptions(ctx, request, opts...) +} + +func (c *metricClient) ValidateWorkerDeploymentVersionComputeConfig( + ctx context.Context, + request *workflowservice.ValidateWorkerDeploymentVersionComputeConfigRequest, + opts ...grpc.CallOption, +) (_ *workflowservice.ValidateWorkerDeploymentVersionComputeConfigResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "FrontendClientValidateWorkerDeploymentVersionComputeConfig") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ValidateWorkerDeploymentVersionComputeConfig(ctx, request, opts...) +} diff --git a/client/frontend/retryable_client.go b/client/frontend/retryable_client.go index d34fffeaadc..3ccf70ec70d 100644 --- a/client/frontend/retryable_client.go +++ b/client/frontend/retryable_client.go @@ -1,32 +1,7 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package frontend import ( "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/server/common/backoff" ) diff --git a/client/frontend/retryable_client_gen.go b/client/frontend/retryable_client_gen.go index 5eb83d5e552..7a5a63cf4de 100644 --- a/client/frontend/retryable_client_gen.go +++ b/client/frontend/retryable_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package frontend @@ -35,6 +11,51 @@ import ( "go.temporal.io/server/common/backoff" ) +func (c *retryableClient) CountActivityExecutions( + ctx context.Context, + request *workflowservice.CountActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountActivityExecutionsResponse, error) { + var resp *workflowservice.CountActivityExecutionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CountActivityExecutions(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CountNexusOperationExecutions( + ctx context.Context, + request *workflowservice.CountNexusOperationExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountNexusOperationExecutionsResponse, error) { + var resp *workflowservice.CountNexusOperationExecutionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CountNexusOperationExecutions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CountSchedules( + ctx context.Context, + request *workflowservice.CountSchedulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.CountSchedulesResponse, error) { + var resp *workflowservice.CountSchedulesResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CountSchedules(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) CountWorkflowExecutions( ctx context.Context, request *workflowservice.CountWorkflowExecutionsRequest, @@ -65,6 +86,81 @@ func (c *retryableClient) CreateSchedule( return resp, err } +func (c *retryableClient) CreateWorkerDeployment( + ctx context.Context, + request *workflowservice.CreateWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.CreateWorkerDeploymentResponse, error) { + var resp *workflowservice.CreateWorkerDeploymentResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CreateWorkerDeployment(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CreateWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.CreateWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.CreateWorkerDeploymentVersionResponse, error) { + var resp *workflowservice.CreateWorkerDeploymentVersionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CreateWorkerDeploymentVersion(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CreateWorkflowRule( + ctx context.Context, + request *workflowservice.CreateWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.CreateWorkflowRuleResponse, error) { + var resp *workflowservice.CreateWorkflowRuleResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CreateWorkflowRule(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DeleteActivityExecution( + ctx context.Context, + request *workflowservice.DeleteActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteActivityExecutionResponse, error) { + var resp *workflowservice.DeleteActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DeleteNexusOperationExecution( + ctx context.Context, + request *workflowservice.DeleteNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteNexusOperationExecutionResponse, error) { + var resp *workflowservice.DeleteNexusOperationExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteNexusOperationExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DeleteSchedule( ctx context.Context, request *workflowservice.DeleteScheduleRequest, @@ -80,6 +176,36 @@ func (c *retryableClient) DeleteSchedule( return resp, err } +func (c *retryableClient) DeleteWorkerDeployment( + ctx context.Context, + request *workflowservice.DeleteWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteWorkerDeploymentResponse, error) { + var resp *workflowservice.DeleteWorkerDeploymentResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteWorkerDeployment(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DeleteWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.DeleteWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteWorkerDeploymentVersionResponse, error) { + var resp *workflowservice.DeleteWorkerDeploymentVersionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteWorkerDeploymentVersion(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DeleteWorkflowExecution( ctx context.Context, request *workflowservice.DeleteWorkflowExecutionRequest, @@ -95,6 +221,21 @@ func (c *retryableClient) DeleteWorkflowExecution( return resp, err } +func (c *retryableClient) DeleteWorkflowRule( + ctx context.Context, + request *workflowservice.DeleteWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.DeleteWorkflowRuleResponse, error) { + var resp *workflowservice.DeleteWorkflowRuleResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteWorkflowRule(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DeprecateNamespace( ctx context.Context, request *workflowservice.DeprecateNamespaceRequest, @@ -110,6 +251,21 @@ func (c *retryableClient) DeprecateNamespace( return resp, err } +func (c *retryableClient) DescribeActivityExecution( + ctx context.Context, + request *workflowservice.DescribeActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeActivityExecutionResponse, error) { + var resp *workflowservice.DescribeActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DescribeBatchOperation( ctx context.Context, request *workflowservice.DescribeBatchOperationRequest, @@ -125,6 +281,21 @@ func (c *retryableClient) DescribeBatchOperation( return resp, err } +func (c *retryableClient) DescribeDeployment( + ctx context.Context, + request *workflowservice.DescribeDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeDeploymentResponse, error) { + var resp *workflowservice.DescribeDeploymentResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeDeployment(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DescribeNamespace( ctx context.Context, request *workflowservice.DescribeNamespaceRequest, @@ -140,6 +311,21 @@ func (c *retryableClient) DescribeNamespace( return resp, err } +func (c *retryableClient) DescribeNexusOperationExecution( + ctx context.Context, + request *workflowservice.DescribeNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeNexusOperationExecutionResponse, error) { + var resp *workflowservice.DescribeNexusOperationExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeNexusOperationExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DescribeSchedule( ctx context.Context, request *workflowservice.DescribeScheduleRequest, @@ -170,6 +356,51 @@ func (c *retryableClient) DescribeTaskQueue( return resp, err } +func (c *retryableClient) DescribeWorker( + ctx context.Context, + request *workflowservice.DescribeWorkerRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkerResponse, error) { + var resp *workflowservice.DescribeWorkerResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeWorker(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DescribeWorkerDeployment( + ctx context.Context, + request *workflowservice.DescribeWorkerDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkerDeploymentResponse, error) { + var resp *workflowservice.DescribeWorkerDeploymentResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeWorkerDeployment(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DescribeWorkerDeploymentVersion( + ctx context.Context, + request *workflowservice.DescribeWorkerDeploymentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkerDeploymentVersionResponse, error) { + var resp *workflowservice.DescribeWorkerDeploymentVersionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeWorkerDeploymentVersion(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DescribeWorkflowExecution( ctx context.Context, request *workflowservice.DescribeWorkflowExecutionRequest, @@ -185,6 +416,51 @@ func (c *retryableClient) DescribeWorkflowExecution( return resp, err } +func (c *retryableClient) DescribeWorkflowRule( + ctx context.Context, + request *workflowservice.DescribeWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.DescribeWorkflowRuleResponse, error) { + var resp *workflowservice.DescribeWorkflowRuleResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeWorkflowRule(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) ExecuteMultiOperation( + ctx context.Context, + request *workflowservice.ExecuteMultiOperationRequest, + opts ...grpc.CallOption, +) (*workflowservice.ExecuteMultiOperationResponse, error) { + var resp *workflowservice.ExecuteMultiOperationResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ExecuteMultiOperation(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) FetchWorkerConfig( + ctx context.Context, + request *workflowservice.FetchWorkerConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.FetchWorkerConfigResponse, error) { + var resp *workflowservice.FetchWorkerConfigResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.FetchWorkerConfig(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetClusterInfo( ctx context.Context, request *workflowservice.GetClusterInfoRequest, @@ -200,6 +476,36 @@ func (c *retryableClient) GetClusterInfo( return resp, err } +func (c *retryableClient) GetCurrentDeployment( + ctx context.Context, + request *workflowservice.GetCurrentDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.GetCurrentDeploymentResponse, error) { + var resp *workflowservice.GetCurrentDeploymentResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.GetCurrentDeployment(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) GetDeploymentReachability( + ctx context.Context, + request *workflowservice.GetDeploymentReachabilityRequest, + opts ...grpc.CallOption, +) (*workflowservice.GetDeploymentReachabilityResponse, error) { + var resp *workflowservice.GetDeploymentReachabilityResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.GetDeploymentReachability(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetSearchAttributes( ctx context.Context, request *workflowservice.GetSearchAttributesRequest, @@ -260,6 +566,21 @@ func (c *retryableClient) GetWorkerTaskReachability( return resp, err } +func (c *retryableClient) GetWorkerVersioningRules( + ctx context.Context, + request *workflowservice.GetWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.GetWorkerVersioningRulesResponse, error) { + var resp *workflowservice.GetWorkerVersioningRulesResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.GetWorkerVersioningRules(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetWorkflowExecutionHistory( ctx context.Context, request *workflowservice.GetWorkflowExecutionHistoryRequest, @@ -290,6 +611,21 @@ func (c *retryableClient) GetWorkflowExecutionHistoryReverse( return resp, err } +func (c *retryableClient) ListActivityExecutions( + ctx context.Context, + request *workflowservice.ListActivityExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListActivityExecutionsResponse, error) { + var resp *workflowservice.ListActivityExecutionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListActivityExecutions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ListArchivedWorkflowExecutions( ctx context.Context, request *workflowservice.ListArchivedWorkflowExecutionsRequest, @@ -335,6 +671,21 @@ func (c *retryableClient) ListClosedWorkflowExecutions( return resp, err } +func (c *retryableClient) ListDeployments( + ctx context.Context, + request *workflowservice.ListDeploymentsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListDeploymentsResponse, error) { + var resp *workflowservice.ListDeploymentsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListDeployments(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ListNamespaces( ctx context.Context, request *workflowservice.ListNamespacesRequest, @@ -350,6 +701,21 @@ func (c *retryableClient) ListNamespaces( return resp, err } +func (c *retryableClient) ListNexusOperationExecutions( + ctx context.Context, + request *workflowservice.ListNexusOperationExecutionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListNexusOperationExecutionsResponse, error) { + var resp *workflowservice.ListNexusOperationExecutionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListNexusOperationExecutions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ListOpenWorkflowExecutions( ctx context.Context, request *workflowservice.ListOpenWorkflowExecutionsRequest, @@ -410,6 +776,36 @@ func (c *retryableClient) ListTaskQueuePartitions( return resp, err } +func (c *retryableClient) ListWorkerDeployments( + ctx context.Context, + request *workflowservice.ListWorkerDeploymentsRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListWorkerDeploymentsResponse, error) { + var resp *workflowservice.ListWorkerDeploymentsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListWorkerDeployments(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) ListWorkers( + ctx context.Context, + request *workflowservice.ListWorkersRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListWorkersResponse, error) { + var resp *workflowservice.ListWorkersResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListWorkers(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ListWorkflowExecutions( ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest, @@ -425,6 +821,21 @@ func (c *retryableClient) ListWorkflowExecutions( return resp, err } +func (c *retryableClient) ListWorkflowRules( + ctx context.Context, + request *workflowservice.ListWorkflowRulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.ListWorkflowRulesResponse, error) { + var resp *workflowservice.ListWorkflowRulesResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListWorkflowRules(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PatchSchedule( ctx context.Context, request *workflowservice.PatchScheduleRequest, @@ -440,6 +851,66 @@ func (c *retryableClient) PatchSchedule( return resp, err } +func (c *retryableClient) PauseActivity( + ctx context.Context, + request *workflowservice.PauseActivityRequest, + opts ...grpc.CallOption, +) (*workflowservice.PauseActivityResponse, error) { + var resp *workflowservice.PauseActivityResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PauseActivity(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) PauseActivityExecution( + ctx context.Context, + request *workflowservice.PauseActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PauseActivityExecutionResponse, error) { + var resp *workflowservice.PauseActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PauseActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) PauseWorkflowExecution( + ctx context.Context, + request *workflowservice.PauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PauseWorkflowExecutionResponse, error) { + var resp *workflowservice.PauseWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PauseWorkflowExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) PollActivityExecution( + ctx context.Context, + request *workflowservice.PollActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PollActivityExecutionResponse, error) { + var resp *workflowservice.PollActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PollActivityExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PollActivityTaskQueue( ctx context.Context, request *workflowservice.PollActivityTaskQueueRequest, @@ -455,6 +926,21 @@ func (c *retryableClient) PollActivityTaskQueue( return resp, err } +func (c *retryableClient) PollNexusOperationExecution( + ctx context.Context, + request *workflowservice.PollNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.PollNexusOperationExecutionResponse, error) { + var resp *workflowservice.PollNexusOperationExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PollNexusOperationExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PollNexusTaskQueue( ctx context.Context, request *workflowservice.PollNexusTaskQueueRequest, @@ -489,86 +975,161 @@ func (c *retryableClient) PollWorkflowTaskQueue( ctx context.Context, request *workflowservice.PollWorkflowTaskQueueRequest, opts ...grpc.CallOption, -) (*workflowservice.PollWorkflowTaskQueueResponse, error) { - var resp *workflowservice.PollWorkflowTaskQueueResponse +) (*workflowservice.PollWorkflowTaskQueueResponse, error) { + var resp *workflowservice.PollWorkflowTaskQueueResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PollWorkflowTaskQueue(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) QueryWorkflow( + ctx context.Context, + request *workflowservice.QueryWorkflowRequest, + opts ...grpc.CallOption, +) (*workflowservice.QueryWorkflowResponse, error) { + var resp *workflowservice.QueryWorkflowResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.QueryWorkflow(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) RecordActivityTaskHeartbeat( + ctx context.Context, + request *workflowservice.RecordActivityTaskHeartbeatRequest, + opts ...grpc.CallOption, +) (*workflowservice.RecordActivityTaskHeartbeatResponse, error) { + var resp *workflowservice.RecordActivityTaskHeartbeatResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.RecordActivityTaskHeartbeat(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) RecordActivityTaskHeartbeatById( + ctx context.Context, + request *workflowservice.RecordActivityTaskHeartbeatByIdRequest, + opts ...grpc.CallOption, +) (*workflowservice.RecordActivityTaskHeartbeatByIdResponse, error) { + var resp *workflowservice.RecordActivityTaskHeartbeatByIdResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.RecordActivityTaskHeartbeatById(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) RecordWorkerHeartbeat( + ctx context.Context, + request *workflowservice.RecordWorkerHeartbeatRequest, + opts ...grpc.CallOption, +) (*workflowservice.RecordWorkerHeartbeatResponse, error) { + var resp *workflowservice.RecordWorkerHeartbeatResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.RecordWorkerHeartbeat(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) RegisterNamespace( + ctx context.Context, + request *workflowservice.RegisterNamespaceRequest, + opts ...grpc.CallOption, +) (*workflowservice.RegisterNamespaceResponse, error) { + var resp *workflowservice.RegisterNamespaceResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.PollWorkflowTaskQueue(ctx, request, opts...) + resp, err = c.client.RegisterNamespace(ctx, request, opts...) 
return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } -func (c *retryableClient) QueryWorkflow( +func (c *retryableClient) RequestCancelActivityExecution( ctx context.Context, - request *workflowservice.QueryWorkflowRequest, + request *workflowservice.RequestCancelActivityExecutionRequest, opts ...grpc.CallOption, -) (*workflowservice.QueryWorkflowResponse, error) { - var resp *workflowservice.QueryWorkflowResponse +) (*workflowservice.RequestCancelActivityExecutionResponse, error) { + var resp *workflowservice.RequestCancelActivityExecutionResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.QueryWorkflow(ctx, request, opts...) + resp, err = c.client.RequestCancelActivityExecution(ctx, request, opts...) return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } -func (c *retryableClient) RecordActivityTaskHeartbeat( +func (c *retryableClient) RequestCancelNexusOperationExecution( ctx context.Context, - request *workflowservice.RecordActivityTaskHeartbeatRequest, + request *workflowservice.RequestCancelNexusOperationExecutionRequest, opts ...grpc.CallOption, -) (*workflowservice.RecordActivityTaskHeartbeatResponse, error) { - var resp *workflowservice.RecordActivityTaskHeartbeatResponse +) (*workflowservice.RequestCancelNexusOperationExecutionResponse, error) { + var resp *workflowservice.RequestCancelNexusOperationExecutionResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.RecordActivityTaskHeartbeat(ctx, request, opts...) + resp, err = c.client.RequestCancelNexusOperationExecution(ctx, request, opts...) 
return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } -func (c *retryableClient) RecordActivityTaskHeartbeatById( +func (c *retryableClient) RequestCancelWorkflowExecution( ctx context.Context, - request *workflowservice.RecordActivityTaskHeartbeatByIdRequest, + request *workflowservice.RequestCancelWorkflowExecutionRequest, opts ...grpc.CallOption, -) (*workflowservice.RecordActivityTaskHeartbeatByIdResponse, error) { - var resp *workflowservice.RecordActivityTaskHeartbeatByIdResponse +) (*workflowservice.RequestCancelWorkflowExecutionResponse, error) { + var resp *workflowservice.RequestCancelWorkflowExecutionResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.RecordActivityTaskHeartbeatById(ctx, request, opts...) + resp, err = c.client.RequestCancelWorkflowExecution(ctx, request, opts...) return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } -func (c *retryableClient) RegisterNamespace( +func (c *retryableClient) ResetActivity( ctx context.Context, - request *workflowservice.RegisterNamespaceRequest, + request *workflowservice.ResetActivityRequest, opts ...grpc.CallOption, -) (*workflowservice.RegisterNamespaceResponse, error) { - var resp *workflowservice.RegisterNamespaceResponse +) (*workflowservice.ResetActivityResponse, error) { + var resp *workflowservice.ResetActivityResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.RegisterNamespace(ctx, request, opts...) + resp, err = c.client.ResetActivity(ctx, request, opts...) 
return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } -func (c *retryableClient) RequestCancelWorkflowExecution( +func (c *retryableClient) ResetActivityExecution( ctx context.Context, - request *workflowservice.RequestCancelWorkflowExecutionRequest, + request *workflowservice.ResetActivityExecutionRequest, opts ...grpc.CallOption, -) (*workflowservice.RequestCancelWorkflowExecutionResponse, error) { - var resp *workflowservice.RequestCancelWorkflowExecutionResponse +) (*workflowservice.ResetActivityExecutionResponse, error) { + var resp *workflowservice.ResetActivityExecutionResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.RequestCancelWorkflowExecution(ctx, request, opts...) + resp, err = c.client.ResetActivityExecution(ctx, request, opts...) return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) @@ -785,6 +1346,81 @@ func (c *retryableClient) ScanWorkflowExecutions( return resp, err } +func (c *retryableClient) SetCurrentDeployment( + ctx context.Context, + request *workflowservice.SetCurrentDeploymentRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetCurrentDeploymentResponse, error) { + var resp *workflowservice.SetCurrentDeploymentResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SetCurrentDeployment(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) SetWorkerDeploymentCurrentVersion( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentCurrentVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetWorkerDeploymentCurrentVersionResponse, error) { + var resp *workflowservice.SetWorkerDeploymentCurrentVersionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SetWorkerDeploymentCurrentVersion(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) SetWorkerDeploymentManager( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentManagerRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetWorkerDeploymentManagerResponse, error) { + var resp *workflowservice.SetWorkerDeploymentManagerResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SetWorkerDeploymentManager(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) SetWorkerDeploymentRampingVersion( + ctx context.Context, + request *workflowservice.SetWorkerDeploymentRampingVersionRequest, + opts ...grpc.CallOption, +) (*workflowservice.SetWorkerDeploymentRampingVersionResponse, error) { + var resp *workflowservice.SetWorkerDeploymentRampingVersionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SetWorkerDeploymentRampingVersion(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) ShutdownWorker( + ctx context.Context, + request *workflowservice.ShutdownWorkerRequest, + opts ...grpc.CallOption, +) (*workflowservice.ShutdownWorkerResponse, error) { + var resp *workflowservice.ShutdownWorkerResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ShutdownWorker(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) SignalWithStartWorkflowExecution( ctx context.Context, request *workflowservice.SignalWithStartWorkflowExecutionRequest, @@ -815,6 +1451,21 @@ func (c *retryableClient) SignalWorkflowExecution( return resp, err } +func (c *retryableClient) StartActivityExecution( + ctx context.Context, + request *workflowservice.StartActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.StartActivityExecutionResponse, error) { + var resp *workflowservice.StartActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.StartActivityExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) StartBatchOperation( ctx context.Context, request *workflowservice.StartBatchOperationRequest, @@ -830,6 +1481,21 @@ func (c *retryableClient) StartBatchOperation( return resp, err } +func (c *retryableClient) StartNexusOperationExecution( + ctx context.Context, + request *workflowservice.StartNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.StartNexusOperationExecutionResponse, error) { + var resp *workflowservice.StartNexusOperationExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.StartNexusOperationExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) StartWorkflowExecution( ctx context.Context, request *workflowservice.StartWorkflowExecutionRequest, @@ -860,6 +1526,36 @@ func (c *retryableClient) StopBatchOperation( return resp, err } +func (c *retryableClient) TerminateActivityExecution( + ctx context.Context, + request *workflowservice.TerminateActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.TerminateActivityExecutionResponse, error) { + var resp *workflowservice.TerminateActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.TerminateActivityExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) TerminateNexusOperationExecution( + ctx context.Context, + request *workflowservice.TerminateNexusOperationExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.TerminateNexusOperationExecutionResponse, error) { + var resp *workflowservice.TerminateNexusOperationExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.TerminateNexusOperationExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) TerminateWorkflowExecution( ctx context.Context, request *workflowservice.TerminateWorkflowExecutionRequest, @@ -875,6 +1571,96 @@ func (c *retryableClient) TerminateWorkflowExecution( return resp, err } +func (c *retryableClient) TriggerWorkflowRule( + ctx context.Context, + request *workflowservice.TriggerWorkflowRuleRequest, + opts ...grpc.CallOption, +) (*workflowservice.TriggerWorkflowRuleResponse, error) { + var resp *workflowservice.TriggerWorkflowRuleResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.TriggerWorkflowRule(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UnpauseActivity( + ctx context.Context, + request *workflowservice.UnpauseActivityRequest, + opts ...grpc.CallOption, +) (*workflowservice.UnpauseActivityResponse, error) { + var resp *workflowservice.UnpauseActivityResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UnpauseActivity(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UnpauseActivityExecution( + ctx context.Context, + request *workflowservice.UnpauseActivityExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.UnpauseActivityExecutionResponse, error) { + var resp *workflowservice.UnpauseActivityExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UnpauseActivityExecution(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UnpauseWorkflowExecution( + ctx context.Context, + request *workflowservice.UnpauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*workflowservice.UnpauseWorkflowExecutionResponse, error) { + var resp *workflowservice.UnpauseWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UnpauseWorkflowExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateActivityExecutionOptions( + ctx context.Context, + request *workflowservice.UpdateActivityExecutionOptionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateActivityExecutionOptionsResponse, error) { + var resp *workflowservice.UpdateActivityExecutionOptionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateActivityExecutionOptions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateActivityOptions( + ctx context.Context, + request *workflowservice.UpdateActivityOptionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateActivityOptionsResponse, error) { + var resp *workflowservice.UpdateActivityOptionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateActivityOptions(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) UpdateNamespace( ctx context.Context, request *workflowservice.UpdateNamespaceRequest, @@ -905,6 +1691,21 @@ func (c *retryableClient) UpdateSchedule( return resp, err } +func (c *retryableClient) UpdateTaskQueueConfig( + ctx context.Context, + request *workflowservice.UpdateTaskQueueConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateTaskQueueConfigResponse, error) { + var resp *workflowservice.UpdateTaskQueueConfigResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateTaskQueueConfig(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) UpdateWorkerBuildIdCompatibility( ctx context.Context, request *workflowservice.UpdateWorkerBuildIdCompatibilityRequest, @@ -920,6 +1721,66 @@ func (c *retryableClient) UpdateWorkerBuildIdCompatibility( return resp, err } +func (c *retryableClient) UpdateWorkerConfig( + ctx context.Context, + request *workflowservice.UpdateWorkerConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerConfigResponse, error) { + var resp *workflowservice.UpdateWorkerConfigResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkerConfig(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateWorkerDeploymentVersionComputeConfig( + ctx context.Context, + request *workflowservice.UpdateWorkerDeploymentVersionComputeConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerDeploymentVersionComputeConfigResponse, error) { + var resp *workflowservice.UpdateWorkerDeploymentVersionComputeConfigResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkerDeploymentVersionComputeConfig(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateWorkerDeploymentVersionMetadata( + ctx context.Context, + request *workflowservice.UpdateWorkerDeploymentVersionMetadataRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerDeploymentVersionMetadataResponse, error) { + var resp *workflowservice.UpdateWorkerDeploymentVersionMetadataResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkerDeploymentVersionMetadata(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateWorkerVersioningRules( + ctx context.Context, + request *workflowservice.UpdateWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkerVersioningRulesResponse, error) { + var resp *workflowservice.UpdateWorkerVersioningRulesResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkerVersioningRules(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) UpdateWorkflowExecution( ctx context.Context, request *workflowservice.UpdateWorkflowExecutionRequest, @@ -934,3 +1795,33 @@ func (c *retryableClient) UpdateWorkflowExecution( err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } + +func (c *retryableClient) UpdateWorkflowExecutionOptions( + ctx context.Context, + request *workflowservice.UpdateWorkflowExecutionOptionsRequest, + opts ...grpc.CallOption, +) (*workflowservice.UpdateWorkflowExecutionOptionsResponse, error) { + var resp *workflowservice.UpdateWorkflowExecutionOptionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkflowExecutionOptions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) ValidateWorkerDeploymentVersionComputeConfig( + ctx context.Context, + request *workflowservice.ValidateWorkerDeploymentVersionComputeConfigRequest, + opts ...grpc.CallOption, +) (*workflowservice.ValidateWorkerDeploymentVersionComputeConfigResponse, error) { + var resp *workflowservice.ValidateWorkerDeploymentVersionComputeConfigResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ValidateWorkerDeploymentVersionComputeConfig(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} diff --git a/client/history/caching_redirector.go b/client/history/caching_redirector.go index bee80837272..a4e20011ecf 100644 --- a/client/history/caching_redirector.go +++ b/client/history/caching_redirector.go @@ -1,38 +1,17 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package history import ( "context" "errors" + "fmt" "sync" + "time" + "github.com/google/uuid" "go.temporal.io/api/serviceerror" - - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/goro" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/membership" @@ -40,55 +19,73 @@ import ( ) type ( - cacheEntry struct { + cacheEntry[C any] struct { shardID int32 address rpcAddress - connection clientConnection + connection clientConnection[C] + staleAt time.Time } - // A cachingRedirector is a redirector that maintains a cache of shard + // A CachingRedirector is a redirector that maintains a cache of shard // owners, and uses that cache instead of querying membership for each // operation. 
Cache entries are evicted either for shard ownership lost // errors, or for any error that might indicate the history instance // is no longer available, including timeouts. - cachingRedirector struct { + CachingRedirector[C any] struct { mu struct { sync.RWMutex - cache map[int32]cacheEntry + cache map[int32]cacheEntry[C] } - connections connectionPool + connections connectionPool[C] + goros goro.Group historyServiceResolver membership.ServiceResolver logger log.Logger + membershipUpdateCh chan *membership.ChangedEvent + staleTTL dynamicconfig.DurationPropertyFn + listenerName string } ) -func newCachingRedirector( - connections connectionPool, +func NewCachingRedirector[C any]( + connections connectionPool[C], historyServiceResolver membership.ServiceResolver, logger log.Logger, -) *cachingRedirector { - r := &cachingRedirector{ + staleTTL dynamicconfig.DurationPropertyFn, +) *CachingRedirector[C] { + r := &CachingRedirector[C]{ connections: connections, historyServiceResolver: historyServiceResolver, logger: logger, + membershipUpdateCh: make(chan *membership.ChangedEvent, 1), + staleTTL: staleTTL, + listenerName: fmt.Sprintf("cachingRedirectorListener-%s", uuid.New().String()), } - r.mu.cache = make(map[int32]cacheEntry) + r.mu.cache = make(map[int32]cacheEntry[C]) + + r.goros.Go(r.eventLoop) + return r } -func (r *cachingRedirector) clientForShardID(shardID int32) (historyservice.HistoryServiceClient, error) { +func (r *CachingRedirector[C]) stop() { + r.goros.Cancel() + r.goros.Wait() +} + +func (r *CachingRedirector[C]) clientForShardID(shardID int32) (C, error) { + var zero C if err := checkShardID(shardID); err != nil { - return nil, err + return zero, err } entry, err := r.getOrCreateEntry(shardID) if err != nil { - return nil, err + return zero, err } - return entry.connection.historyClient, nil + return entry.connection.grpcClient, nil } -func (r *cachingRedirector) execute(ctx context.Context, shardID int32, op clientOperation) error { +func (r 
*CachingRedirector[C]) Execute(ctx context.Context, shardID int32, op ClientOperation[C]) error { if err := checkShardID(shardID); err != nil { return err } @@ -99,12 +96,12 @@ func (r *cachingRedirector) execute(ctx context.Context, shardID int32, op clien return r.redirectLoop(ctx, opEntry, op) } -func (r *cachingRedirector) redirectLoop(ctx context.Context, opEntry cacheEntry, op clientOperation) error { +func (r *CachingRedirector[C]) redirectLoop(ctx context.Context, opEntry cacheEntry[C], op ClientOperation[C]) error { for { if err := common.IsValidContext(ctx); err != nil { return err } - opErr := op(ctx, opEntry.connection.historyClient) + opErr := op(ctx, opEntry.connection.grpcClient) if opErr == nil { return opErr } @@ -124,12 +121,15 @@ func (r *cachingRedirector) redirectLoop(ctx context.Context, opEntry cacheEntry } } -func (r *cachingRedirector) getOrCreateEntry(shardID int32) (cacheEntry, error) { +func (r *CachingRedirector[C]) getOrCreateEntry(shardID int32) (cacheEntry[C], error) { r.mu.RLock() entry, ok := r.mu.cache[shardID] r.mu.RUnlock() if ok { - return entry, nil + if entry.staleAt.IsZero() || time.Now().Before(entry.staleAt) { + return entry, nil + } + // Otherwise, check below under write lock. } r.mu.Lock() @@ -138,18 +138,22 @@ func (r *cachingRedirector) getOrCreateEntry(shardID int32) (cacheEntry, error) // Recheck under write lock. entry, ok = r.mu.cache[shardID] if ok { - return entry, nil + if entry.staleAt.IsZero() || time.Now().Before(entry.staleAt) { + return entry, nil + } + // Delete and fallthrough below to re-check ownership. 
+ delete(r.mu.cache, shardID) } address, err := shardLookup(r.historyServiceResolver, shardID) if err != nil { - return cacheEntry{}, err + return cacheEntry[C]{}, err } return r.cacheAddLocked(shardID, address), nil } -func (r *cachingRedirector) cacheAddLocked(shardID int32, addr rpcAddress) cacheEntry { +func (r *CachingRedirector[C]) cacheAddLocked(shardID int32, addr rpcAddress) cacheEntry[C] { // New history instances might reuse the address of a previously live history // instance. Since we don't currently close GRPC connections when they become // unused or idle, we might have a GRPC connection that has gone into its @@ -163,17 +167,20 @@ func (r *cachingRedirector) cacheAddLocked(shardID int32, addr rpcAddress) cache connection := r.connections.getOrCreateClientConn(addr) r.connections.resetConnectBackoff(connection) - entry := cacheEntry{ + entry := cacheEntry[C]{ shardID: shardID, address: addr, connection: connection, + // staleAt is left at zero; it's only set when r.staleTTL is set, + // and after a membership update informs us that this address is no + // longer the shard owner. 
} r.mu.cache[shardID] = entry return entry } -func (r *cachingRedirector) cacheDeleteByAddress(address rpcAddress) { +func (r *CachingRedirector[C]) cacheDeleteByAddress(address rpcAddress) { r.mu.Lock() defer r.mu.Unlock() @@ -184,7 +191,7 @@ func (r *cachingRedirector) cacheDeleteByAddress(address rpcAddress) { } } -func (r *cachingRedirector) handleSolError(opEntry cacheEntry, solErr *serviceerrors.ShardOwnershipLost) (cacheEntry, bool) { +func (r *CachingRedirector[C]) handleSolError(opEntry cacheEntry[C], solErr *serviceerrors.ShardOwnershipLost) (cacheEntry[C], bool) { r.mu.Lock() defer r.mu.Unlock() @@ -198,12 +205,12 @@ func (r *cachingRedirector) handleSolError(opEntry cacheEntry, solErr *serviceer if len(solErrNewOwner) != 0 && solErrNewOwner != opEntry.address { r.logger.Info("historyClient: updating cache from shard ownership lost error", tag.ShardID(opEntry.shardID), - tag.NewAnyTag("oldAddress", opEntry.address), - tag.NewAnyTag("newAddress", solErrNewOwner)) + tag.Any("oldAddress", opEntry.address), + tag.Any("newAddress", solErrNewOwner)) return r.cacheAddLocked(opEntry.shardID, solErrNewOwner), true } - return cacheEntry{}, false + return cacheEntry[C]{}, false } func maybeHostDownError(opErr error) bool { @@ -213,3 +220,47 @@ func maybeHostDownError(opErr error) bool { } return common.IsContextDeadlineExceededErr(opErr) } + +func (r *CachingRedirector[C]) eventLoop(ctx context.Context) error { + if err := r.historyServiceResolver.AddListener(r.listenerName, r.membershipUpdateCh); err != nil { + r.logger.Fatal("Error adding listener", tag.Error(err)) + } + defer func() { + if err := r.historyServiceResolver.RemoveListener(r.listenerName); err != nil { + r.logger.Warn("Error removing listener", tag.Error(err)) + } + }() + + for { + select { + case <-ctx.Done(): + return nil + case <-r.membershipUpdateCh: + r.staleCheck() + } + } +} + +func (r *CachingRedirector[C]) staleCheck() { + staleTTL := r.staleTTL() + + r.mu.Lock() + defer r.mu.Unlock() + + 
now := time.Now() + for shardID, entry := range r.mu.cache { + if !entry.staleAt.IsZero() { + if now.After(entry.staleAt) { + delete(r.mu.cache, shardID) + } + continue + } + if staleTTL > 0 { + addr, err := shardLookup(r.historyServiceResolver, shardID) + if err != nil || addr != entry.address { + entry.staleAt = now.Add(staleTTL) + r.mu.cache[shardID] = entry + } + } + } +} diff --git a/client/history/caching_redirector_test.go b/client/history/caching_redirector_test.go index a1fa3a1c4ab..ccce1604251 100644 --- a/client/history/caching_redirector_test.go +++ b/client/history/caching_redirector_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package history import ( @@ -30,18 +6,18 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.temporal.io/api/serviceerror" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/historyservicemock/v1" "go.temporal.io/server/common/convert" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" - serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/common/membership" + serviceerrors "go.temporal.io/server/common/serviceerror" + "go.uber.org/mock/gomock" ) type ( @@ -50,7 +26,7 @@ type ( *require.Assertions controller *gomock.Controller - connections *MockconnectionPool + connections *mockConnectionPool[historyservice.HistoryServiceClient] logger log.Logger resolver *membership.MockServiceResolver } @@ -65,20 +41,32 @@ func (s *cachingRedirectorSuite) SetupTest() { s.Assertions = require.New(s.T()) s.controller = gomock.NewController(s.T()) - s.connections = NewMockconnectionPool(s.controller) + s.connections = &mockConnectionPool[historyservice.HistoryServiceClient]{} s.logger = log.NewNoopLogger() s.resolver = membership.NewMockServiceResolver(s.controller) + s.resolver.EXPECT().AddListener(gomock.Any(), gomock.Any()).Return(nil).AnyTimes() + s.resolver.EXPECT().RemoveListener(gomock.Any()).Return(nil).AnyTimes() } func (s *cachingRedirectorSuite) TearDownTest() { s.controller.Finish() } +func (s *cachingRedirectorSuite) newCachingDirector(staleTTL time.Duration) *CachingRedirector[historyservice.HistoryServiceClient] { + return NewCachingRedirector( + s.connections, + s.resolver, + s.logger, + dynamicconfig.GetDurationPropertyFn(staleTTL), + ) +} + func (s *cachingRedirectorSuite) TestShardCheck() { - r := newCachingRedirector(s.connections, s.resolver, s.logger) + r := s.newCachingDirector(0) + defer r.stop() invalErr := &serviceerror.InvalidArgument{} - err 
:= r.execute( + err := r.Execute( context.Background(), -1, func(_ context.Context, _ historyservice.HistoryServiceClient) error { @@ -100,14 +88,7 @@ func cacheRetainingTest(s *cachingRedirectorSuite, opErr error, verify func(erro Times(1) mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) - clientConn := clientConnection{ - historyClient: mockClient, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr). - Return(clientConn) - s.connections.EXPECT(). - resetConnectBackoff(clientConn) + s.connections.client = mockClient clientOp := func(ctx context.Context, client historyservice.HistoryServiceClient) error { if client != mockClient { @@ -115,16 +96,18 @@ func cacheRetainingTest(s *cachingRedirectorSuite, opErr error, verify func(erro } return opErr } - r := newCachingRedirector(s.connections, s.resolver, s.logger) + r := NewCachingRedirector(s.connections, s.resolver, s.logger, dynamicconfig.GetDurationPropertyFn(0)) + defer r.stop() - for i := 0; i < 3; i++ { - err := r.execute( + for range 3 { + err := r.Execute( context.Background(), shardID, clientOp, ) verify(err) } + s.Equal(1, s.connections.resetCalls) } func (s *cachingRedirectorSuite) TestExecuteShardSuccess() { @@ -141,7 +124,7 @@ func (s *cachingRedirectorSuite) TestExecuteCacheRetainingError() { }) } -func hostDownErrorTest(s *cachingRedirectorSuite, clientOp clientOperation, verify func(err error)) { +func hostDownErrorTest(s *cachingRedirectorSuite, clientOp ClientOperation[historyservice.HistoryServiceClient], verify func(err error)) { testAddr := rpcAddress("testaddr") shardID := int32(1) @@ -151,28 +134,21 @@ func hostDownErrorTest(s *cachingRedirectorSuite, clientOp clientOperation, veri Times(1) mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) - clientConn := clientConnection{ - historyClient: mockClient, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr). - Return(clientConn). - Times(1) - s.connections.EXPECT(). 
- resetConnectBackoff(clientConn). - Times(1) + s.connections.client = mockClient - r := newCachingRedirector(s.connections, s.resolver, s.logger) + r := s.newCachingDirector(0) + defer r.stop() ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() - err := r.execute( + err := r.Execute( ctx, shardID, clientOp, ) verify(err) + s.Equal(1, s.connections.resetCalls) } func (s *cachingRedirectorSuite) TestDeadlineExceededError() { @@ -202,45 +178,30 @@ func (s *cachingRedirectorSuite) TestShardOwnershipLostErrors() { testAddr2 := rpcAddress("testaddr2") shardID := int32(1) - mockClient1 := historyservicemock.NewMockHistoryServiceClient(s.controller) - mockClient2 := historyservicemock.NewMockHistoryServiceClient(s.controller) + mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) - r := newCachingRedirector(s.connections, s.resolver, s.logger) + r := s.newCachingDirector(0) + defer r.stop() opCalls := 1 doExecute := func() error { - return r.execute( + return r.Execute( context.Background(), shardID, func(ctx context.Context, client historyservice.HistoryServiceClient) error { switch opCalls { case 1: - if client != mockClient1 { - return errors.New("wrong client") - } opCalls++ return serviceerrors.NewShardOwnershipLost(string(testAddr1), "current") case 2: - if client != mockClient1 { - return errors.New("wrong client") - } opCalls++ return serviceerrors.NewShardOwnershipLost("", "current") case 3: - if client != mockClient1 { - return errors.New("wrong client") - } opCalls++ return serviceerrors.NewShardOwnershipLost(string(testAddr2), "current") case 4: - if client != mockClient2 { - return errors.New("wrong client") - } opCalls++ return nil case 5: - if client != mockClient2 { - return errors.New("wrong client") - } opCalls++ return nil } @@ -255,22 +216,14 @@ func (s *cachingRedirectorSuite) TestShardOwnershipLostErrors() { Return(membership.NewHostInfoFromAddress(string(testAddr1)), nil). 
Times(1) - clientConn1 := clientConnection{ - historyClient: mockClient1, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr1). - Return(clientConn1). - Times(1) - s.connections.EXPECT(). - resetConnectBackoff(clientConn1). - Times(1) + s.connections.client = mockClient err := doExecute() s.Error(err) solErr := &serviceerrors.ShardOwnershipLost{} s.ErrorAs(err, &solErr) s.Equal(string(testAddr1), solErr.OwnerHost) + s.Equal(1, s.connections.resetCalls) // opCall 2: return SOL, but with empty new owner hint. s.resolver.EXPECT(). @@ -278,20 +231,13 @@ func (s *cachingRedirectorSuite) TestShardOwnershipLostErrors() { Return(membership.NewHostInfoFromAddress(string(testAddr1)), nil). Times(1) - s.connections.EXPECT(). - getOrCreateClientConn(testAddr1). - Return(clientConn1). - Times(1) - s.connections.EXPECT(). - resetConnectBackoff(clientConn1). - Times(1) - err = doExecute() s.Error(err) solErr = &serviceerrors.ShardOwnershipLost{} s.ErrorAs(err, &solErr) s.Empty(solErr.OwnerHost) s.Equal(3, opCalls) + s.Equal(2, s.connections.resetCalls) // opCall 3 & 4: return SOL with new owner hint. s.resolver.EXPECT(). @@ -299,32 +245,15 @@ func (s *cachingRedirectorSuite) TestShardOwnershipLostErrors() { Return(membership.NewHostInfoFromAddress(string(testAddr1)), nil). Times(1) - s.connections.EXPECT(). - getOrCreateClientConn(testAddr1). - Return(clientConn1). - Times(1) - s.connections.EXPECT(). - resetConnectBackoff(clientConn1). - Times(1) - - clientConn2 := clientConnection{ - historyClient: mockClient2, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr2). - Return(clientConn2). - Times(1) - s.connections.EXPECT(). - resetConnectBackoff(clientConn2). - Times(1) - err = doExecute() s.NoError(err) s.Equal(5, opCalls) + s.Equal(4, s.connections.resetCalls) // OpCall 5: should use cached lookup & connection, so no additional mocks. 
err = doExecute() s.NoError(err) + s.Equal(4, s.connections.resetCalls) } func (s *cachingRedirectorSuite) TestClientForTargetByShard() { @@ -337,17 +266,10 @@ func (s *cachingRedirectorSuite) TestClientForTargetByShard() { Times(1) mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) - clientConn := clientConnection{ - historyClient: mockClient, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr). - Return(clientConn) - s.connections.EXPECT(). - resetConnectBackoff(clientConn). - Times(1) + s.connections.client = mockClient - r := newCachingRedirector(s.connections, s.resolver, s.logger) + r := s.newCachingDirector(0) + defer r.stop() cli, err := r.clientForShardID(shardID) s.NoError(err) s.Equal(mockClient, cli) @@ -356,4 +278,56 @@ func (s *cachingRedirectorSuite) TestClientForTargetByShard() { cli, err = r.clientForShardID(shardID) s.NoError(err) s.Equal(mockClient, cli) + s.Equal(1, s.connections.resetCalls) +} + +func (s *cachingRedirectorSuite) TestStaleTTL() { + testAddr1 := rpcAddress("testaddr1") + shardID := int32(1) + mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) + s.connections.client = mockClient + + staleTTL := 500 * time.Millisecond + r := s.newCachingDirector(staleTTL) + defer r.stop() + + // Trigger the creation of a cache entry for the shard. + s.resolver.EXPECT(). + Lookup(convert.Int32ToString(shardID)). + Return(membership.NewHostInfoFromAddress(string(testAddr1)), nil). + Times(1) + + cli, err := r.clientForShardID(shardID) + s.NoError(err) + s.Equal(mockClient, cli) + s.Equal(1, s.connections.resetCalls) + + // Now simulate a membership update that changes the shard owner. + testAddr2 := rpcAddress("testaddr2") + s.resolver.EXPECT(). + Lookup(convert.Int32ToString(shardID)). + Return(membership.NewHostInfoFromAddress(string(testAddr2)), nil). + Times(1) + + // Simulate the update, should see the entry marked as stale. 
+ r.membershipUpdateCh <- &membership.ChangedEvent{} + s.Eventually(func() bool { + r.mu.RLock() + defer r.mu.RUnlock() + entry := r.mu.cache[shardID] + return !entry.staleAt.IsZero() + }, 4*staleTTL, 10*time.Millisecond) + + // Wait for the stale TTL to expire so clientForShardID re-resolves the shard owner. + s.resolver.EXPECT(). + Lookup(convert.Int32ToString(shardID)). + Return(membership.NewHostInfoFromAddress(string(testAddr2)), nil). + Times(1) + + s.EventuallyWithT(func(t *assert.CollectT) { + cli, err = r.clientForShardID(shardID) + assert.NoError(t, err) + assert.Equal(t, mockClient, cli) + assert.Equal(t, 2, s.connections.resetCalls) + }, 4*staleTTL, 10*time.Millisecond) } diff --git a/client/history/client.go b/client/history/client.go index 92c7f1075bc..6e9d53232de 100644 --- a/client/history/client.go +++ b/client/history/client.go @@ -1,52 +1,27 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Generates all three generated files in this package: -//go:generate go run ../../cmd/tools/rpcwrappers -service history +//go:generate go run ../../cmd/tools/genrpcwrappers -service history package history import ( "context" - "fmt" + "math/rand" "sync" - "sync/atomic" "time" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "go.temporal.io/api/serviceerror" - "go.temporal.io/server/api/historyservice/v1" replicationspb "go.temporal.io/server/api/replication/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/debug" "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/membership" + "go.temporal.io/server/common/tasktoken" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) var ( @@ -59,17 +34,12 @@ const ( ) type clientImpl struct { - connections connectionPool + connections connectionPool[historyservice.HistoryServiceClient] logger log.Logger numberOfShards int32 - redirector redirector + redirector Redirector[historyservice.HistoryServiceClient] timeout time.Duration - tokenSerializer common.TaskTokenSerializer - // shardIndex is incremented every time a shard-agnostic API is invoked. It is used to load balance requests - // across hosts by picking an essentially random host. We use an index here so that we don't need to inject any - // random number generator in order to make tests deterministic. We use a uint instead of an int because we - // don't want this to become negative if we ever overflow. 
- shardIndex atomic.Uint32 + tokenSerializer *tasktoken.Serializer } // NewClient creates a new history service gRPC client @@ -81,15 +51,20 @@ func NewClient( rpcFactory RPCFactory, timeout time.Duration, ) historyservice.HistoryServiceClient { - connections := newConnectionPool(historyServiceResolver, rpcFactory) + connections := NewConnectionPool(historyServiceResolver, rpcFactory, historyservice.NewHistoryServiceClient) - var redirector redirector - if dc.GetBoolProperty(dynamicconfig.HistoryClientOwnershipCachingEnabled, false)() { + var redirector Redirector[historyservice.HistoryServiceClient] + if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() { logger.Info("historyClient: ownership caching enabled") - redirector = newCachingRedirector(connections, historyServiceResolver, logger) + redirector = NewCachingRedirector( + connections, + historyServiceResolver, + logger, + dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc), + ) } else { logger.Info("historyClient: ownership caching disabled") - redirector = newBasicRedirector(connections, historyServiceResolver) + redirector = NewBasicRedirector(connections, historyServiceResolver) } return &clientImpl{ @@ -98,10 +73,14 @@ func NewClient( numberOfShards: numberOfShards, redirector: redirector, timeout: timeout, - tokenSerializer: common.NewProtoTaskTokenSerializer(), + tokenSerializer: tasktoken.NewSerializer(), } } +func (c *clientImpl) DeepHealthCheck(ctx context.Context, request *historyservice.DeepHealthCheckRequest, opts ...grpc.CallOption) (*historyservice.DeepHealthCheckResponse, error) { + return c.connections.getOrCreateClientConn(rpcAddress(request.GetHostAddress())).grpcClient.DeepHealthCheck(ctx, request, opts...) 
+} + func (c *clientImpl) DescribeHistoryHost( ctx context.Context, request *historyservice.DescribeHistoryHostRequest, @@ -114,7 +93,7 @@ func (c *clientImpl) DescribeHistoryHost( shardID = c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) } else { clientConn := c.connections.getOrCreateClientConn(rpcAddress(request.GetHostAddress())) - return clientConn.historyClient.DescribeHistoryHost(ctx, request, opts...) + return clientConn.grpcClient.DescribeHistoryHost(ctx, request, opts...) } var response *historyservice.DescribeHistoryHostResponse @@ -208,7 +187,7 @@ func (c *clientImpl) GetReplicationStatus( var wg sync.WaitGroup wg.Add(len(clientConns)) for _, client := range clientConns { - historyClient := client.historyClient + historyClient := client.grpcClient go func(client historyservice.HistoryServiceClient) { defer wg.Done() resp, err := historyClient.GetReplicationStatus(ctx, request, opts...) @@ -239,103 +218,71 @@ func (c *clientImpl) GetReplicationStatus( return response, nil } -func (c *clientImpl) StreamWorkflowReplicationMessages( +func (c *clientImpl) RecordActivityTaskStarted( ctx context.Context, + request *historyservice.RecordActivityTaskStartedRequest, opts ...grpc.CallOption, -) (historyservice.HistoryService_StreamWorkflowReplicationMessagesClient, error) { - ctxMetadata, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, serviceerror.NewInvalidArgument("missing cluster & shard ID metadata") - } - _, targetClusterShardID, err := DecodeClusterShardMD(ctxMetadata) - if err != nil { - return nil, err - } - client, err := c.redirector.clientForShardID(targetClusterShardID.ShardID) - if err != nil { - return nil, err - } - return client.StreamWorkflowReplicationMessages( - metadata.NewOutgoingContext(ctx, ctxMetadata), - opts..., - ) -} +) (*historyservice.RecordActivityTaskStartedResponse, error) { + var shardID int32 -// GetDLQTasks doesn't need redirects or routing because DLQ tasks are 
not sharded, so it just picks any available host -// in the connection pool (or creates one) and forwards the request to it. -func (c *clientImpl) GetDLQTasks( - ctx context.Context, - in *historyservice.GetDLQTasksRequest, - opts ...grpc.CallOption, -) (*historyservice.GetDLQTasksResponse, error) { - historyClient, err := c.getAnyClient("GetDLQTasks") - if err != nil { - return nil, err + // For Chasm components we need to route the shard based on business ID. Note that shardIDFromWorkflowID simply + // calculates the hash from the ID so it works for both workflowID and businessID. + if len(request.GetComponentRef()) == 0 { + shardID = c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) + } else { + componentRef, err := c.tokenSerializer.DeserializeChasmComponentRef(request.GetComponentRef()) + if err != nil { + return nil, err + } + + shardID = c.shardIDFromWorkflowID(componentRef.GetNamespaceId(), componentRef.GetBusinessId()) } - return historyClient.GetDLQTasks(ctx, in, opts...) -} -func (c *clientImpl) DeleteDLQTasks( - ctx context.Context, - in *historyservice.DeleteDLQTasksRequest, - opts ...grpc.CallOption, -) (*historyservice.DeleteDLQTasksResponse, error) { - historyClient, err := c.getAnyClient("DeleteDLQTasks") - if err != nil { + var response *historyservice.RecordActivityTaskStartedResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.RecordActivityTaskStarted(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { return nil, err } - return historyClient.DeleteDLQTasks(ctx, in, opts...) 
+ return response, nil } -func (c *clientImpl) ListQueues( +func (c *clientImpl) StreamWorkflowReplicationMessages( ctx context.Context, - in *historyservice.ListQueuesRequest, opts ...grpc.CallOption, -) (*historyservice.ListQueuesResponse, error) { - historyClient, err := c.getAnyClient("ListQueues") +) (historyservice.HistoryService_StreamWorkflowReplicationMessagesClient, error) { + ctxMetadata, ok := metadata.FromIncomingContext(ctx) + if !ok { + return nil, serviceerror.NewInvalidArgument("missing cluster & shard ID metadata") + } + _, targetClusterShardID, err := DecodeClusterShardMD(headers.NewGRPCHeaderGetter(ctx)) if err != nil { return nil, err } - return historyClient.ListQueues(ctx, in, opts...) -} -func (c *clientImpl) ListTasks( - ctx context.Context, - in *historyservice.ListTasksRequest, - opts ...grpc.CallOption, -) (*historyservice.ListTasksResponse, error) { - // Depth of the shardId field is 2 which is not supported by the rpcwrapper generator. - // Simply changing the maxDepth for ShardId field in the rpcwrapper generator will - // cause the generation logic for other methods to find more than one routing fields. - - shardID := in.Request.GetShardId() - var response *historyservice.ListTasksResponse + var streamClient historyservice.HistoryService_StreamWorkflowReplicationMessagesClient op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error - ctx, cancel := c.createContext(ctx) - defer cancel() - response, err = client.ListTasks(ctx, in, opts...) + streamClient, err = client.StreamWorkflowReplicationMessages( + metadata.NewOutgoingContext(ctx, ctxMetadata), + opts...) 
return err } - if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + if err := c.executeWithRedirect(ctx, targetClusterShardID.ShardID, op); err != nil { return nil, err } - return response, nil + return streamClient, nil } -// getAnyClient returns an arbitrary client by looking up a client by a sequentially increasing shard ID. This is useful -// for history APIs that are shard-agnostic (e.g. namespace or DLQ v2 APIs). -func (c *clientImpl) getAnyClient(apiName string) (historyservice.HistoryServiceClient, error) { - // Subtract 1 so that the first index is 0 because Add returns the new value. - shardIndex := c.shardIndex.Add(1) - 1 +// getRandomShard returns a random shard ID for history APIs that are shard-agnostic (e.g. namespace or DLQ v2 APIs). +func (c *clientImpl) getRandomShard() int32 { // Add 1 at the end because shard IDs are 1-indexed. - shardID := shardIndex%uint32(c.numberOfShards) + 1 - client, err := c.redirector.clientForShardID(int32(shardID)) - if err != nil { - msg := fmt.Sprintf("can't find history host to serve API: %q, err: %v", apiName, err) - return nil, serviceerror.NewUnavailable(msg) - } - return client, nil + return int32(rand.Intn(int(c.numberOfShards)) + 1) } func (c *clientImpl) createContext(parent context.Context) (context.Context, context.CancelFunc) { @@ -348,7 +295,7 @@ func (c *clientImpl) shardIDFromWorkflowID(namespaceID, workflowID string) int32 func checkShardID(shardID int32) error { if shardID <= 0 { - return serviceerror.NewInvalidArgument(fmt.Sprintf("Invalid ShardID: %d", shardID)) + return serviceerror.NewInvalidArgumentf("Invalid ShardID: %d", shardID) } return nil } @@ -356,7 +303,7 @@ func checkShardID(shardID int32) error { func (c *clientImpl) executeWithRedirect( ctx context.Context, shardID int32, - op clientOperation, + op ClientOperation[historyservice.HistoryServiceClient], ) error { - return c.redirector.execute(ctx, shardID, op) + return c.redirector.Execute(ctx, shardID, op) } diff --git 
a/client/history/client_gen.go b/client/history/client_gen.go index 0ab3e1cc14f..dc631c3c52a 100644 --- a/client/history/client_gen.go +++ b/client/history/client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. 
package history @@ -54,6 +30,26 @@ func (c *clientImpl) AddTasks( return response, nil } +func (c *clientImpl) CancelNexusOperation( + ctx context.Context, + request *historyservice.CancelNexusOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.CancelNexusOperationResponse, error) { + shardID := request.GetShardId() + var response *historyservice.CancelNexusOperationResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.CancelNexusOperation(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) CloseShard( ctx context.Context, request *historyservice.CloseShardRequest, @@ -74,12 +70,77 @@ func (c *clientImpl) CloseShard( return response, nil } +func (c *clientImpl) CompleteNexusOperation( + ctx context.Context, + request *historyservice.CompleteNexusOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.CompleteNexusOperationResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetCompletion().GetNamespaceId(), request.GetCompletion().GetWorkflowId()) + var response *historyservice.CompleteNexusOperationResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.CompleteNexusOperation(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) CompleteNexusOperationChasm( + ctx context.Context, + request *historyservice.CompleteNexusOperationChasmRequest, + opts ...grpc.CallOption, +) (*historyservice.CompleteNexusOperationChasmResponse, error) { + ref, err := c.tokenSerializer.DeserializeChasmComponentRef(request.GetCompletion().GetComponentRef()) + if err != nil { + return nil, serviceerror.NewInvalidArgument("error deserializing component ref") + } + shardID := c.shardIDFromWorkflowID(ref.GetNamespaceId(), ref.GetBusinessId()) + + var response *historyservice.CompleteNexusOperationChasmResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.CompleteNexusOperationChasm(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) DeleteDLQTasks( + ctx context.Context, + request *historyservice.DeleteDLQTasksRequest, + opts ...grpc.CallOption, +) (*historyservice.DeleteDLQTasksResponse, error) { + shardID := c.getRandomShard() + var response *historyservice.DeleteDLQTasksResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.DeleteDLQTasks(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) DeleteWorkflowExecution( ctx context.Context, request *historyservice.DeleteWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.DeleteWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) var response *historyservice.DeleteWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -99,7 +160,7 @@ func (c *clientImpl) DeleteWorkflowVisibilityRecord( request *historyservice.DeleteWorkflowVisibilityRecordRequest, opts ...grpc.CallOption, ) (*historyservice.DeleteWorkflowVisibilityRecordResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.DeleteWorkflowVisibilityRecordResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -119,7 +180,7 @@ func (c *clientImpl) DescribeMutableState( request *historyservice.DescribeMutableStateRequest, opts ...grpc.CallOption, ) (*historyservice.DescribeMutableStateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.DescribeMutableStateResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -139,7 +200,7 @@ func (c *clientImpl) DescribeWorkflowExecution( request *historyservice.DescribeWorkflowExecutionRequest, opts 
...grpc.CallOption, ) (*historyservice.DescribeWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.DescribeWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -154,12 +215,32 @@ func (c *clientImpl) DescribeWorkflowExecution( return response, nil } +func (c *clientImpl) ExecuteMultiOperation( + ctx context.Context, + request *historyservice.ExecuteMultiOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.ExecuteMultiOperationResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowId()) + var response *historyservice.ExecuteMultiOperationResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.ExecuteMultiOperation(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) ForceDeleteWorkflowExecution( ctx context.Context, request *historyservice.ForceDeleteWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.ForceDeleteWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.ForceDeleteWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -179,7 +260,7 @@ func (c *clientImpl) GenerateLastHistoryReplicationTasks( request *historyservice.GenerateLastHistoryReplicationTasksRequest, opts ...grpc.CallOption, ) (*historyservice.GenerateLastHistoryReplicationTasksResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.GenerateLastHistoryReplicationTasksResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -238,12 +319,32 @@ func (c *clientImpl) GetDLQReplicationMessages( return response, nil } +func (c *clientImpl) GetDLQTasks( + ctx context.Context, + request *historyservice.GetDLQTasksRequest, + opts ...grpc.CallOption, +) (*historyservice.GetDLQTasksResponse, error) { + shardID := c.getRandomShard() + var response *historyservice.GetDLQTasksResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.GetDLQTasks(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) GetMutableState( ctx context.Context, request *historyservice.GetMutableStateRequest, opts ...grpc.CallOption, ) (*historyservice.GetMutableStateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.GetMutableStateResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -283,7 +384,7 @@ func (c *clientImpl) GetWorkflowExecutionHistory( request *historyservice.GetWorkflowExecutionHistoryRequest, opts ...grpc.CallOption, ) (*historyservice.GetWorkflowExecutionHistoryResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.GetWorkflowExecutionHistoryResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -303,7 +404,7 @@ func (c *clientImpl) GetWorkflowExecutionHistoryReverse( request *historyservice.GetWorkflowExecutionHistoryReverseRequest, opts ...grpc.CallOption, ) (*historyservice.GetWorkflowExecutionHistoryReverseResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.GetWorkflowExecutionHistoryReverseResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -323,7 +424,7 @@ func (c *clientImpl) GetWorkflowExecutionRawHistory( request 
*historyservice.GetWorkflowExecutionRawHistoryRequest, opts ...grpc.CallOption, ) (*historyservice.GetWorkflowExecutionRawHistoryResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.GetWorkflowExecutionRawHistoryResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -343,7 +444,7 @@ func (c *clientImpl) GetWorkflowExecutionRawHistoryV2( request *historyservice.GetWorkflowExecutionRawHistoryV2Request, opts ...grpc.CallOption, ) (*historyservice.GetWorkflowExecutionRawHistoryV2Response, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.GetWorkflowExecutionRawHistoryV2Response op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -363,7 +464,7 @@ func (c *clientImpl) ImportWorkflowExecution( request *historyservice.ImportWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.ImportWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.ImportWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -378,12 +479,32 @@ func (c *clientImpl) ImportWorkflowExecution( return response, nil } +func (c *clientImpl) InvokeStateMachineMethod( + ctx context.Context, + request *historyservice.InvokeStateMachineMethodRequest, + opts ...grpc.CallOption, +) 
(*historyservice.InvokeStateMachineMethodResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowId()) + var response *historyservice.InvokeStateMachineMethodResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.InvokeStateMachineMethod(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) IsActivityTaskValid( ctx context.Context, request *historyservice.IsActivityTaskValidRequest, opts ...grpc.CallOption, ) (*historyservice.IsActivityTaskValidResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.IsActivityTaskValidResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -403,7 +524,7 @@ func (c *clientImpl) IsWorkflowTaskValid( request *historyservice.IsWorkflowTaskValidRequest, opts ...grpc.CallOption, ) (*historyservice.IsWorkflowTaskValidResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.IsWorkflowTaskValidResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -418,6 +539,46 @@ func (c *clientImpl) IsWorkflowTaskValid( return response, nil } +func (c *clientImpl) ListQueues( + ctx context.Context, + request *historyservice.ListQueuesRequest, + opts ...grpc.CallOption, +) (*historyservice.ListQueuesResponse, error) { + shardID := c.getRandomShard() + var response 
*historyservice.ListQueuesResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.ListQueues(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) ListTasks( + ctx context.Context, + request *historyservice.ListTasksRequest, + opts ...grpc.CallOption, +) (*historyservice.ListTasksResponse, error) { + shardID := request.GetRequest().GetShardId() + var response *historyservice.ListTasksResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.ListTasks(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) MergeDLQMessages( ctx context.Context, request *historyservice.MergeDLQMessagesRequest, @@ -438,12 +599,52 @@ func (c *clientImpl) MergeDLQMessages( return response, nil } +func (c *clientImpl) PauseActivity( + ctx context.Context, + request *historyservice.PauseActivityRequest, + opts ...grpc.CallOption, +) (*historyservice.PauseActivityResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetFrontendRequest().GetExecution().GetWorkflowId()) + var response *historyservice.PauseActivityResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.PauseActivity(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) PauseWorkflowExecution( + ctx context.Context, + request *historyservice.PauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*historyservice.PauseWorkflowExecutionResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetPauseRequest().GetWorkflowId()) + var response *historyservice.PauseWorkflowExecutionResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.PauseWorkflowExecution(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) PollMutableState( ctx context.Context, request *historyservice.PollMutableStateRequest, opts ...grpc.CallOption, ) (*historyservice.PollMutableStateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.PollMutableStateResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -463,7 +664,7 @@ func (c *clientImpl) PollWorkflowExecutionUpdate( request *historyservice.PollWorkflowExecutionUpdateRequest, opts ...grpc.CallOption, ) (*historyservice.PollWorkflowExecutionUpdateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetUpdateRef().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetUpdateRef().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.PollWorkflowExecutionUpdateResponse op := func(ctx 
context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -503,7 +704,7 @@ func (c *clientImpl) QueryWorkflow( request *historyservice.QueryWorkflowRequest, opts ...grpc.CallOption, ) (*historyservice.QueryWorkflowResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.QueryWorkflowResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -523,7 +724,7 @@ func (c *clientImpl) ReapplyEvents( request *historyservice.ReapplyEventsRequest, opts ...grpc.CallOption, ) (*historyservice.ReapplyEventsResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.ReapplyEventsResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -543,7 +744,7 @@ func (c *clientImpl) RebuildMutableState( request *historyservice.RebuildMutableStateRequest, opts ...grpc.CallOption, ) (*historyservice.RebuildMutableStateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.RebuildMutableStateResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -565,10 +766,23 @@ func (c *clientImpl) RecordActivityTaskHeartbeat( ) (*historyservice.RecordActivityTaskHeartbeatResponse, error) { taskToken, err := c.tokenSerializer.Deserialize(request.GetHeartbeatRequest().GetTaskToken()) if err != 
nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) - + return nil, serviceerror.NewInvalidArgument("error deserializing task token") + } + var namespaceID string + var businessID string + if len(taskToken.GetComponentRef()) > 0 { + ref, err := c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef()) + if err != nil { + return nil, err + } + namespaceID = ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } else { + namespaceID = request.GetNamespaceId() + businessID = taskToken.GetWorkflowId() + } + shardID := c.shardIDFromWorkflowID(namespaceID, businessID) + var response *historyservice.RecordActivityTaskHeartbeatResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -583,32 +797,12 @@ func (c *clientImpl) RecordActivityTaskHeartbeat( return response, nil } -func (c *clientImpl) RecordActivityTaskStarted( - ctx context.Context, - request *historyservice.RecordActivityTaskStartedRequest, - opts ...grpc.CallOption, -) (*historyservice.RecordActivityTaskStartedResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) - var response *historyservice.RecordActivityTaskStartedResponse - op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { - var err error - ctx, cancel := c.createContext(ctx) - defer cancel() - response, err = client.RecordActivityTaskStarted(ctx, request, opts...) 
- return err - } - if err := c.executeWithRedirect(ctx, shardID, op); err != nil { - return nil, err - } - return response, nil -} - func (c *clientImpl) RecordChildExecutionCompleted( ctx context.Context, request *historyservice.RecordChildExecutionCompletedRequest, opts ...grpc.CallOption, ) (*historyservice.RecordChildExecutionCompletedResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetParentExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetParentExecution().GetWorkflowId()) var response *historyservice.RecordChildExecutionCompletedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -628,7 +822,7 @@ func (c *clientImpl) RecordWorkflowTaskStarted( request *historyservice.RecordWorkflowTaskStartedRequest, opts ...grpc.CallOption, ) (*historyservice.RecordWorkflowTaskStartedResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) var response *historyservice.RecordWorkflowTaskStartedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -648,7 +842,7 @@ func (c *clientImpl) RefreshWorkflowTasks( request *historyservice.RefreshWorkflowTasksRequest, opts ...grpc.CallOption, ) (*historyservice.RefreshWorkflowTasksResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetExecution().GetWorkflowId()) var response *historyservice.RefreshWorkflowTasksResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -668,7 +862,7 @@ func (c *clientImpl) RemoveSignalMutableState( request 
*historyservice.RemoveSignalMutableStateRequest, opts ...grpc.CallOption, ) (*historyservice.RemoveSignalMutableStateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) var response *historyservice.RemoveSignalMutableStateResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -708,7 +902,7 @@ func (c *clientImpl) ReplicateEventsV2( request *historyservice.ReplicateEventsV2Request, opts ...grpc.CallOption, ) (*historyservice.ReplicateEventsV2Response, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) var response *historyservice.ReplicateEventsV2Response op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -728,7 +922,7 @@ func (c *clientImpl) ReplicateWorkflowState( request *historyservice.ReplicateWorkflowStateRequest, opts ...grpc.CallOption, ) (*historyservice.ReplicateWorkflowStateResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowState().GetExecutionInfo().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowState().GetExecutionInfo().GetWorkflowId()) var response *historyservice.ReplicateWorkflowStateResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -748,7 +942,7 @@ func (c *clientImpl) RequestCancelWorkflowExecution( request *historyservice.RequestCancelWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.RequestCancelWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, 
request.GetCancelRequest().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetCancelRequest().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.RequestCancelWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -763,12 +957,32 @@ func (c *clientImpl) RequestCancelWorkflowExecution( return response, nil } +func (c *clientImpl) ResetActivity( + ctx context.Context, + request *historyservice.ResetActivityRequest, + opts ...grpc.CallOption, +) (*historyservice.ResetActivityResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetFrontendRequest().GetExecution().GetWorkflowId()) + var response *historyservice.ResetActivityResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.ResetActivity(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) ResetStickyTaskQueue( ctx context.Context, request *historyservice.ResetStickyTaskQueueRequest, opts ...grpc.CallOption, ) (*historyservice.ResetStickyTaskQueueResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) var response *historyservice.ResetStickyTaskQueueResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -788,7 +1002,7 @@ func (c *clientImpl) ResetWorkflowExecution( request *historyservice.ResetWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.ResetWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetResetRequest().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetResetRequest().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.ResetWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -810,10 +1024,23 @@ func (c *clientImpl) RespondActivityTaskCanceled( ) (*historyservice.RespondActivityTaskCanceledResponse, error) { taskToken, err := c.tokenSerializer.Deserialize(request.GetCancelRequest().GetTaskToken()) if err != nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) - + return nil, serviceerror.NewInvalidArgument("error deserializing task token") + } + var namespaceID string + var businessID string + if len(taskToken.GetComponentRef()) > 0 { + ref, err := c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef()) + if err != nil { + return nil, err + } + namespaceID = 
ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } else { + namespaceID = request.GetNamespaceId() + businessID = taskToken.GetWorkflowId() + } + shardID := c.shardIDFromWorkflowID(namespaceID, businessID) + var response *historyservice.RespondActivityTaskCanceledResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -835,10 +1062,23 @@ func (c *clientImpl) RespondActivityTaskCompleted( ) (*historyservice.RespondActivityTaskCompletedResponse, error) { taskToken, err := c.tokenSerializer.Deserialize(request.GetCompleteRequest().GetTaskToken()) if err != nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) - + return nil, serviceerror.NewInvalidArgument("error deserializing task token") + } + var namespaceID string + var businessID string + if len(taskToken.GetComponentRef()) > 0 { + ref, err := c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef()) + if err != nil { + return nil, err + } + namespaceID = ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } else { + namespaceID = request.GetNamespaceId() + businessID = taskToken.GetWorkflowId() + } + shardID := c.shardIDFromWorkflowID(namespaceID, businessID) + var response *historyservice.RespondActivityTaskCompletedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -860,10 +1100,23 @@ func (c *clientImpl) RespondActivityTaskFailed( ) (*historyservice.RespondActivityTaskFailedResponse, error) { taskToken, err := c.tokenSerializer.Deserialize(request.GetFailedRequest().GetTaskToken()) if err != nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) - + return nil, serviceerror.NewInvalidArgument("error deserializing task token") + } + var namespaceID string + var businessID string + if len(taskToken.GetComponentRef()) > 0 { + ref, err := 
c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef()) + if err != nil { + return nil, err + } + namespaceID = ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } else { + namespaceID = request.GetNamespaceId() + businessID = taskToken.GetWorkflowId() + } + shardID := c.shardIDFromWorkflowID(namespaceID, businessID) + var response *historyservice.RespondActivityTaskFailedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -885,10 +1138,23 @@ func (c *clientImpl) RespondWorkflowTaskCompleted( ) (*historyservice.RespondWorkflowTaskCompletedResponse, error) { taskToken, err := c.tokenSerializer.Deserialize(request.GetCompleteRequest().GetTaskToken()) if err != nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) - + return nil, serviceerror.NewInvalidArgument("error deserializing task token") + } + var namespaceID string + var businessID string + if len(taskToken.GetComponentRef()) > 0 { + ref, err := c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef()) + if err != nil { + return nil, err + } + namespaceID = ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } else { + namespaceID = request.GetNamespaceId() + businessID = taskToken.GetWorkflowId() + } + shardID := c.shardIDFromWorkflowID(namespaceID, businessID) + var response *historyservice.RespondWorkflowTaskCompletedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -910,10 +1176,23 @@ func (c *clientImpl) RespondWorkflowTaskFailed( ) (*historyservice.RespondWorkflowTaskFailedResponse, error) { taskToken, err := c.tokenSerializer.Deserialize(request.GetFailedRequest().GetTaskToken()) if err != nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) - + return nil, serviceerror.NewInvalidArgument("error deserializing task 
token") + } + var namespaceID string + var businessID string + if len(taskToken.GetComponentRef()) > 0 { + ref, err := c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef()) + if err != nil { + return nil, err + } + namespaceID = ref.GetNamespaceId() + businessID = ref.GetBusinessId() + } else { + namespaceID = request.GetNamespaceId() + businessID = taskToken.GetWorkflowId() + } + shardID := c.shardIDFromWorkflowID(namespaceID, businessID) + var response *historyservice.RespondWorkflowTaskFailedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -933,7 +1212,7 @@ func (c *clientImpl) ScheduleWorkflowTask( request *historyservice.ScheduleWorkflowTaskRequest, opts ...grpc.CallOption, ) (*historyservice.ScheduleWorkflowTaskResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) var response *historyservice.ScheduleWorkflowTaskResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -953,7 +1232,7 @@ func (c *clientImpl) SignalWithStartWorkflowExecution( request *historyservice.SignalWithStartWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.SignalWithStartWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetSignalWithStartRequest().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetSignalWithStartRequest().GetWorkflowId()) var response *historyservice.SignalWithStartWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -973,7 +1252,7 @@ func (c *clientImpl) SignalWorkflowExecution( request *historyservice.SignalWorkflowExecutionRequest, opts ...grpc.CallOption, ) 
(*historyservice.SignalWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetSignalRequest().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetSignalRequest().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.SignalWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -988,12 +1267,32 @@ func (c *clientImpl) SignalWorkflowExecution( return response, nil } +func (c *clientImpl) StartNexusOperation( + ctx context.Context, + request *historyservice.StartNexusOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.StartNexusOperationResponse, error) { + shardID := request.GetShardId() + var response *historyservice.StartNexusOperationResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.StartNexusOperation(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) StartWorkflowExecution( ctx context.Context, request *historyservice.StartWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.StartWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetStartRequest().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetStartRequest().GetWorkflowId()) var response *historyservice.StartWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -1013,7 +1312,7 @@ func (c *clientImpl) SyncActivity( request *historyservice.SyncActivityRequest, opts ...grpc.CallOption, ) (*historyservice.SyncActivityResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowId()) var response *historyservice.SyncActivityResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -1048,12 +1347,32 @@ func (c *clientImpl) SyncShardStatus( return response, nil } +func (c *clientImpl) SyncWorkflowState( + ctx context.Context, + request *historyservice.SyncWorkflowStateRequest, + opts ...grpc.CallOption, +) (*historyservice.SyncWorkflowStateResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetExecution().GetWorkflowId()) + var response *historyservice.SyncWorkflowStateResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.SyncWorkflowState(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) TerminateWorkflowExecution( ctx context.Context, request *historyservice.TerminateWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.TerminateWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetTerminateRequest().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetTerminateRequest().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.TerminateWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -1068,12 +1387,72 @@ func (c *clientImpl) TerminateWorkflowExecution( return response, nil } +func (c *clientImpl) UnpauseActivity( + ctx context.Context, + request *historyservice.UnpauseActivityRequest, + opts ...grpc.CallOption, +) (*historyservice.UnpauseActivityResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetFrontendRequest().GetExecution().GetWorkflowId()) + var response *historyservice.UnpauseActivityResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.UnpauseActivity(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) UnpauseWorkflowExecution( + ctx context.Context, + request *historyservice.UnpauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*historyservice.UnpauseWorkflowExecutionResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetUnpauseRequest().GetWorkflowId()) + var response *historyservice.UnpauseWorkflowExecutionResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.UnpauseWorkflowExecution(ctx, request, opts...) + return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + +func (c *clientImpl) UpdateActivityOptions( + ctx context.Context, + request *historyservice.UpdateActivityOptionsRequest, + opts ...grpc.CallOption, +) (*historyservice.UpdateActivityOptionsResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetUpdateRequest().GetExecution().GetWorkflowId()) + var response *historyservice.UpdateActivityOptionsResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.UpdateActivityOptions(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) UpdateWorkflowExecution( ctx context.Context, request *historyservice.UpdateWorkflowExecutionRequest, opts ...grpc.CallOption, ) (*historyservice.UpdateWorkflowExecutionResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetRequest().GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetRequest().GetWorkflowExecution().GetWorkflowId()) var response *historyservice.UpdateWorkflowExecutionResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -1088,12 +1467,32 @@ func (c *clientImpl) UpdateWorkflowExecution( return response, nil } +func (c *clientImpl) UpdateWorkflowExecutionOptions( + ctx context.Context, + request *historyservice.UpdateWorkflowExecutionOptionsRequest, + opts ...grpc.CallOption, +) (*historyservice.UpdateWorkflowExecutionOptionsResponse, error) { + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetUpdateRequest().GetWorkflowExecution().GetWorkflowId()) + var response *historyservice.UpdateWorkflowExecutionOptionsResponse + op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { + var err error + ctx, cancel := c.createContext(ctx) + defer cancel() + response, err = client.UpdateWorkflowExecutionOptions(ctx, request, opts...) 
+ return err + } + if err := c.executeWithRedirect(ctx, shardID, op); err != nil { + return nil, err + } + return response, nil +} + func (c *clientImpl) VerifyChildExecutionCompletionRecorded( ctx context.Context, request *historyservice.VerifyChildExecutionCompletionRecordedRequest, opts ...grpc.CallOption, ) (*historyservice.VerifyChildExecutionCompletionRecordedResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetParentExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetParentExecution().GetWorkflowId()) var response *historyservice.VerifyChildExecutionCompletionRecordedResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error @@ -1113,7 +1512,7 @@ func (c *clientImpl) VerifyFirstWorkflowTaskScheduled( request *historyservice.VerifyFirstWorkflowTaskScheduledRequest, opts ...grpc.CallOption, ) (*historyservice.VerifyFirstWorkflowTaskScheduledResponse, error) { - shardID := c.shardIDFromWorkflowID(request.NamespaceId, request.GetWorkflowExecution().GetWorkflowId()) + shardID := c.shardIDFromWorkflowID(request.GetNamespaceId(), request.GetWorkflowExecution().GetWorkflowId()) var response *historyservice.VerifyFirstWorkflowTaskScheduledResponse op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { var err error diff --git a/client/history/client_test.go b/client/history/client_test.go index 1feaa33567e..be8ee24891f 100644 --- a/client/history/client_test.go +++ b/client/history/client_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package history_test import ( @@ -29,16 +5,15 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.temporal.io/api/serviceerror" "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/client/history" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/membership" - "go.temporal.io/server/internal/nettest" + "go.temporal.io/server/common/testing/nettest" + "go.uber.org/mock/gomock" "google.golang.org/grpc" ) @@ -57,7 +32,7 @@ func TestErrLookup(t *testing.T) { ctrl := gomock.NewController(t) serviceResolver := membership.NewMockServiceResolver(ctrl) - serviceResolver.EXPECT().Lookup("1").Return(nil, assert.AnError).AnyTimes() + serviceResolver.EXPECT().Lookup(gomock.Any()).Return(nil, membership.ErrInsufficientHosts).AnyTimes() client := history.NewClient( dynamicconfig.NewNoopCollection(), serviceResolver, @@ -86,21 +61,17 @@ func TestErrLookup(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() err := tc.fn() - var unavailableErr *serviceerror.Unavailable - require.ErrorAs(t, err, &unavailableErr, "Should return an 'Unavailable' error when there "+ - "are no history hosts available to serve the request") - assert.ErrorContains(t, err, assert.AnError.Error()) + require.ErrorIs(t, err, membership.ErrInsufficientHosts) }) } } // This tests our strategy for getting history hosts to serve shard-agnostic requests, like those interacting with the -// DLQ. For such requests, we should round-robin over all available history shards. In addition, we should re-use any -// available connections when the round-robin wraps around. +// DLQ. For such requests, we should route to a random history shard. In addition, we should re-use any available +// connections if we hit the same host. 
func TestShardAgnosticConnectionStrategy(t *testing.T) { t.Parallel() @@ -123,7 +94,6 @@ func TestShardAgnosticConnectionStrategy(t *testing.T) { }, }, } { - tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() ctrl := gomock.NewController(t) @@ -131,9 +101,9 @@ func TestShardAgnosticConnectionStrategy(t *testing.T) { // Create a service resolver that just returns 2 hosts for the first 3 requests. We want to send 3 requests // with 2 hosts so that we can verify that we re-use the connection of "test1" on the last request. serviceResolver := membership.NewMockServiceResolver(ctrl) - serviceResolver.EXPECT().Lookup("1").Return(membership.NewHostInfoFromAddress("test1"), nil) - serviceResolver.EXPECT().Lookup("2").Return(membership.NewHostInfoFromAddress("test2"), nil) - serviceResolver.EXPECT().Lookup("1").Return(membership.NewHostInfoFromAddress("test1"), nil) + serviceResolver.EXPECT().Lookup(gomock.Any()).Return(membership.NewHostInfoFromAddress("localhost"), nil) + serviceResolver.EXPECT().Lookup(gomock.Any()).Return(membership.NewHostInfoFromAddress("127.0.0.1"), nil) + serviceResolver.EXPECT().Lookup(gomock.Any()).Return(membership.NewHostInfoFromAddress("localhost"), nil) // Create an in-memory gRPC server. listener := nettest.NewListener(nettest.NewPipe()) @@ -159,9 +129,9 @@ func TestShardAgnosticConnectionStrategy(t *testing.T) { log.NewTestLogger(), 2, rpcFactory, - time.Duration(0), + time.Second, ) - for i := 0; i < 3; i++ { + for range 3 { err := tc.fn(client) require.NoError(t, err) } @@ -169,7 +139,7 @@ func TestShardAgnosticConnectionStrategy(t *testing.T) { // Verify that there are no repeated dialed addresses (indicating that we re-used the connection). 
assert.Equal( t, - []string{"test1", "test2"}, + []string{"localhost", "127.0.0.1"}, rpcFactory.dialedAddresses, "Should cache the client connection and reuse it for subsequent requests", ) @@ -191,7 +161,7 @@ func (s *testHistoryService) DeleteDLQTasks( return &historyservice.DeleteDLQTasksResponse{}, nil } -func (t *testRPCFactory) CreateInternodeGRPCConnection(rpcAddress string) *grpc.ClientConn { +func (t *testRPCFactory) CreateHistoryGRPCConnection(rpcAddress string) *grpc.ClientConn { t.dialedAddresses = append(t.dialedAddresses, rpcAddress) - return t.base.CreateInternodeGRPCConnection(rpcAddress) + return t.base.CreateHistoryGRPCConnection(rpcAddress) } diff --git a/client/history/connections.go b/client/history/connections.go index 57a78c24998..1ef2e712043 100644 --- a/client/history/connections.go +++ b/client/history/connections.go @@ -1,83 +1,58 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination connections_mock.go - package history import ( "sync" - "google.golang.org/grpc" - - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common/membership" + "google.golang.org/grpc" ) type ( - clientConnection struct { - historyClient historyservice.HistoryServiceClient - grpcConn *grpc.ClientConn + clientConnection[C any] struct { + grpcClient C + grpcConn *grpc.ClientConn } rpcAddress string - connectionPoolImpl struct { + connectionPoolImpl[C any] struct { mu struct { sync.RWMutex - conns map[rpcAddress]clientConnection + conns map[rpcAddress]clientConnection[C] } historyServiceResolver membership.ServiceResolver rpcFactory RPCFactory + clientCtor func(grpc.ClientConnInterface) C } // RPCFactory is a subset of the [go.temporal.io/server/common/rpc.RPCFactory] interface to make testing easier. 
RPCFactory interface { - CreateInternodeGRPCConnection(rpcAddress string) *grpc.ClientConn + CreateHistoryGRPCConnection(rpcAddress string) *grpc.ClientConn } - connectionPool interface { - getOrCreateClientConn(addr rpcAddress) clientConnection - getAllClientConns() []clientConnection - resetConnectBackoff(clientConnection) + connectionPool[C any] interface { + getOrCreateClientConn(addr rpcAddress) clientConnection[C] + getAllClientConns() []clientConnection[C] + resetConnectBackoff(clientConnection[C]) } ) -func newConnectionPool( +func NewConnectionPool[C any]( historyServiceResolver membership.ServiceResolver, rpcFactory RPCFactory, -) *connectionPoolImpl { - c := &connectionPoolImpl{ + clientCtor func(grpc.ClientConnInterface) C, +) *connectionPoolImpl[C] { + c := &connectionPoolImpl[C]{ historyServiceResolver: historyServiceResolver, rpcFactory: rpcFactory, + clientCtor: clientCtor, } - c.mu.conns = make(map[rpcAddress]clientConnection) + c.mu.conns = make(map[rpcAddress]clientConnection[C]) return c } -func (c *connectionPoolImpl) getOrCreateClientConn(addr rpcAddress) clientConnection { +func (c *connectionPoolImpl[C]) getOrCreateClientConn(addr rpcAddress) clientConnection[C] { c.mu.RLock() cc, ok := c.mu.conns[addr] c.mu.RUnlock() @@ -91,21 +66,20 @@ func (c *connectionPoolImpl) getOrCreateClientConn(addr rpcAddress) clientConnec if cc, ok = c.mu.conns[addr]; ok { return cc } - - grpcConn := c.rpcFactory.CreateInternodeGRPCConnection(string(addr)) - cc = clientConnection{ - historyClient: historyservice.NewHistoryServiceClient(grpcConn), - grpcConn: grpcConn, + grpcConn := c.rpcFactory.CreateHistoryGRPCConnection(string(addr)) + cc = clientConnection[C]{ + grpcClient: c.clientCtor(grpcConn), + grpcConn: grpcConn, } c.mu.conns[addr] = cc return cc } -func (c *connectionPoolImpl) getAllClientConns() []clientConnection { +func (c *connectionPoolImpl[C]) getAllClientConns() []clientConnection[C] { hostInfos := c.historyServiceResolver.Members() - var 
clientConns []clientConnection + var clientConns []clientConnection[C] for _, hostInfo := range hostInfos { cc := c.getOrCreateClientConn(rpcAddress(hostInfo.GetAddress())) clientConns = append(clientConns, cc) @@ -114,6 +88,6 @@ func (c *connectionPoolImpl) getAllClientConns() []clientConnection { return clientConns } -func (c *connectionPoolImpl) resetConnectBackoff(cc clientConnection) { +func (c *connectionPoolImpl[C]) resetConnectBackoff(cc clientConnection[C]) { cc.grpcConn.ResetConnectBackoff() } diff --git a/client/history/connections_mock.go b/client/history/connections_mock.go deleted file mode 100644 index e9a88e1220b..00000000000 --- a/client/history/connections_mock.go +++ /dev/null @@ -1,136 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. 
DO NOT EDIT. -// Source: connections.go - -// Package history is a generated GoMock package. -package history - -import ( - reflect "reflect" - - gomock "github.com/golang/mock/gomock" - grpc "google.golang.org/grpc" -) - -// MockRPCFactory is a mock of RPCFactory interface. -type MockRPCFactory struct { - ctrl *gomock.Controller - recorder *MockRPCFactoryMockRecorder -} - -// MockRPCFactoryMockRecorder is the mock recorder for MockRPCFactory. -type MockRPCFactoryMockRecorder struct { - mock *MockRPCFactory -} - -// NewMockRPCFactory creates a new mock instance. -func NewMockRPCFactory(ctrl *gomock.Controller) *MockRPCFactory { - mock := &MockRPCFactory{ctrl: ctrl} - mock.recorder = &MockRPCFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRPCFactory) EXPECT() *MockRPCFactoryMockRecorder { - return m.recorder -} - -// CreateInternodeGRPCConnection mocks base method. -func (m *MockRPCFactory) CreateInternodeGRPCConnection(rpcAddress string) *grpc.ClientConn { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateInternodeGRPCConnection", rpcAddress) - ret0, _ := ret[0].(*grpc.ClientConn) - return ret0 -} - -// CreateInternodeGRPCConnection indicates an expected call of CreateInternodeGRPCConnection. -func (mr *MockRPCFactoryMockRecorder) CreateInternodeGRPCConnection(rpcAddress interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInternodeGRPCConnection", reflect.TypeOf((*MockRPCFactory)(nil).CreateInternodeGRPCConnection), rpcAddress) -} - -// MockconnectionPool is a mock of connectionPool interface. -type MockconnectionPool struct { - ctrl *gomock.Controller - recorder *MockconnectionPoolMockRecorder -} - -// MockconnectionPoolMockRecorder is the mock recorder for MockconnectionPool. -type MockconnectionPoolMockRecorder struct { - mock *MockconnectionPool -} - -// NewMockconnectionPool creates a new mock instance. 
-func NewMockconnectionPool(ctrl *gomock.Controller) *MockconnectionPool { - mock := &MockconnectionPool{ctrl: ctrl} - mock.recorder = &MockconnectionPoolMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockconnectionPool) EXPECT() *MockconnectionPoolMockRecorder { - return m.recorder -} - -// getAllClientConns mocks base method. -func (m *MockconnectionPool) getAllClientConns() []clientConnection { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getAllClientConns") - ret0, _ := ret[0].([]clientConnection) - return ret0 -} - -// getAllClientConns indicates an expected call of getAllClientConns. -func (mr *MockconnectionPoolMockRecorder) getAllClientConns() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getAllClientConns", reflect.TypeOf((*MockconnectionPool)(nil).getAllClientConns)) -} - -// getOrCreateClientConn mocks base method. -func (m *MockconnectionPool) getOrCreateClientConn(addr rpcAddress) clientConnection { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "getOrCreateClientConn", addr) - ret0, _ := ret[0].(clientConnection) - return ret0 -} - -// getOrCreateClientConn indicates an expected call of getOrCreateClientConn. -func (mr *MockconnectionPoolMockRecorder) getOrCreateClientConn(addr interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getOrCreateClientConn", reflect.TypeOf((*MockconnectionPool)(nil).getOrCreateClientConn), addr) -} - -// resetConnectBackoff mocks base method. -func (m *MockconnectionPool) resetConnectBackoff(arg0 clientConnection) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "resetConnectBackoff", arg0) -} - -// resetConnectBackoff indicates an expected call of resetConnectBackoff. 
-func (mr *MockconnectionPoolMockRecorder) resetConnectBackoff(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "resetConnectBackoff", reflect.TypeOf((*MockconnectionPool)(nil).resetConnectBackoff), arg0) -} diff --git a/client/history/historytest/clienttest.go b/client/history/historytest/clienttest.go index 8af81acfd11..de9cb8990a3 100644 --- a/client/history/historytest/clienttest.go +++ b/client/history/historytest/clienttest.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package historytest contains library test functions for [history.NewClient] that use ahistory task queue manager. 
// These are not test functions themselves because we construct database clients in another package, which will in turn // call this function, but we don't want to put the testing logic there because it's not specific to any database, but @@ -33,24 +9,24 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/embedded" - "google.golang.org/grpc" - commonspb "go.temporal.io/server/api/common/v1" "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/chasm" "go.temporal.io/server/client/history" "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/membership" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/persistencetest" - "go.temporal.io/server/internal/nettest" + "go.temporal.io/server/common/testing/nettest" historyserver "go.temporal.io/server/service/history" "go.temporal.io/server/service/history/tasks" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" ) // fakeTracerProvider is needed to construct a [historyserver.Handler] object. @@ -62,6 +38,8 @@ func (f fakeTracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer { return nil } +var _ trace.TracerProvider = (*fakeTracerProvider)(nil) + // TestClient works by doing the following: // 1. Enqueue some tasks // 2. Start a server which serves the DLQ endpoints @@ -139,7 +117,7 @@ func readTasks( // We want to run a test where the client makes multiple requests to the server because the client is stateful. In // particular, the first request here should establish a connection, and the next one should reuse that connection. 
- for i := 0; i < numTasks; i++ { + for i := range numTasks { res, err := client.GetDLQTasks(context.Background(), &historyservice.GetDLQTasksRequest{ DlqKey: &commonspb.HistoryDLQKey{ TaskCategory: int32(tasks.CategoryTransfer.ID()), @@ -158,11 +136,15 @@ func readTasks( func createServer(historyTaskQueueManager persistence.HistoryTaskQueueManager) *grpc.Server { // TODO: find a better way to create a history handler - historyHandler := historyserver.HandlerProvider(historyserver.NewHandlerArgs{ + historyHandler, err := historyserver.HandlerProvider(historyserver.NewHandlerArgs{ TaskQueueManager: historyTaskQueueManager, TracerProvider: fakeTracerProvider{}, TaskCategoryRegistry: tasks.NewDefaultTaskCategoryRegistry(), + ChasmRegistry: chasm.NewRegistry(log.NewNoopLogger()), }) + if err != nil { + panic(err) // nolint:forbidigo // Panic is acceptable in test setup code. + } grpcServer := grpc.NewServer() historyservice.RegisterHistoryServiceServer(grpcServer, historyHandler) return grpcServer @@ -182,7 +164,7 @@ func createClient(ctrl *gomock.Controller, listener *nettest.PipeListener) histo log.NewTestLogger(), 1, rpcFactory, - time.Duration(0), + time.Second, ) return client } @@ -199,7 +181,7 @@ func enqueueTasks( task := &tasks.WorkflowTask{ TaskID: 42, } - for i := 0; i < numTasks; i++ { + for range numTasks { _, err := historyTaskQueueManager.EnqueueTask(context.Background(), &persistence.EnqueueTaskRequest{ QueueType: persistence.QueueTypeHistoryDLQ, SourceCluster: sourceCluster, diff --git a/client/history/metadata.go b/client/history/metadata.go index 3c98126a427..d09f6a0dc5f 100644 --- a/client/history/metadata.go +++ b/client/history/metadata.go @@ -1,34 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package history import ( - "fmt" "strconv" "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/headers" "google.golang.org/grpc/metadata" ) @@ -59,55 +35,35 @@ func EncodeClusterShardMD( } func DecodeClusterShardMD( - clusterShardMD metadata.MD, -) (_ ClusterShardID, _ ClusterShardID, _ error) { - var clientClusterShardID ClusterShardID - var serverClusterShardID ClusterShardID - - clientClusterID, err := parseInt32(clusterShardMD, MetadataKeyClientClusterID) - if err != nil { - return clientClusterShardID, serverClusterShardID, err + getter headers.HeaderGetter, +) (client ClusterShardID, server ClusterShardID, err error) { + if client.ClusterID, err = parseInt32(getter, MetadataKeyClientClusterID); err != nil { + return + } else if client.ShardID, err = parseInt32(getter, MetadataKeyClientShardID); err != nil { + return + } else if server.ClusterID, err = parseInt32(getter, MetadataKeyServerClusterID); err != nil { + return + } else if server.ShardID, err = parseInt32(getter, MetadataKeyServerShardID); err != nil { + return } - clientShardID, err := parseInt32(clusterShardMD, MetadataKeyClientShardID) - if err != nil { - return clientClusterShardID, serverClusterShardID, err - } - clientClusterShardID.ClusterID = clientClusterID - clientClusterShardID.ShardID = clientShardID - - serverClusterID, err := parseInt32(clusterShardMD, MetadataKeyServerClusterID) - if err != nil { - return clientClusterShardID, serverClusterShardID, err - } - serverShardID, err := parseInt32(clusterShardMD, MetadataKeyServerShardID) - if err != nil { - return clientClusterShardID, serverClusterShardID, err - } - serverClusterShardID.ClusterID = serverClusterID - serverClusterShardID.ShardID = serverShardID - - return clientClusterShardID, serverClusterShardID, nil + return } func parseInt32( - clusterShardMD metadata.MD, + getter headers.HeaderGetter, metadataKey string, ) (int32, error) { - metadataValues := clusterShardMD.Get(metadataKey) - if len(metadataValues) != 1 
{ - return 0, serviceerror.NewInvalidArgument(fmt.Sprintf( - "unable to parse metadata key %v: %v", - metadataKey, - metadataValues, - )) + stringValue := getter.Get(metadataKey) + if stringValue == "" { + return 0, serviceerror.NewInvalidArgument("missing cluster & shard ID metadata") } - metadataValue, err := strconv.Atoi(metadataValues[0]) + metadataValue, err := strconv.Atoi(stringValue) if err != nil { - return 0, serviceerror.NewInvalidArgument(fmt.Sprintf( + return 0, serviceerror.NewInvalidArgumentf( "unable to parse metadata key %v: %v", metadataKey, - metadataValues, - )) + err, + ) } return int32(metadataValue), nil } diff --git a/client/history/metadata_test.go b/client/history/metadata_test.go index b65d7a10fa9..b1964780c53 100644 --- a/client/history/metadata_test.go +++ b/client/history/metadata_test.go @@ -1,38 +1,16 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package history import ( + "context" "math/rand" "strconv" "testing" - "github.com/golang/mock/gomock" "github.com/google/uuid" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "go.temporal.io/server/common/headers" + "go.uber.org/mock/gomock" "google.golang.org/grpc/metadata" ) @@ -81,7 +59,9 @@ func (s *metadataSuite) TestClusterShardMD_Encode_Decode() { clientClusterShardID, serverClusterShardID, ) - actualClientClusterShardID, actualServerClusterShardID, err := DecodeClusterShardMD(clusterShardMD) + ctx := metadata.NewIncomingContext(context.Background(), clusterShardMD) + getter := headers.NewGRPCHeaderGetter(ctx) + actualClientClusterShardID, actualServerClusterShardID, err := DecodeClusterShardMD(getter) s.NoError(err) s.Equal(clientClusterShardID, actualClientClusterShardID) s.Equal(serverClusterShardID, actualServerClusterShardID) @@ -93,7 +73,9 @@ func (s *metadataSuite) TestClusterShardMD_Decode_Error() { MetadataKeyServerClusterID, uuid.NewString(), MetadataKeyServerShardID, strconv.Itoa(int(rand.Int31())), ) - _, _, err := DecodeClusterShardMD(clusterShardMD) + ctx := metadata.NewIncomingContext(context.Background(), clusterShardMD) + getter := headers.NewGRPCHeaderGetter(ctx) + _, _, err := DecodeClusterShardMD(getter) s.Error(err) clusterShardMD = metadata.Pairs( @@ -101,7 +83,9 @@ func (s *metadataSuite) TestClusterShardMD_Decode_Error() { MetadataKeyServerClusterID, uuid.NewString(), MetadataKeyServerShardID, strconv.Itoa(int(rand.Int31())), ) - _, _, err = DecodeClusterShardMD(clusterShardMD) + ctx = metadata.NewIncomingContext(context.Background(), clusterShardMD) + getter = headers.NewGRPCHeaderGetter(ctx) + _, _, err = 
DecodeClusterShardMD(getter) s.Error(err) clusterShardMD = metadata.Pairs( @@ -109,7 +93,9 @@ func (s *metadataSuite) TestClusterShardMD_Decode_Error() { MetadataKeyClientShardID, strconv.Itoa(int(rand.Int31())), MetadataKeyServerShardID, strconv.Itoa(int(rand.Int31())), ) - _, _, err = DecodeClusterShardMD(clusterShardMD) + ctx = metadata.NewIncomingContext(context.Background(), clusterShardMD) + getter = headers.NewGRPCHeaderGetter(ctx) + _, _, err = DecodeClusterShardMD(getter) s.Error(err) clusterShardMD = metadata.Pairs( @@ -117,7 +103,9 @@ func (s *metadataSuite) TestClusterShardMD_Decode_Error() { MetadataKeyClientShardID, strconv.Itoa(int(rand.Int31())), MetadataKeyServerClusterID, uuid.NewString(), ) - _, _, err = DecodeClusterShardMD(clusterShardMD) + ctx = metadata.NewIncomingContext(context.Background(), clusterShardMD) + getter = headers.NewGRPCHeaderGetter(ctx) + _, _, err = DecodeClusterShardMD(getter) s.Error(err) clusterShardMD = metadata.Pairs( @@ -126,6 +114,8 @@ func (s *metadataSuite) TestClusterShardMD_Decode_Error() { MetadataKeyServerClusterID, uuid.NewString(), MetadataKeyServerShardID, uuid.NewString(), ) - _, _, err = DecodeClusterShardMD(clusterShardMD) + ctx = metadata.NewIncomingContext(context.Background(), clusterShardMD) + getter = headers.NewGRPCHeaderGetter(ctx) + _, _, err = DecodeClusterShardMD(getter) s.Error(err) } diff --git a/client/history/metric_client.go b/client/history/metric_client.go index 3c5ea23f5c8..e094ff5cb09 100644 --- a/client/history/metric_client.go +++ b/client/history/metric_client.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package history import ( @@ -29,13 +5,12 @@ import ( "time" "go.temporal.io/api/serviceerror" - "google.golang.org/grpc" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" + "google.golang.org/grpc" ) var _ historyservice.HistoryServiceClient = (*metricClient)(nil) @@ -75,6 +50,34 @@ func (c *metricClient) StreamWorkflowReplicationMessages( return c.client.StreamWorkflowReplicationMessages(ctx, opts...) 
} +func (c *metricClient) StartNexusOperation( + ctx context.Context, + request *historyservice.StartNexusOperationRequest, + opts ...grpc.CallOption, +) (_ *historyservice.StartNexusOperationResponse, retError error) { + op := "HistoryClientStartNexusOperation_" + request.GetRequest().GetService() + "_" + request.GetRequest().GetOperation() + metricsHandler, startTime := c.startMetricsRecording(ctx, op) + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.StartNexusOperation(ctx, request, opts...) +} + +func (c *metricClient) CancelNexusOperation( + ctx context.Context, + request *historyservice.CancelNexusOperationRequest, + opts ...grpc.CallOption, +) (_ *historyservice.CancelNexusOperationResponse, retError error) { + op := "HistoryClientCancelNexusOperation_" + request.GetRequest().GetService() + "_" + request.GetRequest().GetOperation() + metricsHandler, startTime := c.startMetricsRecording(ctx, op) + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CancelNexusOperation(ctx, request, opts...) +} + func (c *metricClient) startMetricsRecording( ctx context.Context, operation string, @@ -98,7 +101,8 @@ func (c *metricClient) finishMetricsRecording( *serviceerror.QueryFailed, *serviceerror.NamespaceNotFound, *serviceerror.WorkflowNotReady, - *serviceerror.WorkflowExecutionAlreadyStarted: + *serviceerror.WorkflowExecutionAlreadyStarted, + *serviceerror.ResourceExhausted: // noop - not interest and too many logs default: c.throttledLogger.Info("history client encountered error", tag.Error(err), tag.ServiceErrorType(err)) diff --git a/client/history/metric_client_gen.go b/client/history/metric_client_gen.go index 6ea86378ca3..bbfe7511d8a 100644 --- a/client/history/metric_client_gen.go +++ b/client/history/metric_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package history @@ -61,6 +37,48 @@ func (c *metricClient) CloseShard( return c.client.CloseShard(ctx, request, opts...) } +func (c *metricClient) CompleteNexusOperation( + ctx context.Context, + request *historyservice.CompleteNexusOperationRequest, + opts ...grpc.CallOption, +) (_ *historyservice.CompleteNexusOperationResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientCompleteNexusOperation") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CompleteNexusOperation(ctx, request, opts...) 
+} + +func (c *metricClient) CompleteNexusOperationChasm( + ctx context.Context, + request *historyservice.CompleteNexusOperationChasmRequest, + opts ...grpc.CallOption, +) (_ *historyservice.CompleteNexusOperationChasmResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientCompleteNexusOperationChasm") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CompleteNexusOperationChasm(ctx, request, opts...) +} + +func (c *metricClient) DeepHealthCheck( + ctx context.Context, + request *historyservice.DeepHealthCheckRequest, + opts ...grpc.CallOption, +) (_ *historyservice.DeepHealthCheckResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientDeepHealthCheck") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeepHealthCheck(ctx, request, opts...) +} + func (c *metricClient) DeleteDLQTasks( ctx context.Context, request *historyservice.DeleteDLQTasksRequest, @@ -145,6 +163,20 @@ func (c *metricClient) DescribeWorkflowExecution( return c.client.DescribeWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) ExecuteMultiOperation( + ctx context.Context, + request *historyservice.ExecuteMultiOperationRequest, + opts ...grpc.CallOption, +) (_ *historyservice.ExecuteMultiOperationResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientExecuteMultiOperation") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ExecuteMultiOperation(ctx, request, opts...) +} + func (c *metricClient) ForceDeleteWorkflowExecution( ctx context.Context, request *historyservice.ForceDeleteWorkflowExecutionRequest, @@ -341,6 +373,20 @@ func (c *metricClient) ImportWorkflowExecution( return c.client.ImportWorkflowExecution(ctx, request, opts...) 
} +func (c *metricClient) InvokeStateMachineMethod( + ctx context.Context, + request *historyservice.InvokeStateMachineMethodRequest, + opts ...grpc.CallOption, +) (_ *historyservice.InvokeStateMachineMethodResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientInvokeStateMachineMethod") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.InvokeStateMachineMethod(ctx, request, opts...) +} + func (c *metricClient) IsActivityTaskValid( ctx context.Context, request *historyservice.IsActivityTaskValidRequest, @@ -411,6 +457,34 @@ func (c *metricClient) MergeDLQMessages( return c.client.MergeDLQMessages(ctx, request, opts...) } +func (c *metricClient) PauseActivity( + ctx context.Context, + request *historyservice.PauseActivityRequest, + opts ...grpc.CallOption, +) (_ *historyservice.PauseActivityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientPauseActivity") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PauseActivity(ctx, request, opts...) +} + +func (c *metricClient) PauseWorkflowExecution( + ctx context.Context, + request *historyservice.PauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (_ *historyservice.PauseWorkflowExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientPauseWorkflowExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.PauseWorkflowExecution(ctx, request, opts...) +} + func (c *metricClient) PollMutableState( ctx context.Context, request *historyservice.PollMutableStateRequest, @@ -635,6 +709,20 @@ func (c *metricClient) RequestCancelWorkflowExecution( return c.client.RequestCancelWorkflowExecution(ctx, request, opts...) 
} +func (c *metricClient) ResetActivity( + ctx context.Context, + request *historyservice.ResetActivityRequest, + opts ...grpc.CallOption, +) (_ *historyservice.ResetActivityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientResetActivity") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ResetActivity(ctx, request, opts...) +} + func (c *metricClient) ResetStickyTaskQueue( ctx context.Context, request *historyservice.ResetStickyTaskQueueRequest, @@ -817,6 +905,20 @@ func (c *metricClient) SyncShardStatus( return c.client.SyncShardStatus(ctx, request, opts...) } +func (c *metricClient) SyncWorkflowState( + ctx context.Context, + request *historyservice.SyncWorkflowStateRequest, + opts ...grpc.CallOption, +) (_ *historyservice.SyncWorkflowStateResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientSyncWorkflowState") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SyncWorkflowState(ctx, request, opts...) +} + func (c *metricClient) TerminateWorkflowExecution( ctx context.Context, request *historyservice.TerminateWorkflowExecutionRequest, @@ -831,6 +933,48 @@ func (c *metricClient) TerminateWorkflowExecution( return c.client.TerminateWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) UnpauseActivity( + ctx context.Context, + request *historyservice.UnpauseActivityRequest, + opts ...grpc.CallOption, +) (_ *historyservice.UnpauseActivityResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientUnpauseActivity") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UnpauseActivity(ctx, request, opts...) 
+} + +func (c *metricClient) UnpauseWorkflowExecution( + ctx context.Context, + request *historyservice.UnpauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (_ *historyservice.UnpauseWorkflowExecutionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientUnpauseWorkflowExecution") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UnpauseWorkflowExecution(ctx, request, opts...) +} + +func (c *metricClient) UpdateActivityOptions( + ctx context.Context, + request *historyservice.UpdateActivityOptionsRequest, + opts ...grpc.CallOption, +) (_ *historyservice.UpdateActivityOptionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientUpdateActivityOptions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateActivityOptions(ctx, request, opts...) +} + func (c *metricClient) UpdateWorkflowExecution( ctx context.Context, request *historyservice.UpdateWorkflowExecutionRequest, @@ -845,6 +989,20 @@ func (c *metricClient) UpdateWorkflowExecution( return c.client.UpdateWorkflowExecution(ctx, request, opts...) } +func (c *metricClient) UpdateWorkflowExecutionOptions( + ctx context.Context, + request *historyservice.UpdateWorkflowExecutionOptionsRequest, + opts ...grpc.CallOption, +) (_ *historyservice.UpdateWorkflowExecutionOptionsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "HistoryClientUpdateWorkflowExecutionOptions") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkflowExecutionOptions(ctx, request, opts...) 
+} + func (c *metricClient) VerifyChildExecutionCompletionRecorded( ctx context.Context, request *historyservice.VerifyChildExecutionCompletionRecordedRequest, diff --git a/client/history/redirector.go b/client/history/redirector.go index 632ef4f2a08..20668155316 100644 --- a/client/history/redirector.go +++ b/client/history/redirector.go @@ -1,34 +1,9 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package history import ( "context" "errors" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/convert" "go.temporal.io/server/common/membership" @@ -36,19 +11,18 @@ import ( ) type ( - // A redirector executes a client operation against a history instance. + // A Redirector executes a client operation against a history instance. 
// If the operation is intended for the owner of a shard, and the request // returns a shard ownership lost error with a hint for a new shard owner, // the redirector will retry the request to the new owner. - redirector interface { - clientForShardID(int32) (historyservice.HistoryServiceClient, error) - execute(context.Context, int32, clientOperation) error + Redirector[C any] interface { + Execute(ctx context.Context, shardID int32, op ClientOperation[C]) error + clientForShardID(int32) (C, error) } + ClientOperation[C any] func(ctx context.Context, client C) error - clientOperation func(ctx context.Context, client historyservice.HistoryServiceClient) error - - basicRedirector struct { - connections connectionPool + BasicRedirector[C any] struct { + connections connectionPool[C] historyServiceResolver membership.ServiceResolver } ) @@ -61,29 +35,30 @@ func shardLookup(resolver membership.ServiceResolver, shardID int32) (rpcAddress return rpcAddress(hostInfo.GetAddress()), nil } -func newBasicRedirector( - connections connectionPool, +func NewBasicRedirector[C any]( + connections connectionPool[C], historyServiceResolver membership.ServiceResolver, -) *basicRedirector { - return &basicRedirector{ +) *BasicRedirector[C] { + return &BasicRedirector[C]{ connections: connections, historyServiceResolver: historyServiceResolver, } } -func (r *basicRedirector) clientForShardID(shardID int32) (historyservice.HistoryServiceClient, error) { +func (r *BasicRedirector[C]) clientForShardID(shardID int32) (C, error) { + var zero C if err := checkShardID(shardID); err != nil { - return nil, err + return zero, err } address, err := shardLookup(r.historyServiceResolver, shardID) if err != nil { - return nil, err + return zero, err } clientConn := r.connections.getOrCreateClientConn(address) - return clientConn.historyClient, nil + return clientConn.grpcClient, nil } -func (r *basicRedirector) execute(ctx context.Context, shardID int32, op clientOperation) error { +func (r 
*BasicRedirector[C]) Execute(ctx context.Context, shardID int32, op ClientOperation[C]) error { if err := checkShardID(shardID); err != nil { return err } @@ -94,13 +69,13 @@ func (r *basicRedirector) execute(ctx context.Context, shardID int32, op clientO return r.redirectLoop(ctx, address, op) } -func (r *basicRedirector) redirectLoop(ctx context.Context, address rpcAddress, op clientOperation) error { +func (r *BasicRedirector[C]) redirectLoop(ctx context.Context, address rpcAddress, op ClientOperation[C]) error { for { if err := common.IsValidContext(ctx); err != nil { return err } clientConn := r.connections.getOrCreateClientConn(address) - err := op(ctx, clientConn.historyClient) + err := op(ctx, clientConn.grpcClient) var solErr *serviceerrors.ShardOwnershipLost if !errors.As(err, &solErr) || len(solErr.OwnerHost) == 0 { return err diff --git a/client/history/redirector_test.go b/client/history/redirector_test.go index 8906448787a..afc7890c8a7 100644 --- a/client/history/redirector_test.go +++ b/client/history/redirector_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package history import ( @@ -30,16 +6,15 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.temporal.io/api/serviceerror" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/api/historyservicemock/v1" "go.temporal.io/server/common/convert" "go.temporal.io/server/common/membership" serviceerrors "go.temporal.io/server/common/serviceerror" + "go.uber.org/mock/gomock" ) type ( @@ -48,11 +23,25 @@ type ( *require.Assertions controller *gomock.Controller - connections *MockconnectionPool + connections *mockConnectionPool[historyservice.HistoryServiceClient] resolver *membership.MockServiceResolver } ) +type mockConnectionPool[C any] struct { + connectionPool[C] + client C + resetCalls int +} + +func (m *mockConnectionPool[C]) getOrCreateClientConn(testAddr rpcAddress) clientConnection[C] { + return clientConnection[C]{grpcClient: m.client} +} + +func (m *mockConnectionPool[C]) resetConnectBackoff(clientConnection[C]) { + m.resetCalls++ +} + func TestBasicRedirectorSuite(t *testing.T) { s := new(basicRedirectorSuite) suite.Run(t, s) @@ -61,20 +50,15 @@ func TestBasicRedirectorSuite(t *testing.T) { func (s *basicRedirectorSuite) SetupTest() { s.Assertions = require.New(s.T()) s.controller = gomock.NewController(s.T()) - - s.connections = NewMockconnectionPool(s.controller) s.resolver = membership.NewMockServiceResolver(s.controller) -} - -func (s *basicRedirectorSuite) 
TearDownTest() { - s.controller.Finish() + s.connections = &mockConnectionPool[historyservice.HistoryServiceClient]{} } func (s *basicRedirectorSuite) TestShardCheck() { - r := newBasicRedirector(s.connections, s.resolver) + r := NewBasicRedirector(s.connections, s.resolver) invalErr := &serviceerror.InvalidArgument{} - err := r.execute( + err := r.Execute( context.Background(), -1, func(_ context.Context, _ historyservice.HistoryServiceClient) error { @@ -86,7 +70,7 @@ func (s *basicRedirectorSuite) TestShardCheck() { s.ErrorAs(err, &invalErr) } -func opErrorTest(s *basicRedirectorSuite, clientOp clientOperation, verify func(err error)) { +func opErrorTest(s *basicRedirectorSuite, clientOp ClientOperation[historyservice.HistoryServiceClient], verify func(err error)) { testAddr := rpcAddress("testaddr") shardID := int32(1) @@ -96,20 +80,14 @@ func opErrorTest(s *basicRedirectorSuite, clientOp clientOperation, verify func( Times(1) mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) - clientConn := clientConnection{ - historyClient: mockClient, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr). - Return(clientConn). - Times(1) + s.connections.client = mockClient - r := newBasicRedirector(s.connections, s.resolver) + r := NewBasicRedirector(s.connections, s.resolver) ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond) defer cancel() - err := r.execute(ctx, shardID, clientOp) + err := r.Execute(ctx, shardID, clientOp) verify(err) } @@ -145,47 +123,24 @@ func (s *basicRedirectorSuite) TestShardOwnershipLostErrors() { Return(membership.NewHostInfoFromAddress(string(testAddr1)), nil). Times(2) - mockClient1 := historyservicemock.NewMockHistoryServiceClient(s.controller) - mockClient2 := historyservicemock.NewMockHistoryServiceClient(s.controller) - clientConn1 := clientConnection{ - historyClient: mockClient1, - } - clientConn2 := clientConnection{ - historyClient: mockClient2, - } - s.connections.EXPECT(). 
- getOrCreateClientConn(testAddr1). - Return(clientConn1). - Times(2) + mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) + s.connections.client = mockClient - r := newBasicRedirector(s.connections, s.resolver) + r := NewBasicRedirector(s.connections, s.resolver) attempt := 1 doExecute := func() error { - return r.execute( + return r.Execute( context.Background(), shardID, func(ctx context.Context, client historyservice.HistoryServiceClient) error { switch attempt { case 1: - if client != mockClient1 { - return errors.New("wrong client") - } attempt++ return serviceerrors.NewShardOwnershipLost("", "current") case 2: - if client != mockClient1 { - return errors.New("wrong client") - } attempt++ - s.connections.EXPECT(). - getOrCreateClientConn(testAddr2). - Return(clientConn2). - Times(1) return serviceerrors.NewShardOwnershipLost(string(testAddr2), "current") case 3: - if client != mockClient2 { - return errors.New("wrong client") - } attempt++ return nil } @@ -213,14 +168,8 @@ func (s *basicRedirectorSuite) TestClientForTargetByShard() { Times(1) mockClient := historyservicemock.NewMockHistoryServiceClient(s.controller) - clientConn := clientConnection{ - historyClient: mockClient, - } - s.connections.EXPECT(). - getOrCreateClientConn(testAddr). - Return(clientConn) - - r := newBasicRedirector(s.connections, s.resolver) + s.connections.client = mockClient + r := NewBasicRedirector(s.connections, s.resolver) cli, err := r.clientForShardID(shardID) s.NoError(err) s.Equal(mockClient, cli) diff --git a/client/history/retryable_client.go b/client/history/retryable_client.go index c4c0fd3358a..17fe1a0a3c5 100644 --- a/client/history/retryable_client.go +++ b/client/history/retryable_client.go @@ -1,36 +1,11 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package history import ( "context" - "google.golang.org/grpc" - "go.temporal.io/server/api/historyservice/v1" "go.temporal.io/server/common/backoff" + "google.golang.org/grpc" ) var _ historyservice.HistoryServiceClient = (*retryableClient)(nil) diff --git a/client/history/retryable_client_gen.go b/client/history/retryable_client_gen.go index 3d46d785967..2582a04f0a6 100644 --- a/client/history/retryable_client_gen.go +++ b/client/history/retryable_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package history @@ -50,6 +26,21 @@ func (c *retryableClient) AddTasks( return resp, err } +func (c *retryableClient) CancelNexusOperation( + ctx context.Context, + request *historyservice.CancelNexusOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.CancelNexusOperationResponse, error) { + var resp *historyservice.CancelNexusOperationResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CancelNexusOperation(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) CloseShard( ctx context.Context, request *historyservice.CloseShardRequest, @@ -65,6 +56,51 @@ func (c *retryableClient) CloseShard( return resp, err } +func (c *retryableClient) CompleteNexusOperation( + ctx context.Context, + request *historyservice.CompleteNexusOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.CompleteNexusOperationResponse, error) { + var resp *historyservice.CompleteNexusOperationResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CompleteNexusOperation(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CompleteNexusOperationChasm( + ctx context.Context, + request *historyservice.CompleteNexusOperationChasmRequest, + opts ...grpc.CallOption, +) (*historyservice.CompleteNexusOperationChasmResponse, error) { + var resp *historyservice.CompleteNexusOperationChasmResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CompleteNexusOperationChasm(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DeepHealthCheck( + ctx context.Context, + request *historyservice.DeepHealthCheckRequest, + opts ...grpc.CallOption, +) (*historyservice.DeepHealthCheckResponse, error) { + var resp *historyservice.DeepHealthCheckResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeepHealthCheck(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DeleteDLQTasks( ctx context.Context, request *historyservice.DeleteDLQTasksRequest, @@ -155,6 +191,21 @@ func (c *retryableClient) DescribeWorkflowExecution( return resp, err } +func (c *retryableClient) ExecuteMultiOperation( + ctx context.Context, + request *historyservice.ExecuteMultiOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.ExecuteMultiOperationResponse, error) { + var resp *historyservice.ExecuteMultiOperationResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ExecuteMultiOperation(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ForceDeleteWorkflowExecution( ctx context.Context, request *historyservice.ForceDeleteWorkflowExecutionRequest, @@ -365,6 +416,21 @@ func (c *retryableClient) ImportWorkflowExecution( return resp, err } +func (c *retryableClient) InvokeStateMachineMethod( + ctx context.Context, + request *historyservice.InvokeStateMachineMethodRequest, + opts ...grpc.CallOption, +) (*historyservice.InvokeStateMachineMethodResponse, error) { + var resp *historyservice.InvokeStateMachineMethodResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.InvokeStateMachineMethod(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) IsActivityTaskValid( ctx context.Context, request *historyservice.IsActivityTaskValidRequest, @@ -440,6 +506,36 @@ func (c *retryableClient) MergeDLQMessages( return resp, err } +func (c *retryableClient) PauseActivity( + ctx context.Context, + request *historyservice.PauseActivityRequest, + opts ...grpc.CallOption, +) (*historyservice.PauseActivityResponse, error) { + var resp *historyservice.PauseActivityResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PauseActivity(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) PauseWorkflowExecution( + ctx context.Context, + request *historyservice.PauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*historyservice.PauseWorkflowExecutionResponse, error) { + var resp *historyservice.PauseWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.PauseWorkflowExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PollMutableState( ctx context.Context, request *historyservice.PollMutableStateRequest, @@ -680,6 +776,21 @@ func (c *retryableClient) RequestCancelWorkflowExecution( return resp, err } +func (c *retryableClient) ResetActivity( + ctx context.Context, + request *historyservice.ResetActivityRequest, + opts ...grpc.CallOption, +) (*historyservice.ResetActivityResponse, error) { + var resp *historyservice.ResetActivityResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ResetActivity(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ResetStickyTaskQueue( ctx context.Context, request *historyservice.ResetStickyTaskQueueRequest, @@ -830,6 +941,21 @@ func (c *retryableClient) SignalWorkflowExecution( return resp, err } +func (c *retryableClient) StartNexusOperation( + ctx context.Context, + request *historyservice.StartNexusOperationRequest, + opts ...grpc.CallOption, +) (*historyservice.StartNexusOperationResponse, error) { + var resp *historyservice.StartNexusOperationResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.StartNexusOperation(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) StartWorkflowExecution( ctx context.Context, request *historyservice.StartWorkflowExecutionRequest, @@ -875,6 +1001,21 @@ func (c *retryableClient) SyncShardStatus( return resp, err } +func (c *retryableClient) SyncWorkflowState( + ctx context.Context, + request *historyservice.SyncWorkflowStateRequest, + opts ...grpc.CallOption, +) (*historyservice.SyncWorkflowStateResponse, error) { + var resp *historyservice.SyncWorkflowStateResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SyncWorkflowState(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) TerminateWorkflowExecution( ctx context.Context, request *historyservice.TerminateWorkflowExecutionRequest, @@ -890,6 +1031,51 @@ func (c *retryableClient) TerminateWorkflowExecution( return resp, err } +func (c *retryableClient) UnpauseActivity( + ctx context.Context, + request *historyservice.UnpauseActivityRequest, + opts ...grpc.CallOption, +) (*historyservice.UnpauseActivityResponse, error) { + var resp *historyservice.UnpauseActivityResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UnpauseActivity(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UnpauseWorkflowExecution( + ctx context.Context, + request *historyservice.UnpauseWorkflowExecutionRequest, + opts ...grpc.CallOption, +) (*historyservice.UnpauseWorkflowExecutionResponse, error) { + var resp *historyservice.UnpauseWorkflowExecutionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UnpauseWorkflowExecution(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateActivityOptions( + ctx context.Context, + request *historyservice.UpdateActivityOptionsRequest, + opts ...grpc.CallOption, +) (*historyservice.UpdateActivityOptionsResponse, error) { + var resp *historyservice.UpdateActivityOptionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateActivityOptions(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) UpdateWorkflowExecution( ctx context.Context, request *historyservice.UpdateWorkflowExecutionRequest, @@ -905,6 +1091,21 @@ func (c *retryableClient) UpdateWorkflowExecution( return resp, err } +func (c *retryableClient) UpdateWorkflowExecutionOptions( + ctx context.Context, + request *historyservice.UpdateWorkflowExecutionOptionsRequest, + opts ...grpc.CallOption, +) (*historyservice.UpdateWorkflowExecutionOptionsResponse, error) { + var resp *historyservice.UpdateWorkflowExecutionOptionsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkflowExecutionOptions(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) VerifyChildExecutionCompletionRecorded( ctx context.Context, request *historyservice.VerifyChildExecutionCompletionRecordedRequest, diff --git a/client/matching/README.md b/client/matching/README.md new file mode 100644 index 00000000000..a343e085858 --- /dev/null +++ b/client/matching/README.md @@ -0,0 +1,93 @@ +# Matching Client + +This package provides the client for communicating with the Matching service. +It handles two key concerns: partition selection and routing. + +## Partition Selection (Load Balancing) + +The `LoadBalancer` distributes add/poll API calls across available task queue partitions. + +**Write path**: Partitions are selected uniformly at random from the configured write partition count. + +**Read path**: Partitions are selected to balance poller counts across partitions. +The load balancer tracks outstanding polls per partition +and sends new polls to the partition with the fewest active polls. + +The number of partitions is controlled by one of two mechanisms: + +1. Managed partition scaling +2. 
Dynamic config (`matching.numTaskqueueWritePartitions`, `matching.numTaskqueueReadPartitions`) + +There are test hooks to force specific partition selection for testing. + +## Routing + +Routing determines which matching node owns a given task queue partition. +All clients (frontend, history) independently perform this computation using +consistent hashing via ringpop. + +### Basic Routing + +Each partition has a routing key of the form: + +``` +namespace_id:queue_name:task_type +``` + +This key is hashed with the consistent hashing algorithm to find the owning node. + +### Spread Routing + +With basic routing, partitions of the same queue are placed independently, + which can cause multiple partitions to land on the same node, creating hot spots. + +Spread routing groups partitions into batches and uses `LookupN` to ensure +partitions within a batch are assigned to different nodes if possible. + +The batch size is controlled by dynamic config `matching.spreadRoutingBatchSize`, +default zero (i.e. use basic routing). + +**Algorithm**: +1. Compute batch number: `batch = partition_id / batch_size` +2. Compute index within batch: `index = partition_id % batch_size` +3. Generate routing key with batch number (batch 0 omits the batch number for backward compatibility): + `namespace_id:queue_name:batch_number:task_type` +4. Call `LookupN(key, index+1)` and take the host at position `index` + +For example, with batch size 8 and partition 25: +- Batch 3 (floor(25/8)), index 1 (25%8) +- Key: `namespace_id:queue_name:3:task_type` +- Call `LookupN(key, 2)`, take host at index 1 + +If fewer hosts are available than the batch size, +wrap around to spread among available hosts. + +**Tradeoff**: Larger batch sizes provide better spread but cause more partition +movement when membership changes. + +### Changes + +Changing partition count dynamically is generally safe and doesn't cause partitions to move. 
+The caveat is that when reducing, write partitions have to be reduced first, +and then the extra partitions have to be empty before reducing read partitions. + +Changing batch size will cause most partitions to move between nodes. +To avoid moving lots of partitions simultaneously on a live cluster, +spread routing can be rolled out gradually (partition by partition) +using wall-clock-synchronized changes. See the `GradualChange` mechanism. + +## Interface + +The `Route(partition)` method on the client computes the owning node address for any partition. +This is used internally by the grpc client, and can be used by other code to +determine the owner for other purposes (e.g. matching engine knowing when to +unload non-owned partitions). + +## Managed partition scaling + +If managed partition scaling is in use, the server communicates the current +read/write partition counts to the client (in a grpc trailer). +The client also communicates it back to the server (in a grpc header) to ensure +the client isn't using stale counts. +The client maintains a cache of the current counts for each task queue. + diff --git a/client/matching/client.go b/client/matching/client.go index 2dca5730ce9..b8ff3f434e3 100644 --- a/client/matching/client.go +++ b/client/matching/client.go @@ -1,44 +1,24 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- // Generates all three generated files in this package: -//go:generate go run ../../cmd/tools/rpcwrappers -service matching +//go:generate go run ../../cmd/tools/genrpcwrappers -service matching package matching import ( "context" + "runtime" "time" enumspb "go.temporal.io/api/enums/v1" taskqueuepb "go.temporal.io/api/taskqueue/v1" - "google.golang.org/grpc" - "go.temporal.io/server/api/matchingservice/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/debug" - "go.temporal.io/server/common/namespace" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/tqid" + "google.golang.org/grpc" ) var _ matchingservice.MatchingServiceClient = (*clientImpl)(nil) @@ -54,45 +34,78 @@ type clientImpl struct { timeout time.Duration longPollTimeout time.Duration clients common.ClientCache + metricsHandler metrics.Handler + logger log.Logger loadBalancer LoadBalancer + spreadRouting dynamicconfig.TypedPropertyFn[dynamicconfig.GradualChange[int]] + partitionCache *partitionCache } -// NewClient creates a new history service gRPC client +// NewClient creates a new matching service gRPC client func NewClient( timeout time.Duration, longPollTimeout time.Duration, clients common.ClientCache, + metricsHandler metrics.Handler, + logger log.Logger, lb LoadBalancer, + spreadRouting dynamicconfig.TypedPropertyFn[dynamicconfig.GradualChange[int]], ) matchingservice.MatchingServiceClient { - return &clientImpl{ + c := &clientImpl{ timeout: timeout, longPollTimeout: longPollTimeout, clients: clients, + metricsHandler: metricsHandler, + logger: logger, loadBalancer: lb, + spreadRouting: spreadRouting, + partitionCache: newPartitionCache(metricsHandler), } + + // Start goroutine to prune partition count cache. + // Clean up on gc, since we can't easily hook into fx here. 
+ c.partitionCache.Start() + runtime.AddCleanup(c, func(cache *partitionCache) { cache.Stop() }, c.partitionCache) + + return c } func (c *clientImpl) AddActivityTask( ctx context.Context, request *matchingservice.AddActivityTaskRequest, - opts ...grpc.CallOption) (*matchingservice.AddActivityTaskResponse, error) { - partition := c.loadBalancer.PickWritePartition( - namespace.ID(request.GetNamespaceId()), - request.GetTaskQueue(), + opts ...grpc.CallOption, +) (*matchingservice.AddActivityTaskResponse, error) { + if !isPartitionAwareKind(request.GetTaskQueue().GetKind()) { + return c.addActivityTask(ctx, PartitionCounts{}, request, opts) + } + pkey := c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetTaskQueue().GetName(), enumspb.TASK_QUEUE_TYPE_ACTIVITY, - request.GetForwardedSource(), ) - request.TaskQueue.Name = partition - client, err := c.getClientForTaskqueue( - request.NamespaceId, - request.TaskQueue, + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.addActivityTask) +} + +func (c *clientImpl) addActivityTask( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.AddActivityTaskRequest, + opts []grpc.CallOption, +) (*matchingservice.AddActivityTaskResponse, error) { + request = common.CloneProto(request) + client, err := c.pickClientForWrite( + request.GetTaskQueue(), + request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_ACTIVITY, + request.GetForwardInfo().GetSourcePartition(), + pc, ) if err != nil { return nil, err } ctx, cancel := c.createContext(ctx) defer cancel() + return client.AddActivityTask(ctx, request, opts...) 
} @@ -100,17 +113,30 @@ func (c *clientImpl) AddWorkflowTask( ctx context.Context, request *matchingservice.AddWorkflowTaskRequest, opts ...grpc.CallOption) (*matchingservice.AddWorkflowTaskResponse, error) { - partition := c.loadBalancer.PickWritePartition( - namespace.ID(request.GetNamespaceId()), - request.GetTaskQueue(), + if !isPartitionAwareKind(request.GetTaskQueue().GetKind()) { + return c.addWorkflowTask(ctx, PartitionCounts{}, request, opts) + } + pkey := c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetTaskQueue().GetName(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, - request.GetForwardedSource(), ) - request.TaskQueue.Name = partition - client, err := c.getClientForTaskqueue( - request.NamespaceId, - request.TaskQueue, + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.addWorkflowTask) +} + +func (c *clientImpl) addWorkflowTask( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.AddWorkflowTaskRequest, + opts []grpc.CallOption, +) (*matchingservice.AddWorkflowTaskResponse, error) { + request = common.CloneProto(request) + client, err := c.pickClientForWrite( + request.GetTaskQueue(), + request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, + request.GetForwardInfo().GetSourcePartition(), + pc, ) if err != nil { return nil, err @@ -123,24 +149,39 @@ func (c *clientImpl) AddWorkflowTask( func (c *clientImpl) PollActivityTaskQueue( ctx context.Context, request *matchingservice.PollActivityTaskQueueRequest, - opts ...grpc.CallOption) (*matchingservice.PollActivityTaskQueueResponse, error) { - pollerToken := c.loadBalancer.PickReadPartition( - namespace.ID(request.GetNamespaceId()), - request.PollRequest.GetTaskQueue(), + opts ...grpc.CallOption, +) (*matchingservice.PollActivityTaskQueueResponse, error) { + if !isPartitionAwareKind(request.GetPollRequest().GetTaskQueue().GetKind()) { + return c.pollActivityTaskQueue(ctx, PartitionCounts{}, request, opts) + } + pkey := 
c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetPollRequest().GetTaskQueue().GetName(), enumspb.TASK_QUEUE_TYPE_ACTIVITY, - request.GetForwardedSource(), ) - defer pollerToken.Release() + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.pollActivityTaskQueue) +} - request.PollRequest.TaskQueue.Name = pollerToken.GetFullName() - client, err := c.getClientForTaskqueue( - request.NamespaceId, - request.PollRequest.TaskQueue, +func (c *clientImpl) pollActivityTaskQueue( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.PollActivityTaskQueueRequest, + opts []grpc.CallOption, +) (*matchingservice.PollActivityTaskQueueResponse, error) { + request = common.CloneProto(request) + client, release, err := c.pickClientForRead( + request.GetPollRequest().GetTaskQueue(), + request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_ACTIVITY, + request.GetForwardedSource(), + pc, ) if err != nil { return nil, err } + if release != nil { + defer release() + } ctx, cancel := c.createLongPollContext(ctx) defer cancel() return client.PollActivityTaskQueue(ctx, request, opts...) 
@@ -149,41 +190,81 @@ func (c *clientImpl) PollActivityTaskQueue( func (c *clientImpl) PollWorkflowTaskQueue( ctx context.Context, request *matchingservice.PollWorkflowTaskQueueRequest, - opts ...grpc.CallOption) (*matchingservice.PollWorkflowTaskQueueResponse, error) { - pollerToken := c.loadBalancer.PickReadPartition( - namespace.ID(request.GetNamespaceId()), - request.PollRequest.GetTaskQueue(), + opts ...grpc.CallOption, +) (*matchingservice.PollWorkflowTaskQueueResponse, error) { + if !isPartitionAwareKind(request.GetPollRequest().GetTaskQueue().GetKind()) { + return c.pollWorkflowTaskQueue(ctx, PartitionCounts{}, request, opts) + } + pkey := c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetPollRequest().GetTaskQueue().GetName(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, - request.GetForwardedSource(), ) - defer pollerToken.Release() + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.pollWorkflowTaskQueue) +} - request.PollRequest.TaskQueue.Name = pollerToken.GetFullName() - client, err := c.getClientForTaskqueue( - request.NamespaceId, - request.PollRequest.TaskQueue, +func (c *clientImpl) pollWorkflowTaskQueue( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.PollWorkflowTaskQueueRequest, + opts []grpc.CallOption, +) (*matchingservice.PollWorkflowTaskQueueResponse, error) { + request = common.CloneProto(request) + client, release, err := c.pickClientForRead( + request.GetPollRequest().GetTaskQueue(), + request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, + request.GetForwardedSource(), + pc, ) if err != nil { return nil, err } + if release != nil { + defer release() + } ctx, cancel := c.createLongPollContext(ctx) defer cancel() return client.PollWorkflowTaskQueue(ctx, request, opts...) 
} -func (c *clientImpl) QueryWorkflow(ctx context.Context, request *matchingservice.QueryWorkflowRequest, opts ...grpc.CallOption) (*matchingservice.QueryWorkflowResponse, error) { - partition := c.loadBalancer.PickWritePartition( - namespace.ID(request.GetNamespaceId()), - request.GetTaskQueue(), +func (c *clientImpl) QueryWorkflow( + ctx context.Context, + request *matchingservice.QueryWorkflowRequest, + opts ...grpc.CallOption, +) (*matchingservice.QueryWorkflowResponse, error) { + if !isPartitionAwareKind(request.GetTaskQueue().GetKind()) { + return c.queryWorkflow(ctx, PartitionCounts{}, request, opts) + } + pkey := c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetTaskQueue().GetName(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, - request.GetForwardedSource(), ) - request.TaskQueue.Name = partition - client, err := c.getClientForTaskqueue( - request.NamespaceId, - request.TaskQueue, + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.queryWorkflow) +} + +func (c *clientImpl) queryWorkflow( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.QueryWorkflowRequest, + opts []grpc.CallOption, +) (*matchingservice.QueryWorkflowResponse, error) { + // use shallow copy since QueryRequest may contain a large payload + request = &matchingservice.QueryWorkflowRequest{ + NamespaceId: request.NamespaceId, + TaskQueue: common.CloneProto(request.TaskQueue), + QueryRequest: request.QueryRequest, + VersionDirective: request.VersionDirective, + ForwardInfo: request.ForwardInfo, + Priority: request.Priority, + } + client, err := c.pickClientForWrite( + request.GetTaskQueue(), + request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW, + request.GetForwardInfo().GetSourcePartition(), + pc, ) if err != nil { return nil, err @@ -193,6 +274,151 @@ func (c *clientImpl) QueryWorkflow(ctx context.Context, request *matchingservice return client.QueryWorkflow(ctx, request, opts...) 
} +func (c *clientImpl) DispatchNexusTask( + ctx context.Context, + request *matchingservice.DispatchNexusTaskRequest, + opts ...grpc.CallOption, +) (*matchingservice.DispatchNexusTaskResponse, error) { + if !isPartitionAwareKind(request.GetTaskQueue().GetKind()) { + return c.dispatchNexusTask(ctx, PartitionCounts{}, request, opts) + } + pkey := c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetTaskQueue().GetName(), + enumspb.TASK_QUEUE_TYPE_NEXUS, + ) + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.dispatchNexusTask) +} + +func (c *clientImpl) dispatchNexusTask( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.DispatchNexusTaskRequest, + opts []grpc.CallOption, +) (*matchingservice.DispatchNexusTaskResponse, error) { + // use shallow copy since Request may contain a large payload + request = &matchingservice.DispatchNexusTaskRequest{ + NamespaceId: request.NamespaceId, + TaskQueue: common.CloneProto(request.TaskQueue), + Request: request.Request, + ForwardInfo: request.ForwardInfo, + } + client, err := c.pickClientForWrite( + request.GetTaskQueue(), + request.GetNamespaceId(), + enumspb.TASK_QUEUE_TYPE_NEXUS, + request.GetForwardInfo().GetSourcePartition(), + pc, + ) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.DispatchNexusTask(ctx, request, opts...) 
+} + +func (c *clientImpl) PollNexusTaskQueue( + ctx context.Context, + request *matchingservice.PollNexusTaskQueueRequest, + opts ...grpc.CallOption, +) (*matchingservice.PollNexusTaskQueueResponse, error) { + if !isPartitionAwareKind(request.GetRequest().GetTaskQueue().GetKind()) { + return c.pollNexusTaskQueue(ctx, PartitionCounts{}, request, opts) + } + pkey := c.partitionCache.makeKey( + request.GetNamespaceId(), + request.GetRequest().GetTaskQueue().GetName(), + enumspb.TASK_QUEUE_TYPE_NEXUS, + ) + return invokeWithPartitionCounts(ctx, c.logger, c.partitionCache, pkey, request, opts, c.pollNexusTaskQueue) +} + +func (c *clientImpl) pollNexusTaskQueue( + ctx context.Context, + pc PartitionCounts, + request *matchingservice.PollNexusTaskQueueRequest, + opts []grpc.CallOption, +) (*matchingservice.PollNexusTaskQueueResponse, error) { + request = common.CloneProto(request) + client, release, err := c.pickClientForRead( + request.GetRequest().GetTaskQueue(), + request.GetNamespaceId(), + enumspb.TASK_QUEUE_TYPE_NEXUS, + request.GetForwardedSource(), + pc, + ) + if err != nil { + return nil, err + } + if release != nil { + defer release() + } + ctx, cancel := c.createLongPollContext(ctx) + defer cancel() + return client.PollNexusTaskQueue(ctx, request, opts...) +} + +// processInputPartition returns a partition in certain cases that load balancer involvement is not necessary, +// otherwise, returns a task queue to pass down to the load balancer. +func (c *clientImpl) processInputPartition(proto *taskqueuepb.TaskQueue, nsid string, taskType enumspb.TaskQueueType, forwardedFrom string) (tqid.Partition, *tqid.TaskQueue) { + partition, err := tqid.PartitionFromProto(proto, nsid, taskType) + if err != nil { + // We preserve the old logic (not returning error in case of invalid proto info) until it's verified that + // clients are not sending invalid names. 
+ c.logger.Info("invalid tq partition", tag.Error(err), tag.Stringer("proto", proto)) + metrics.MatchingClientInvalidTaskQueuePartition.With(c.metricsHandler).Record(1) + return tqid.UnsafeTaskQueueFamily(nsid, proto.GetName()).TaskQueue(taskType).RootPartition(), nil + } + + if forwardedFrom != "" || !partition.IsRoot() { + return partition, nil + } + + switch p := partition.(type) { + case *tqid.NormalPartition: + return nil, p.TaskQueue() + default: + return partition, nil + } +} + +// pickClientForWrite mutates the given proto. Callers should copy the proto before if necessary. +func (c *clientImpl) pickClientForWrite( + proto *taskqueuepb.TaskQueue, + nsid string, + taskType enumspb.TaskQueueType, + forwardedFrom string, + pc PartitionCounts, +) (matchingservice.MatchingServiceClient, error) { + p, tq := c.processInputPartition(proto, nsid, taskType, forwardedFrom) + if tq != nil { + p = c.loadBalancer.PickWritePartition(tq, pc) + } + proto.Name = p.RpcName() + return c.getClientForTaskQueuePartition(p) +} + +// pickClientForRead mutates the given proto. Callers should copy the proto before if necessary. 
+func (c *clientImpl) pickClientForRead( + proto *taskqueuepb.TaskQueue, + nsid string, + taskType enumspb.TaskQueueType, + forwardedFrom string, + pc PartitionCounts, +) (client matchingservice.MatchingServiceClient, release func(), err error) { + p, tq := c.processInputPartition(proto, nsid, taskType, forwardedFrom) + if tq != nil { + token := c.loadBalancer.PickReadPartition(tq, pc) + p = token.TQPartition + release = token.Release + } + + proto.Name = p.RpcName() + client, err = c.getClientForTaskQueuePartition(p) + return client, release, err +} + func (c *clientImpl) createContext(parent context.Context) (context.Context, context.CancelFunc) { return context.WithTimeout(parent, c.timeout) } @@ -201,15 +427,27 @@ func (c *clientImpl) createLongPollContext(parent context.Context) (context.Cont return context.WithTimeout(parent, c.longPollTimeout) } -func (c *clientImpl) getClientForTaskqueue( - namespaceID string, - taskQueue *taskqueuepb.TaskQueue, - taskQueueType enumspb.TaskQueueType, +func (c *clientImpl) Route(p tqid.Partition) (string, error) { + spreadChange := c.spreadRouting() + spread := spreadChange.Value(p.GradualChangeKey(), time.Now()) + return c.clients.Lookup(p.RoutingKey(spread)) +} + +func (c *clientImpl) getClientForTaskQueuePartition( + partition tqid.Partition, ) (matchingservice.MatchingServiceClient, error) { - key := common.TaskQueueRoutingKey(namespaceID, taskQueue.Name, taskQueueType) - client, err := c.clients.GetClientForKey(key) + addr, err := c.Route(partition) + if err != nil { + return nil, err + } + client, err := c.clients.GetClientForClientKey(addr) if err != nil { return nil, err } return client.(matchingservice.MatchingServiceClient), nil } + +func isPartitionAwareKind(kind enumspb.TaskQueueKind) bool { + // only normal partitions participate in scaling + return kind == enumspb.TASK_QUEUE_KIND_NORMAL +} diff --git a/client/matching/client_gen.go b/client/matching/client_gen.go index 7afaed56c7a..5fd2864a912 100644 --- 
a/client/matching/client_gen.go +++ b/client/matching/client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. 
package matching @@ -32,8 +8,8 @@ import ( "math/rand" enumspb "go.temporal.io/api/enums/v1" - taskqueuepb "go.temporal.io/api/taskqueue/v1" "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/common/tqid" "google.golang.org/grpc" ) @@ -43,7 +19,12 @@ func (c *clientImpl) ApplyTaskQueueUserDataReplicationEvent( opts ...grpc.CallOption, ) (*matchingservice.ApplyTaskQueueUserDataReplicationEventResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: request.GetTaskQueue()}, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -58,7 +39,12 @@ func (c *clientImpl) CancelOutstandingPoll( opts ...grpc.CallOption, ) (*matchingservice.CancelOutstandingPollResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetTaskQueue(), request.GetTaskQueueType()) + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), request.GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -67,34 +53,104 @@ func (c *clientImpl) CancelOutstandingPoll( return client.CancelOutstandingPoll(ctx, request, opts...) 
} -func (c *clientImpl) CreateNexusIncomingService( +func (c *clientImpl) CancelOutstandingWorkerPolls( + ctx context.Context, + request *matchingservice.CancelOutstandingWorkerPollsRequest, + opts ...grpc.CallOption, +) (*matchingservice.CancelOutstandingWorkerPollsResponse, error) { + + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), request.GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.CancelOutstandingWorkerPolls(ctx, request, opts...) +} + +func (c *clientImpl) CheckTaskQueueUserDataPropagation( + ctx context.Context, + request *matchingservice.CheckTaskQueueUserDataPropagationRequest, + opts ...grpc.CallOption, +) (*matchingservice.CheckTaskQueueUserDataPropagationResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.CheckTaskQueueUserDataPropagation(ctx, request, opts...) +} + +func (c *clientImpl) CheckTaskQueueVersionMembership( + ctx context.Context, + request *matchingservice.CheckTaskQueueVersionMembershipRequest, + opts ...grpc.CallOption, +) (*matchingservice.CheckTaskQueueVersionMembershipResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), request.GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.CheckTaskQueueVersionMembership(ctx, request, opts...) 
+} + +func (c *clientImpl) CreateNexusEndpoint( ctx context.Context, - request *matchingservice.CreateNexusIncomingServiceRequest, + request *matchingservice.CreateNexusEndpointRequest, opts ...grpc.CallOption, -) (*matchingservice.CreateNexusIncomingServiceResponse, error) { +) (*matchingservice.CreateNexusEndpointResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName("not-applicable", "not-applicable", enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } - client, err := c.getClientForTaskqueue("not-applicable", &taskqueuepb.TaskQueue{Name: "not-applicable"}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } ctx, cancel := c.createContext(ctx) defer cancel() - return client.CreateNexusIncomingService(ctx, request, opts...) + return client.CreateNexusEndpoint(ctx, request, opts...) } -func (c *clientImpl) DeleteNexusIncomingService( +func (c *clientImpl) DeleteNexusEndpoint( ctx context.Context, - request *matchingservice.DeleteNexusIncomingServiceRequest, + request *matchingservice.DeleteNexusEndpointRequest, opts ...grpc.CallOption, -) (*matchingservice.DeleteNexusIncomingServiceResponse, error) { +) (*matchingservice.DeleteNexusEndpointResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName("not-applicable", "not-applicable", enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } - client, err := c.getClientForTaskqueue("not-applicable", &taskqueuepb.TaskQueue{Name: "not-applicable"}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } ctx, cancel := c.createContext(ctx) defer cancel() - return client.DeleteNexusIncomingService(ctx, request, opts...) + return client.DeleteNexusEndpoint(ctx, request, opts...) 
} func (c *clientImpl) DescribeTaskQueue( @@ -103,7 +159,12 @@ func (c *clientImpl) DescribeTaskQueue( opts ...grpc.CallOption, ) (*matchingservice.DescribeTaskQueueResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetDescRequest().GetTaskQueue(), request.GetDescRequest().GetTaskQueueType()) + p, err := tqid.PartitionFromProto(request.GetDescRequest().GetTaskQueue(), request.GetNamespaceId(), request.GetDescRequest().GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -112,19 +173,78 @@ func (c *clientImpl) DescribeTaskQueue( return client.DescribeTaskQueue(ctx, request, opts...) } -func (c *clientImpl) DispatchNexusTask( +func (c *clientImpl) DescribeTaskQueuePartition( ctx context.Context, - request *matchingservice.DispatchNexusTaskRequest, + request *matchingservice.DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption, -) (*matchingservice.DispatchNexusTaskResponse, error) { +) (*matchingservice.DescribeTaskQueuePartitionResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetTaskQueue(), enumspb.TASK_QUEUE_TYPE_NEXUS) + p := tqid.PartitionFromPartitionProto(request.GetTaskQueuePartition(), request.GetNamespaceId()) + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } ctx, cancel := c.createContext(ctx) defer cancel() - return client.DispatchNexusTask(ctx, request, opts...) + return client.DescribeTaskQueuePartition(ctx, request, opts...) 
+} + +func (c *clientImpl) DescribeVersionedTaskQueues( + ctx context.Context, + request *matchingservice.DescribeVersionedTaskQueuesRequest, + opts ...grpc.CallOption, +) (*matchingservice.DescribeVersionedTaskQueuesResponse, error) { + + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), request.GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.DescribeVersionedTaskQueues(ctx, request, opts...) +} + +func (c *clientImpl) DescribeWorker( + ctx context.Context, + request *matchingservice.DescribeWorkerRequest, + opts ...grpc.CallOption, +) (*matchingservice.DescribeWorkerResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName("not-applicable", request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.DescribeWorker(ctx, request, opts...) +} + +func (c *clientImpl) ForceLoadTaskQueuePartition( + ctx context.Context, + request *matchingservice.ForceLoadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*matchingservice.ForceLoadTaskQueuePartitionResponse, error) { + + p := tqid.PartitionFromPartitionProto(request.GetTaskQueuePartition(), request.GetNamespaceId()) + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.ForceLoadTaskQueuePartition(ctx, request, opts...) 
} func (c *clientImpl) ForceUnloadTaskQueue( @@ -133,7 +253,12 @@ func (c *clientImpl) ForceUnloadTaskQueue( opts ...grpc.CallOption, ) (*matchingservice.ForceUnloadTaskQueueResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: request.GetTaskQueue()}, request.GetTaskQueueType()) + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), request.GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -142,13 +267,35 @@ func (c *clientImpl) ForceUnloadTaskQueue( return client.ForceUnloadTaskQueue(ctx, request, opts...) } +func (c *clientImpl) ForceUnloadTaskQueuePartition( + ctx context.Context, + request *matchingservice.ForceUnloadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*matchingservice.ForceUnloadTaskQueuePartitionResponse, error) { + + p := tqid.PartitionFromPartitionProto(request.GetTaskQueuePartition(), request.GetNamespaceId()) + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.ForceUnloadTaskQueuePartition(ctx, request, opts...) 
+} + func (c *clientImpl) GetBuildIdTaskQueueMapping( ctx context.Context, request *matchingservice.GetBuildIdTaskQueueMappingRequest, opts ...grpc.CallOption, ) (*matchingservice.GetBuildIdTaskQueueMappingResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: fmt.Sprintf("not-applicable-%d", rand.Int())}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + p, err := tqid.NormalPartitionFromRpcName(fmt.Sprintf("not-applicable-%d", rand.Int()), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -163,7 +310,12 @@ func (c *clientImpl) GetTaskQueueUserData( opts ...grpc.CallOption, ) (*matchingservice.GetTaskQueueUserDataResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: request.GetTaskQueue()}, request.GetTaskQueueType()) + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), request.GetTaskQueueType()) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -178,7 +330,12 @@ func (c *clientImpl) GetWorkerBuildIdCompatibility( opts ...grpc.CallOption, ) (*matchingservice.GetWorkerBuildIdCompatibilityResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: request.GetRequest().GetTaskQueue()}, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + p, err := tqid.NormalPartitionFromRpcName(request.GetRequest().GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -187,19 +344,44 @@ func (c *clientImpl) GetWorkerBuildIdCompatibility( return client.GetWorkerBuildIdCompatibility(ctx, request, opts...) 
} -func (c *clientImpl) ListNexusIncomingServices( +func (c *clientImpl) GetWorkerVersioningRules( + ctx context.Context, + request *matchingservice.GetWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*matchingservice.GetWorkerVersioningRulesResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.GetWorkerVersioningRules(ctx, request, opts...) +} + +func (c *clientImpl) ListNexusEndpoints( ctx context.Context, - request *matchingservice.ListNexusIncomingServicesRequest, + request *matchingservice.ListNexusEndpointsRequest, opts ...grpc.CallOption, -) (*matchingservice.ListNexusIncomingServicesResponse, error) { +) (*matchingservice.ListNexusEndpointsResponse, error) { - client, err := c.getClientForTaskqueue("not-applicable", &taskqueuepb.TaskQueue{Name: "not-applicable"}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + p, err := tqid.NormalPartitionFromRpcName("not-applicable", "not-applicable", enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } ctx, cancel := c.createLongPollContext(ctx) defer cancel() - return client.ListNexusIncomingServices(ctx, request, opts...) + return client.ListNexusEndpoints(ctx, request, opts...) 
} func (c *clientImpl) ListTaskQueuePartitions( @@ -208,7 +390,12 @@ func (c *clientImpl) ListTaskQueuePartitions( opts ...grpc.CallOption, ) (*matchingservice.ListTaskQueuePartitionsResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetTaskQueue(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -217,19 +404,44 @@ func (c *clientImpl) ListTaskQueuePartitions( return client.ListTaskQueuePartitions(ctx, request, opts...) } -func (c *clientImpl) PollNexusTaskQueue( +func (c *clientImpl) ListWorkers( ctx context.Context, - request *matchingservice.PollNexusTaskQueueRequest, + request *matchingservice.ListWorkersRequest, opts ...grpc.CallOption, -) (*matchingservice.PollNexusTaskQueueResponse, error) { +) (*matchingservice.ListWorkersResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetRequest().GetTaskQueue(), enumspb.TASK_QUEUE_TYPE_NEXUS) + p, err := tqid.NormalPartitionFromRpcName("not-applicable", request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } ctx, cancel := c.createContext(ctx) defer cancel() - return client.PollNexusTaskQueue(ctx, request, opts...) + return client.ListWorkers(ctx, request, opts...) 
+} + +func (c *clientImpl) RecordWorkerHeartbeat( + ctx context.Context, + request *matchingservice.RecordWorkerHeartbeatRequest, + opts ...grpc.CallOption, +) (*matchingservice.RecordWorkerHeartbeatResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName("not-applicable", request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.RecordWorkerHeartbeat(ctx, request, opts...) } func (c *clientImpl) ReplicateTaskQueueUserData( @@ -238,7 +450,12 @@ func (c *clientImpl) ReplicateTaskQueueUserData( opts ...grpc.CallOption, ) (*matchingservice.ReplicateTaskQueueUserDataResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: "not-applicable"}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + p, err := tqid.NormalPartitionFromRpcName("not-applicable", request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -253,7 +470,12 @@ func (c *clientImpl) RespondNexusTaskCompleted( opts ...grpc.CallOption, ) (*matchingservice.RespondNexusTaskCompletedResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetTaskQueue(), enumspb.TASK_QUEUE_TYPE_NEXUS) + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_NEXUS) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -268,7 +490,12 @@ func (c *clientImpl) RespondNexusTaskFailed( opts ...grpc.CallOption, ) (*matchingservice.RespondNexusTaskFailedResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetTaskQueue(), 
enumspb.TASK_QUEUE_TYPE_NEXUS) + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_NEXUS) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -283,7 +510,12 @@ func (c *clientImpl) RespondQueryTaskCompleted( opts ...grpc.CallOption, ) (*matchingservice.RespondQueryTaskCompletedResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), request.GetTaskQueue(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + p, err := tqid.PartitionFromProto(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -292,19 +524,84 @@ func (c *clientImpl) RespondQueryTaskCompleted( return client.RespondQueryTaskCompleted(ctx, request, opts...) } -func (c *clientImpl) UpdateNexusIncomingService( +func (c *clientImpl) SyncDeploymentUserData( + ctx context.Context, + request *matchingservice.SyncDeploymentUserDataRequest, + opts ...grpc.CallOption, +) (*matchingservice.SyncDeploymentUserDataResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.SyncDeploymentUserData(ctx, request, opts...) 
+} + +func (c *clientImpl) UpdateFairnessState( + ctx context.Context, + request *matchingservice.UpdateFairnessStateRequest, + opts ...grpc.CallOption, +) (*matchingservice.UpdateFairnessStateResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.UpdateFairnessState(ctx, request, opts...) +} + +func (c *clientImpl) UpdateNexusEndpoint( ctx context.Context, - request *matchingservice.UpdateNexusIncomingServiceRequest, + request *matchingservice.UpdateNexusEndpointRequest, opts ...grpc.CallOption, -) (*matchingservice.UpdateNexusIncomingServiceResponse, error) { +) (*matchingservice.UpdateNexusEndpointResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName("not-applicable", "not-applicable", enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } - client, err := c.getClientForTaskqueue("not-applicable", &taskqueuepb.TaskQueue{Name: "not-applicable"}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } ctx, cancel := c.createContext(ctx) defer cancel() - return client.UpdateNexusIncomingService(ctx, request, opts...) + return client.UpdateNexusEndpoint(ctx, request, opts...) 
+} + +func (c *clientImpl) UpdateTaskQueueConfig( + ctx context.Context, + request *matchingservice.UpdateTaskQueueConfigRequest, + opts ...grpc.CallOption, +) (*matchingservice.UpdateTaskQueueConfigResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetUpdateTaskqueueConfig().GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.UpdateTaskQueueConfig(ctx, request, opts...) } func (c *clientImpl) UpdateTaskQueueUserData( @@ -313,7 +610,12 @@ func (c *clientImpl) UpdateTaskQueueUserData( opts ...grpc.CallOption, ) (*matchingservice.UpdateTaskQueueUserDataResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: "not-applicable"}, enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + p, err := tqid.NormalPartitionFromRpcName("not-applicable", request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_UNSPECIFIED) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -328,7 +630,12 @@ func (c *clientImpl) UpdateWorkerBuildIdCompatibility( opts ...grpc.CallOption, ) (*matchingservice.UpdateWorkerBuildIdCompatibilityResponse, error) { - client, err := c.getClientForTaskqueue(request.GetNamespaceId(), &taskqueuepb.TaskQueue{Name: request.GetTaskQueue()}, enumspb.TASK_QUEUE_TYPE_WORKFLOW) + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) if err != nil { return nil, err } @@ -336,3 +643,23 @@ func (c *clientImpl) UpdateWorkerBuildIdCompatibility( defer cancel() return client.UpdateWorkerBuildIdCompatibility(ctx, request, opts...) 
} + +func (c *clientImpl) UpdateWorkerVersioningRules( + ctx context.Context, + request *matchingservice.UpdateWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*matchingservice.UpdateWorkerVersioningRulesResponse, error) { + + p, err := tqid.NormalPartitionFromRpcName(request.GetTaskQueue(), request.GetNamespaceId(), enumspb.TASK_QUEUE_TYPE_WORKFLOW) + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p) + if err != nil { + return nil, err + } + ctx, cancel := c.createContext(ctx) + defer cancel() + return client.UpdateWorkerVersioningRules(ctx, request, opts...) +} diff --git a/client/matching/interface.go b/client/matching/interface.go new file mode 100644 index 00000000000..2b0fc4f4d1d --- /dev/null +++ b/client/matching/interface.go @@ -0,0 +1,7 @@ +package matching + +import "go.temporal.io/server/common/tqid" + +type RoutingClient interface { + Route(p tqid.Partition) (string, error) +} diff --git a/client/matching/loadbalancer.go b/client/matching/loadbalancer.go index 7cc325678d0..56ed046a520 100644 --- a/client/matching/loadbalancer.go +++ b/client/matching/loadbalancer.go @@ -1,39 +1,13 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package matching import ( "math/rand" "sync" - enumspb "go.temporal.io/api/enums/v1" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/tqname" + "go.temporal.io/server/common/testing/testhooks" + "go.temporal.io/server/common/tqid" ) type ( @@ -48,50 +22,38 @@ type ( // to a parent partition in which case, no load balancing should be // performed PickWritePartition( - namespaceID namespace.ID, - taskQueue *taskqueuepb.TaskQueue, - taskQueueType enumspb.TaskQueueType, - forwardedFrom string, - ) string + taskQueue *tqid.TaskQueue, + pc PartitionCounts, + ) *tqid.NormalPartition // PickReadPartition returns the task queue partition to send a poller to. // Input is name of the original task queue as specified by caller. When // forwardedFrom is non-empty, no load balancing should be done. 
PickReadPartition( - namespaceID namespace.ID, - taskQueue *taskqueuepb.TaskQueue, - taskQueueType enumspb.TaskQueueType, - forwardedFrom string, + taskQueue *tqid.TaskQueue, + pc PartitionCounts, ) *pollToken } defaultLoadBalancer struct { - namespaceIDToName func(id namespace.ID) (namespace.Name, error) - nReadPartitions dynamicconfig.IntPropertyFnWithTaskQueueInfoFilters - nWritePartitions dynamicconfig.IntPropertyFnWithTaskQueueInfoFilters - forceReadPartition dynamicconfig.IntPropertyFn - forceWritePartition dynamicconfig.IntPropertyFn + namespaceIDToName func(id namespace.ID) (namespace.Name, error) + nReadPartitions dynamicconfig.IntPropertyFnWithTaskQueueFilter + nWritePartitions dynamicconfig.IntPropertyFnWithTaskQueueFilter + testHooks testhooks.TestHooks lock sync.RWMutex - taskQueueLBs map[taskQueueKey]*tqLoadBalancer + taskQueueLBs map[tqid.TaskQueue]*tqLoadBalancer } // Keeps track of polls per partition. Sends a poll to the partition with the fewest polls tqLoadBalancer struct { - taskQueue taskQueueKey + taskQueue *tqid.TaskQueue pollerCounts []int // keep track of poller count of each partition lock sync.Mutex } - taskQueueKey struct { - NamespaceID namespace.ID - Name tqname.Name - Type enumspb.TaskQueueType - } - pollToken struct { - fullName string - partitionID int + TQPartition *tqid.NormalPartition balancer *tqLoadBalancer } ) @@ -101,128 +63,118 @@ type ( func NewLoadBalancer( namespaceIDToName func(id namespace.ID) (namespace.Name, error), dc *dynamicconfig.Collection, + testHooks testhooks.TestHooks, ) LoadBalancer { lb := &defaultLoadBalancer{ - namespaceIDToName: namespaceIDToName, - nReadPartitions: dc.GetTaskQueuePartitionsProperty(dynamicconfig.MatchingNumTaskqueueReadPartitions), - nWritePartitions: dc.GetTaskQueuePartitionsProperty(dynamicconfig.MatchingNumTaskqueueWritePartitions), - forceReadPartition: dc.GetIntProperty(dynamicconfig.TestMatchingLBForceReadPartition, -1), - forceWritePartition: 
dc.GetIntProperty(dynamicconfig.TestMatchingLBForceWritePartition, -1), - lock: sync.RWMutex{}, - taskQueueLBs: make(map[taskQueueKey]*tqLoadBalancer), + namespaceIDToName: namespaceIDToName, + nReadPartitions: dynamicconfig.MatchingNumTaskqueueReadPartitions.Get(dc), + nWritePartitions: dynamicconfig.MatchingNumTaskqueueWritePartitions.Get(dc), + testHooks: testHooks, + taskQueueLBs: make(map[tqid.TaskQueue]*tqLoadBalancer), } return lb } func (lb *defaultLoadBalancer) PickWritePartition( - namespaceID namespace.ID, - taskQueue *taskqueuepb.TaskQueue, - taskQueueType enumspb.TaskQueueType, - forwardedFrom string, -) string { - if forwardedFrom != "" || taskQueue.GetKind() == enumspb.TASK_QUEUE_KIND_STICKY { - return taskQueue.GetName() + taskQueue *tqid.TaskQueue, + pc PartitionCounts, +) *tqid.NormalPartition { + if n, ok := testhooks.Get(lb.testHooks, testhooks.MatchingLBForceWritePartition, namespace.ID(taskQueue.NamespaceId())); ok { + return taskQueue.NormalPartition(n) } - tqName, err := tqname.FromBaseName(taskQueue.GetName()) - - // this should never happen when forwardedFrom is empty + nsName, err := lb.namespaceIDToName(namespace.ID(taskQueue.NamespaceId())) if err != nil { - return taskQueue.GetName() - } - - if n := lb.forceWritePartition(); n >= 0 { - return tqName.WithPartition(n).FullName() + return taskQueue.RootPartition() } - nsName, err := lb.namespaceIDToName(namespaceID) - if err != nil { - return taskQueue.GetName() + var partitionCount int + if pc.Write > 0 { + partitionCount = int(pc.Write) + } else { + partitionCount = max(1, lb.nWritePartitions(nsName.String(), taskQueue.Name(), taskQueue.TaskType())) } - n := max(1, lb.nWritePartitions(nsName.String(), tqName.BaseNameString(), taskQueueType)) - return tqName.WithPartition(rand.Intn(n)).FullName() + return taskQueue.NormalPartition(rand.Intn(partitionCount)) } // PickReadPartition picks a partition for poller to poll task from, and keeps load balanced between partitions. 
// Caller is responsible to call pollToken.Release() after complete the poll. func (lb *defaultLoadBalancer) PickReadPartition( - namespaceID namespace.ID, - taskQueue *taskqueuepb.TaskQueue, - taskQueueType enumspb.TaskQueueType, - forwardedFrom string, + taskQueue *tqid.TaskQueue, + pc PartitionCounts, ) *pollToken { - if forwardedFrom != "" || taskQueue.Kind == enumspb.TASK_QUEUE_KIND_STICKY { - // no partition for sticky task queue and forwarded request - return &pollToken{fullName: taskQueue.GetName()} - } - - parsedName, err := tqname.Parse(taskQueue.GetName()) - if err != nil || err == nil && !parsedName.IsRoot() { - // parse error or partition already picked, use as-is - return &pollToken{fullName: taskQueue.GetName()} - } - - tqlb := lb.getTaskQueueLoadBalancer(namespaceID, parsedName, taskQueueType) + tqlb := lb.getTaskQueueLoadBalancer(taskQueue) // For read path it's safer to return global default partition count instead of root partition, when we fail to // map namespace ID to name. 
var partitionCount = dynamicconfig.GlobalDefaultNumTaskQueuePartitions - namespaceName, err := lb.namespaceIDToName(namespaceID) - if err == nil { - partitionCount = lb.nReadPartitions(string(namespaceName), parsedName.BaseNameString(), taskQueueType) + if pc.Read > 0 { + partitionCount = int(pc.Read) + } else { + namespaceName, err := lb.namespaceIDToName(namespace.ID(taskQueue.NamespaceId())) + if err == nil { + partitionCount = lb.nReadPartitions(string(namespaceName), taskQueue.Name(), taskQueue.TaskType()) + } + } + + if n, ok := testhooks.Get(lb.testHooks, testhooks.MatchingLBForceReadPartition, namespace.ID(taskQueue.NamespaceId())); ok { + return tqlb.forceReadPartition(partitionCount, n) } - return tqlb.pickReadPartition(partitionCount, lb.forceReadPartition()) + return tqlb.pickReadPartition(partitionCount) } -func (lb *defaultLoadBalancer) getTaskQueueLoadBalancer( - namespaceID namespace.ID, parsedName tqname.Name, tqType enumspb.TaskQueueType, -) *tqLoadBalancer { - key := taskQueueKey{NamespaceID: namespaceID, Name: parsedName, Type: tqType} - +func (lb *defaultLoadBalancer) getTaskQueueLoadBalancer(tq *tqid.TaskQueue) *tqLoadBalancer { lb.lock.RLock() - tqlb, ok := lb.taskQueueLBs[key] + tqlb, ok := lb.taskQueueLBs[*tq] lb.lock.RUnlock() if ok { return tqlb } lb.lock.Lock() - tqlb, ok = lb.taskQueueLBs[key] + tqlb, ok = lb.taskQueueLBs[*tq] if !ok { - tqlb = newTaskQueueLoadBalancer(key) - lb.taskQueueLBs[key] = tqlb + tqlb = newTaskQueueLoadBalancer(tq) + lb.taskQueueLBs[*tq] = tqlb } lb.lock.Unlock() return tqlb } -func newTaskQueueLoadBalancer(key taskQueueKey) *tqLoadBalancer { +func newTaskQueueLoadBalancer(tq *tqid.TaskQueue) *tqLoadBalancer { return &tqLoadBalancer{ - taskQueue: key, + taskQueue: tq, } } -func (b *tqLoadBalancer) pickReadPartition(partitionCount int, forcedPartition int) *pollToken { +func (b *tqLoadBalancer) pickReadPartition(partitionCount int) *pollToken { b.lock.Lock() defer b.lock.Unlock() - // ensure we reflect dynamic 
config change if it ever happens b.ensurePartitionCountLocked(partitionCount) + partitionID := b.pickReadPartitionWithFewestPolls(partitionCount) - partitionID := forcedPartition + b.pollerCounts[partitionID]++ - if partitionID < 0 { - partitionID = b.pickReadPartitionWithFewestPolls(partitionCount) + return &pollToken{ + TQPartition: b.taskQueue.NormalPartition(partitionID), + balancer: b, } +} + +func (b *tqLoadBalancer) forceReadPartition(partitionCount, partitionID int) *pollToken { + b.lock.Lock() + defer b.lock.Unlock() + + b.ensurePartitionCountLocked(max(partitionCount, partitionID+1)) b.pollerCounts[partitionID]++ return &pollToken{ - fullName: b.taskQueue.Name.WithPartition(partitionID).FullName(), - partitionID: partitionID, + TQPartition: b.taskQueue.NormalPartition(partitionID), balancer: b, } } @@ -273,10 +225,6 @@ func (b *tqLoadBalancer) Release(partitionID int) { func (t *pollToken) Release() { if t.balancer != nil { // t.balancer == nil is valid for example sticky task queue. - t.balancer.Release(t.partitionID) + t.balancer.Release(t.TQPartition.PartitionId()) } } - -func (t *pollToken) GetFullName() string { - return t.fullName -} diff --git a/client/matching/loadbalancer_test.go b/client/matching/loadbalancer_test.go index 1c712aa66bf..ac73864130a 100644 --- a/client/matching/loadbalancer_test.go +++ b/client/matching/loadbalancer_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package matching import ( @@ -30,88 +6,102 @@ import ( "github.com/stretchr/testify/assert" enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/server/common/tqname" + "go.temporal.io/server/common/tqid" ) +func TestTQLoadBalancerMapping(t *testing.T) { + lb := &defaultLoadBalancer{ + lock: sync.RWMutex{}, + taskQueueLBs: make(map[tqid.TaskQueue]*tqLoadBalancer), + } + + f, err := tqid.NewTaskQueueFamily("fake-namespace-id", "fake-taskqueue") + assert.NoError(t, err) + + taskQueue := f.TaskQueue(enumspb.TASK_QUEUE_TYPE_WORKFLOW) + tqlb := lb.getTaskQueueLoadBalancer(taskQueue) + + tqlb2 := lb.getTaskQueueLoadBalancer(f.TaskQueue(enumspb.TASK_QUEUE_TYPE_WORKFLOW)) + assert.Equal(t, tqlb2, tqlb, "mapping should be based on content, not the pointer value") + + taskQueueClone := *taskQueue + tqlb2 = lb.getTaskQueueLoadBalancer(&taskQueueClone) + assert.Equal(t, tqlb2, tqlb, "mapping should be based on content, not the pointer value") + + tqlb3 := lb.getTaskQueueLoadBalancer(f.TaskQueue(enumspb.TASK_QUEUE_TYPE_ACTIVITY)) + assert.NotEqual(t, tqlb3, tqlb, "separate load LB should be created for each task type") +} + func TestTQLoadBalancer(t *testing.T) { partitionCount := 4 - key := taskQueueKey{ - NamespaceID: "fake-namespace-id", - Name: mustParseTQName("fake-taskqueue"), - Type: enumspb.TASK_QUEUE_TYPE_ACTIVITY, - } - tqlb := newTaskQueueLoadBalancer(key) + f, err := tqid.NewTaskQueueFamily("fake-namespace-id", "fake-taskqueue") + assert.NoError(t, err) + tqlb := newTaskQueueLoadBalancer(f.TaskQueue(enumspb.TASK_QUEUE_TYPE_ACTIVITY)) // pick 4 times, each partition picked would have one poller - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, 
maxPollerCount(tqlb)) - p3 := tqlb.pickReadPartition(partitionCount, -1) + p3 := tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) // release one, and pick one, the newly picked one should have one poller p3.Release() - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) // pick one again, this time it should have 2 pollers - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 2, maxPollerCount(tqlb)) } func TestTQLoadBalancerForce(t *testing.T) { partitionCount := 4 - key := taskQueueKey{ - NamespaceID: "fake-namespace-id", - Name: mustParseTQName("fake-taskqueue"), - Type: enumspb.TASK_QUEUE_TYPE_ACTIVITY, - } - tqlb := newTaskQueueLoadBalancer(key) + f, err := tqid.NewTaskQueueFamily("fake-namespace-id", "fake-taskqueue") + assert.NoError(t, err) + tqlb := newTaskQueueLoadBalancer(f.TaskQueue(enumspb.TASK_QUEUE_TYPE_ACTIVITY)) // pick 4 times, each partition picked would have one poller - p1 := tqlb.pickReadPartition(partitionCount, 1) - assert.Equal(t, 1, p1.partitionID) + p1 := tqlb.forceReadPartition(partitionCount, 1) + assert.Equal(t, 1, p1.TQPartition.PartitionId()) assert.Equal(t, 1, maxPollerCount(tqlb)) - tqlb.pickReadPartition(partitionCount, 1) + tqlb.forceReadPartition(partitionCount, 1) assert.Equal(t, 2, maxPollerCount(tqlb)) // when we don't force it should balance out - tqlb.pickReadPartition(partitionCount, -1) - tqlb.pickReadPartition(partitionCount, -1) - tqlb.pickReadPartition(partitionCount, -1) - tqlb.pickReadPartition(partitionCount, -1) - tqlb.pickReadPartition(partitionCount, -1) - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) + tqlb.pickReadPartition(partitionCount) + tqlb.pickReadPartition(partitionCount) + tqlb.pickReadPartition(partitionCount) + tqlb.pickReadPartition(partitionCount) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 2, 
maxPollerCount(tqlb)) // releasing the forced one and adding another should still be balanced p1.Release() - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 2, maxPollerCount(tqlb)) - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 3, maxPollerCount(tqlb)) } func TestLoadBalancerConcurrent(t *testing.T) { wg := &sync.WaitGroup{} partitionCount := 4 - key := taskQueueKey{ - NamespaceID: "fake-namespace-id", - Name: mustParseTQName("fake-taskqueue"), - Type: enumspb.TASK_QUEUE_TYPE_ACTIVITY, - } - tqlb := newTaskQueueLoadBalancer(key) + f, err := tqid.NewTaskQueueFamily("fake-namespace-id", "fake-taskqueue") + assert.NoError(t, err) + tqlb := newTaskQueueLoadBalancer(f.TaskQueue(enumspb.TASK_QUEUE_TYPE_ACTIVITY)) concurrentCount := 10 * partitionCount wg.Add(concurrentCount) - for i := 0; i < concurrentCount; i++ { + for range concurrentCount { go func() { defer wg.Done() - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) }() } wg.Wait() @@ -125,29 +115,26 @@ func TestLoadBalancerConcurrent(t *testing.T) { func TestLoadBalancer_ReducedPartitionCount(t *testing.T) { partitionCount := 2 - key := taskQueueKey{ - NamespaceID: "fake-namespace-id", - Name: mustParseTQName("fake-taskqueue"), - Type: enumspb.TASK_QUEUE_TYPE_ACTIVITY, - } - tqlb := newTaskQueueLoadBalancer(key) - p1 := tqlb.pickReadPartition(partitionCount, -1) - p2 := tqlb.pickReadPartition(partitionCount, -1) + f, err := tqid.NewTaskQueueFamily("fake-namespace-id", "fake-taskqueue") + assert.NoError(t, err) + tqlb := newTaskQueueLoadBalancer(f.TaskQueue(enumspb.TASK_QUEUE_TYPE_ACTIVITY)) + p1 := tqlb.pickReadPartition(partitionCount) + p2 := tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) assert.Equal(t, 1, maxPollerCount(tqlb)) partitionCount += 2 // increase partition count - p3 := tqlb.pickReadPartition(partitionCount, -1) - p4 := 
tqlb.pickReadPartition(partitionCount, -1) + p3 := tqlb.pickReadPartition(partitionCount) + p4 := tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) assert.Equal(t, 1, maxPollerCount(tqlb)) partitionCount -= 2 // reduce partition count - p5 := tqlb.pickReadPartition(partitionCount, -1) - p6 := tqlb.pickReadPartition(partitionCount, -1) + p5 := tqlb.pickReadPartition(partitionCount) + p6 := tqlb.pickReadPartition(partitionCount) assert.Equal(t, 2, maxPollerCount(tqlb)) assert.Equal(t, 2, maxPollerCount(tqlb)) - p7 := tqlb.pickReadPartition(partitionCount, -1) + p7 := tqlb.pickReadPartition(partitionCount) assert.Equal(t, 3, maxPollerCount(tqlb)) // release all of them and it should be ok. @@ -159,22 +146,14 @@ func TestLoadBalancer_ReducedPartitionCount(t *testing.T) { p6.Release() p7.Release() - tqlb.pickReadPartition(partitionCount, -1) - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 1, maxPollerCount(tqlb)) assert.Equal(t, 1, maxPollerCount(tqlb)) - tqlb.pickReadPartition(partitionCount, -1) + tqlb.pickReadPartition(partitionCount) assert.Equal(t, 2, maxPollerCount(tqlb)) } -func mustParseTQName(baseName string) tqname.Name { - n, err := tqname.Parse(baseName) - if err != nil { - panic(err) - } - return n -} - func maxPollerCount(tqlb *tqLoadBalancer) int { res := -1 for _, c := range tqlb.pollerCounts { diff --git a/client/matching/metric_client.go b/client/matching/metric_client.go index 11180c8fae3..53edd2b34b7 100644 --- a/client/matching/metric_client.go +++ b/client/matching/metric_client.go @@ -1,44 +1,20 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package matching import ( "context" + "errors" "time" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" - "google.golang.org/grpc" - "go.temporal.io/server/api/matchingservice/v1" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" serviceerrors "go.temporal.io/server/common/serviceerror" - "go.temporal.io/server/common/tqname" + "go.temporal.io/server/common/tqid" + "google.golang.org/grpc" ) var _ matchingservice.MatchingServiceClient = (*metricClient)(nil) @@ -78,7 +54,7 @@ func (c *metricClient) AddActivityTask( c.emitForwardedSourceStats( scope, - request.GetForwardedSource(), + request.GetForwardInfo().GetSourcePartition(), request.TaskQueue, ) @@ -98,7 +74,7 @@ func (c *metricClient) AddWorkflowTask( c.emitForwardedSourceStats( scope, - request.GetForwardedSource(), + request.GetForwardInfo().GetSourcePartition(), request.TaskQueue, ) @@ -162,13 +138,53 @@ func (c *metricClient) QueryWorkflow( c.emitForwardedSourceStats( scope, - request.GetForwardedSource(), + request.GetForwardInfo().GetSourcePartition(), request.TaskQueue, ) return c.client.QueryWorkflow(ctx, request, opts...) } +func (c *metricClient) DispatchNexusTask( + ctx context.Context, + request *matchingservice.DispatchNexusTaskRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.DispatchNexusTaskResponse, retError error) { + scope, stopwatch := c.startMetricsRecording(ctx, metrics.MatchingClientDispatchNexusTaskScope) + defer func() { + c.finishMetricsRecording(scope, stopwatch, retError) + }() + + c.emitForwardedSourceStats( + scope, + request.GetForwardInfo().GetSourcePartition(), + request.TaskQueue, + ) + + return c.client.DispatchNexusTask(ctx, request, opts...) 
+} + +func (c *metricClient) PollNexusTaskQueue( + ctx context.Context, + request *matchingservice.PollNexusTaskQueueRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.PollNexusTaskQueueResponse, retError error) { + scope, stopwatch := c.startMetricsRecording(ctx, metrics.MatchingClientPollNexusTaskQueueScope) + defer func() { + c.finishMetricsRecording(scope, stopwatch, retError) + }() + + if request.Request != nil { + c.emitForwardedSourceStats( + scope, + request.GetForwardedSource(), + request.Request.TaskQueue, + ) + } + + return c.client.PollNexusTaskQueue(ctx, request, opts...) +} + func (c *metricClient) emitForwardedSourceStats( metricsHandler metrics.Handler, forwardedFrom string, @@ -182,8 +198,11 @@ func (c *metricClient) emitForwardedSourceStats( case forwardedFrom != "": metrics.MatchingClientForwardedCounter.With(metricsHandler).Record(1) default: - _, err := tqname.FromBaseName(taskQueue.GetName()) + // TODO: confirmed from metrics, it seems this error does happen at the moment... 
+ // it means some mangled name come here; need to check why + _, err := tqid.NewTaskQueueFamily("", taskQueue.GetName()) if err != nil { + c.logger.Info("invalid tq name", tag.Error(err), tag.String("proto", taskQueue.GetName())) metrics.MatchingClientInvalidTaskQueueName.With(metricsHandler).Record(1) } } @@ -213,7 +232,8 @@ func (c *metricClient) finishMetricsRecording( *serviceerror.QueryFailed, *serviceerror.NamespaceNotFound, *serviceerror.NewerBuildExists, - *serviceerror.WorkflowExecutionAlreadyStarted: + *serviceerror.WorkflowExecutionAlreadyStarted, + *serviceerror.ResourceExhausted: // noop - not interest and too many logs default: c.throttledLogger.Info("matching client encountered error", tag.Error(err), tag.ServiceErrorType(err)) @@ -222,3 +242,14 @@ func (c *metricClient) finishMetricsRecording( } metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime)) } + +func (c *metricClient) Route(p tqid.Partition) (string, error) { + // Ideally we wouldn't do a type-check here and require c.client to have + // Route, but it would require changing too many types all over the place. + // This isn't called in a hot path. + rc, ok := c.client.(RoutingClient) + if !ok { + return "", errors.New("not routing client") + } + return rc.Route(p) +} diff --git a/client/matching/metric_client_gen.go b/client/matching/metric_client_gen.go index 40c98c10aff..2f7dead614a 100644 --- a/client/matching/metric_client_gen.go +++ b/client/matching/metric_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package matching @@ -61,32 +37,74 @@ func (c *metricClient) CancelOutstandingPoll( return c.client.CancelOutstandingPoll(ctx, request, opts...) 
} -func (c *metricClient) CreateNexusIncomingService( +func (c *metricClient) CancelOutstandingWorkerPolls( ctx context.Context, - request *matchingservice.CreateNexusIncomingServiceRequest, + request *matchingservice.CancelOutstandingWorkerPollsRequest, opts ...grpc.CallOption, -) (_ *matchingservice.CreateNexusIncomingServiceResponse, retError error) { +) (_ *matchingservice.CancelOutstandingWorkerPollsResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientCreateNexusIncomingService") + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientCancelOutstandingWorkerPolls") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.CreateNexusIncomingService(ctx, request, opts...) + return c.client.CancelOutstandingWorkerPolls(ctx, request, opts...) } -func (c *metricClient) DeleteNexusIncomingService( +func (c *metricClient) CheckTaskQueueUserDataPropagation( ctx context.Context, - request *matchingservice.DeleteNexusIncomingServiceRequest, + request *matchingservice.CheckTaskQueueUserDataPropagationRequest, opts ...grpc.CallOption, -) (_ *matchingservice.DeleteNexusIncomingServiceResponse, retError error) { +) (_ *matchingservice.CheckTaskQueueUserDataPropagationResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientDeleteNexusIncomingService") + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientCheckTaskQueueUserDataPropagation") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.DeleteNexusIncomingService(ctx, request, opts...) + return c.client.CheckTaskQueueUserDataPropagation(ctx, request, opts...) 
+} + +func (c *metricClient) CheckTaskQueueVersionMembership( + ctx context.Context, + request *matchingservice.CheckTaskQueueVersionMembershipRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.CheckTaskQueueVersionMembershipResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientCheckTaskQueueVersionMembership") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CheckTaskQueueVersionMembership(ctx, request, opts...) +} + +func (c *metricClient) CreateNexusEndpoint( + ctx context.Context, + request *matchingservice.CreateNexusEndpointRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.CreateNexusEndpointResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientCreateNexusEndpoint") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.CreateNexusEndpoint(ctx, request, opts...) +} + +func (c *metricClient) DeleteNexusEndpoint( + ctx context.Context, + request *matchingservice.DeleteNexusEndpointRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.DeleteNexusEndpointResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientDeleteNexusEndpoint") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DeleteNexusEndpoint(ctx, request, opts...) } func (c *metricClient) DescribeTaskQueue( @@ -103,18 +121,60 @@ func (c *metricClient) DescribeTaskQueue( return c.client.DescribeTaskQueue(ctx, request, opts...) 
} -func (c *metricClient) DispatchNexusTask( +func (c *metricClient) DescribeTaskQueuePartition( ctx context.Context, - request *matchingservice.DispatchNexusTaskRequest, + request *matchingservice.DescribeTaskQueuePartitionRequest, opts ...grpc.CallOption, -) (_ *matchingservice.DispatchNexusTaskResponse, retError error) { +) (_ *matchingservice.DescribeTaskQueuePartitionResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientDispatchNexusTask") + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientDescribeTaskQueuePartition") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.DispatchNexusTask(ctx, request, opts...) + return c.client.DescribeTaskQueuePartition(ctx, request, opts...) +} + +func (c *metricClient) DescribeVersionedTaskQueues( + ctx context.Context, + request *matchingservice.DescribeVersionedTaskQueuesRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.DescribeVersionedTaskQueuesResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientDescribeVersionedTaskQueues") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeVersionedTaskQueues(ctx, request, opts...) +} + +func (c *metricClient) DescribeWorker( + ctx context.Context, + request *matchingservice.DescribeWorkerRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.DescribeWorkerResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientDescribeWorker") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.DescribeWorker(ctx, request, opts...) 
+} + +func (c *metricClient) ForceLoadTaskQueuePartition( + ctx context.Context, + request *matchingservice.ForceLoadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.ForceLoadTaskQueuePartitionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientForceLoadTaskQueuePartition") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ForceLoadTaskQueuePartition(ctx, request, opts...) } func (c *metricClient) ForceUnloadTaskQueue( @@ -131,6 +191,20 @@ func (c *metricClient) ForceUnloadTaskQueue( return c.client.ForceUnloadTaskQueue(ctx, request, opts...) } +func (c *metricClient) ForceUnloadTaskQueuePartition( + ctx context.Context, + request *matchingservice.ForceUnloadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.ForceUnloadTaskQueuePartitionResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientForceUnloadTaskQueuePartition") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ForceUnloadTaskQueuePartition(ctx, request, opts...) +} + func (c *metricClient) GetBuildIdTaskQueueMapping( ctx context.Context, request *matchingservice.GetBuildIdTaskQueueMappingRequest, @@ -173,18 +247,32 @@ func (c *metricClient) GetWorkerBuildIdCompatibility( return c.client.GetWorkerBuildIdCompatibility(ctx, request, opts...) 
} -func (c *metricClient) ListNexusIncomingServices( +func (c *metricClient) GetWorkerVersioningRules( ctx context.Context, - request *matchingservice.ListNexusIncomingServicesRequest, + request *matchingservice.GetWorkerVersioningRulesRequest, opts ...grpc.CallOption, -) (_ *matchingservice.ListNexusIncomingServicesResponse, retError error) { +) (_ *matchingservice.GetWorkerVersioningRulesResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientListNexusIncomingServices") + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientGetWorkerVersioningRules") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.ListNexusIncomingServices(ctx, request, opts...) + return c.client.GetWorkerVersioningRules(ctx, request, opts...) +} + +func (c *metricClient) ListNexusEndpoints( + ctx context.Context, + request *matchingservice.ListNexusEndpointsRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.ListNexusEndpointsResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientListNexusEndpoints") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.ListNexusEndpoints(ctx, request, opts...) } func (c *metricClient) ListTaskQueuePartitions( @@ -201,18 +289,32 @@ func (c *metricClient) ListTaskQueuePartitions( return c.client.ListTaskQueuePartitions(ctx, request, opts...) 
} -func (c *metricClient) PollNexusTaskQueue( +func (c *metricClient) ListWorkers( ctx context.Context, - request *matchingservice.PollNexusTaskQueueRequest, + request *matchingservice.ListWorkersRequest, opts ...grpc.CallOption, -) (_ *matchingservice.PollNexusTaskQueueResponse, retError error) { +) (_ *matchingservice.ListWorkersResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientPollNexusTaskQueue") + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientListWorkers") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.PollNexusTaskQueue(ctx, request, opts...) + return c.client.ListWorkers(ctx, request, opts...) +} + +func (c *metricClient) RecordWorkerHeartbeat( + ctx context.Context, + request *matchingservice.RecordWorkerHeartbeatRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.RecordWorkerHeartbeatResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientRecordWorkerHeartbeat") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.RecordWorkerHeartbeat(ctx, request, opts...) } func (c *metricClient) ReplicateTaskQueueUserData( @@ -271,18 +373,60 @@ func (c *metricClient) RespondQueryTaskCompleted( return c.client.RespondQueryTaskCompleted(ctx, request, opts...) } -func (c *metricClient) UpdateNexusIncomingService( +func (c *metricClient) SyncDeploymentUserData( + ctx context.Context, + request *matchingservice.SyncDeploymentUserDataRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.SyncDeploymentUserDataResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientSyncDeploymentUserData") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.SyncDeploymentUserData(ctx, request, opts...) 
+} + +func (c *metricClient) UpdateFairnessState( ctx context.Context, - request *matchingservice.UpdateNexusIncomingServiceRequest, + request *matchingservice.UpdateFairnessStateRequest, opts ...grpc.CallOption, -) (_ *matchingservice.UpdateNexusIncomingServiceResponse, retError error) { +) (_ *matchingservice.UpdateFairnessStateResponse, retError error) { - metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientUpdateNexusIncomingService") + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientUpdateFairnessState") defer func() { c.finishMetricsRecording(metricsHandler, startTime, retError) }() - return c.client.UpdateNexusIncomingService(ctx, request, opts...) + return c.client.UpdateFairnessState(ctx, request, opts...) +} + +func (c *metricClient) UpdateNexusEndpoint( + ctx context.Context, + request *matchingservice.UpdateNexusEndpointRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.UpdateNexusEndpointResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientUpdateNexusEndpoint") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateNexusEndpoint(ctx, request, opts...) +} + +func (c *metricClient) UpdateTaskQueueConfig( + ctx context.Context, + request *matchingservice.UpdateTaskQueueConfigRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.UpdateTaskQueueConfigResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientUpdateTaskQueueConfig") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateTaskQueueConfig(ctx, request, opts...) } func (c *metricClient) UpdateTaskQueueUserData( @@ -312,3 +456,17 @@ func (c *metricClient) UpdateWorkerBuildIdCompatibility( return c.client.UpdateWorkerBuildIdCompatibility(ctx, request, opts...) 
} + +func (c *metricClient) UpdateWorkerVersioningRules( + ctx context.Context, + request *matchingservice.UpdateWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (_ *matchingservice.UpdateWorkerVersioningRulesResponse, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "MatchingClientUpdateWorkerVersioningRules") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.UpdateWorkerVersioningRules(ctx, request, opts...) +} diff --git a/client/matching/partition_cache.go b/client/matching/partition_cache.go new file mode 100644 index 00000000000..3d6c4f3b57a --- /dev/null +++ b/client/matching/partition_cache.go @@ -0,0 +1,142 @@ +package matching + +import ( + "context" + "sync" + "time" + + "github.com/google/uuid" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/goro" + "go.temporal.io/server/common/metrics" +) + +// Shards should be a power of 2. +const partitionCacheNumShards = 8 + +// How long for a full rotation of the cache. +const partitionCacheRotateInterval = time.Hour + +// partitionCache is a cache of PartitionCounts values for task queues. +// It uses a sharded map+mutex and periodic rotation for efficiency. +type partitionCache struct { + shards [partitionCacheNumShards]partitionCacheShard + + metricsHandler metrics.Handler + rotate *goro.Handle +} + +type partitionCacheShard struct { + lock sync.RWMutex + active map[string]PartitionCounts + prev map[string]PartitionCounts + _ [64 - 24 - 8 - 8]byte // force to different cache lines to eliminate false sharing +} + +// newPartitionCache returns a new partitionCache. Start() must be called before using it. 
+func newPartitionCache( + metricsHandler metrics.Handler, +) *partitionCache { + return &partitionCache{ + metricsHandler: metricsHandler, + } +} + +func (c *partitionCache) Start() { + for i := range c.shards { + c.shards[i].rotate() + } + c.rotate = goro.NewHandle(context.Background()).Go(func(ctx context.Context) error { + t := time.NewTicker(partitionCacheRotateInterval / partitionCacheNumShards) + defer t.Stop() + for i := 0; ; i = (i + 1) % partitionCacheNumShards { + select { + case <-t.C: + c.shards[i].rotate() + c.emitMetrics() + case <-ctx.Done(): + return ctx.Err() + } + } + }) +} + +func (c *partitionCache) Stop() { + c.rotate.Cancel() + <-c.rotate.Done() +} + +func (c *partitionCache) emitMetrics() { + totalSize := 0 + for i := range c.shards { + totalSize += c.shards[i].size() + } + metrics.PartitionCacheSize.With(c.metricsHandler).Record(float64(totalSize)) +} + +func (*partitionCache) makeKey( + nsid, tqname string, tqtype enumspb.TaskQueueType, +) string { + // note we don't need delimiters to make unambiguous keys: nsid is always the same length, + // the last byte is tqtype, and everything in between is the name. 
+ nsidBytes, err := uuid.Parse(nsid) + if err != nil { + // this shouldn't fail, but use the string form as a backup, append a 0xff to differentiate + return nsid + string([]byte{0xff}) + tqname + string([]byte{byte(tqtype), 0xff}) + } + return string(nsidBytes[:]) + tqname + string([]byte{byte(tqtype)}) +} + +func (*partitionCache) shardFromKey(key string) int { + // mix a few bits to pick a shard + l := len(key) + shard := int(key[min(14, l-3)] ^ key[l-2] ^ key[l-1]) + return shard % partitionCacheNumShards +} + +func (c *partitionCache) lookup(key string) PartitionCounts { + return c.shards[c.shardFromKey(key)].lookup(key) +} + +func (c *partitionCache) put(key string, pc PartitionCounts) { + c.shards[c.shardFromKey(key)].put(key, pc) +} + +func (s *partitionCacheShard) lookup(key string) PartitionCounts { + s.lock.RLock() + if pc, ok := s.active[key]; ok { + s.lock.RUnlock() + return pc + } else if pc, ok := s.prev[key]; ok { + s.lock.RUnlock() + s.put(key, pc) // promote to active + return pc + } + s.lock.RUnlock() + return PartitionCounts{} +} + +func (s *partitionCacheShard) put(key string, pc PartitionCounts) { + s.lock.Lock() + if pc.Valid() { + s.active[key] = pc + } else { + // invalid PartitionCounts means disable this mechanism, so remove this key entirely + delete(s.active, key) + } + delete(s.prev, key) + s.lock.Unlock() +} + +func (s *partitionCacheShard) rotate() { + s.lock.Lock() + defer s.lock.Unlock() + s.prev = s.active + s.active = make(map[string]PartitionCounts) +} + +func (s *partitionCacheShard) size() int { + s.lock.RLock() + defer s.lock.RUnlock() + return len(s.prev) + len(s.active) +} diff --git a/client/matching/partition_cache_test.go b/client/matching/partition_cache_test.go new file mode 100644 index 00000000000..c0789d980ce --- /dev/null +++ b/client/matching/partition_cache_test.go @@ -0,0 +1,97 @@ +package matching + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" 
+ "go.temporal.io/server/common/metrics" +) + +// Using a fixed UUID so tests are deterministic. Must be valid UUID format for makeKey. +const testNsID = "f47ac10b-58cc-4372-a567-0e02b2c3d479" + +func TestPartitionCache_BasicPutLookup(t *testing.T) { + t.Parallel() + c := newPartitionCache(metrics.NoopMetricsHandler) + c.Start() + defer c.Stop() + + key := c.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + keyb := c.makeKey(testNsID, "my-tq-b", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + + pc4 := PartitionCounts{Read: 4, Write: 4} + pc8 := PartitionCounts{Read: 8, Write: 8} + + c.put(key, pc4) + c.put(keyb, pc8) + require.Equal(t, pc4, c.lookup(key)) + require.Equal(t, pc8, c.lookup(keyb)) + + c.put(key, pc8) + require.Equal(t, pc8, c.lookup(key)) + + // missing key + keyc := c.makeKey(testNsID, "nonexistent", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + require.Equal(t, PartitionCounts{}, c.lookup(keyc)) + + // invalid PartitionCounts removes + c.put(key, PartitionCounts{Read: -3, Write: -5}) + require.Equal(t, PartitionCounts{}, c.lookup(key)) +} + +func TestPartitionCache_Rotate(t *testing.T) { + t.Parallel() + c := newPartitionCache(metrics.NoopMetricsHandler) + c.Start() + defer c.Stop() + + key := c.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + pc := PartitionCounts{Read: 8, Write: 8} + c.put(key, pc) + + // Rotate the shard — entry moves to prev + c.shards[c.shardFromKey(key)].rotate() + + // Lookup should still find it (promotes from prev to active) + require.Equal(t, pc, c.lookup(key)) + + // After promotion, rotate again — it should still be in active + c.shards[c.shardFromKey(key)].rotate() + require.Equal(t, pc, c.lookup(key)) + + // Rotate twice without any lookup — entry should be gone + c.shards[c.shardFromKey(key)].rotate() + c.shards[c.shardFromKey(key)].rotate() + + require.Equal(t, PartitionCounts{}, c.lookup(key)) + + // Put another value after rotate + pc4 := PartitionCounts{Read: 4, Write: 4} + c.put(key, pc4) + + // Lookup 
should find it + require.Equal(t, pc4, c.lookup(key)) +} + +func TestPartitionCache_ConcurrentAccess(t *testing.T) { + t.Parallel() + c := newPartitionCache(metrics.NoopMetricsHandler) + c.Start() + defer c.Stop() + + var wg sync.WaitGroup + for g := range 20 { + wg.Go(func() { + for range 1000 { + key := c.makeKey(testNsID, "tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + pc := PartitionCounts{Read: int32(g + 1), Write: int32(g + 1)} + c.put(key, pc) + c.lookup(key) + } + }) + } + wg.Wait() + // no panic or data races detected +} diff --git a/client/matching/partition_counts.go b/client/matching/partition_counts.go new file mode 100644 index 00000000000..861beaf205a --- /dev/null +++ b/client/matching/partition_counts.go @@ -0,0 +1,135 @@ +package matching + +import ( + "context" + "errors" + "slices" + + taskqueuespb "go.temporal.io/server/api/taskqueue/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/log/tag" + serviceerrors "go.temporal.io/server/common/serviceerror" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" +) + +// The "-bin" suffix instructs grpc to base64-encode the value, so we can use binary. +const partitionCountsHeaderName = "pcnt-bin" +const partitionCountsTrailerName = "pcnt-bin" + +// PartitionCounts is a smaller version of taskqueuespb.ClientPartitionCounts that we can more +// easily pass around and put in a map. 
+type PartitionCounts struct { + Read, Write int32 +} + +func (pc PartitionCounts) Valid() bool { + return pc.Read > 0 && pc.Write > 0 +} + +func (pc PartitionCounts) encode() (string, error) { + b, err := proto.Marshal(&taskqueuespb.ClientPartitionCounts{ + Read: pc.Read, + Write: pc.Write, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +func (pc PartitionCounts) appendToOutgoingContext(ctx context.Context) context.Context { + v, err := pc.encode() + if err != nil { + return ctx + } + return metadata.AppendToOutgoingContext(ctx, partitionCountsHeaderName, v) +} + +func (pc PartitionCounts) SetTrailer(ctx context.Context) error { + v, err := pc.encode() + if err != nil { + return err + } + return grpc.SetTrailer(ctx, metadata.Pairs(partitionCountsTrailerName, v)) +} + +func parsePartitionCounts(hdr string) (PartitionCounts, error) { + var cpc taskqueuespb.ClientPartitionCounts + err := proto.Unmarshal([]byte(hdr), &cpc) + if err != nil { + return PartitionCounts{}, err + } + return PartitionCounts{ + Read: cpc.Read, + Write: cpc.Write, + }, nil +} + +func ParsePartitionCountsFromIncomingContext(ctx context.Context) (PartitionCounts, error) { + vals := metadata.ValueFromIncomingContext(ctx, partitionCountsHeaderName) + if len(vals) == 0 { + return PartitionCounts{}, nil + } + return parsePartitionCounts(vals[0]) +} + +func parsePartitionCountsFromTrailer(trailer metadata.MD) (PartitionCounts, error) { + vals := trailer.Get(partitionCountsTrailerName) + if len(vals) == 0 { + return PartitionCounts{}, nil + } + return parsePartitionCounts(vals[0]) +} + +// invokeWithPartitionCounts wraps a partition-aware matchingservice RPC call: +// - attaches the client's cached counts to the outgoing request (as header) +// - updates the cache from the server's response (trailer) +// - retries once if it receives StalePartitionCounts error +func invokeWithPartitionCounts[Req, Res any]( + ctx context.Context, + logger log.Logger, + cache *partitionCache, + 
pkey string, + request Req, + opts []grpc.CallOption, + op func( + ctx context.Context, + pc PartitionCounts, + request Req, + opts []grpc.CallOption, + ) (Res, error), +) (Res, error) { + // capture trailer + var trailer metadata.MD + opts = append(slices.Clone(opts), grpc.Trailer(&trailer)) + + // get current idea of partition counts. if missing from the cache, this will send zeros + // for counts, which the server will always accept as not-stale. + pc := cache.lookup(pkey) + + for attempt := 0; ; attempt++ { + res, err := op(pc.appendToOutgoingContext(ctx), pc, request, opts) + + // update cache on trailer on both success and error. if the trailer has no data, + // this removes the key from the cache. + newPc, parseErr := parsePartitionCountsFromTrailer(trailer) + trailer = nil + if parseErr != nil { + logger.Info("partition count trailer parse error", tag.Error(parseErr)) + // continue with zero value for newPc + } + if newPc != pc { + cache.put(pkey, newPc) + pc = newPc + } + + if _, ok := errors.AsType[*serviceerrors.StalePartitionCounts](err); ok && attempt == 0 { + // if we got a StalePartitionCounts on the first attempt, retry once + continue + } + + return res, err + } +} diff --git a/client/matching/partition_counts_test.go b/client/matching/partition_counts_test.go new file mode 100644 index 00000000000..03633ab21c2 --- /dev/null +++ b/client/matching/partition_counts_test.go @@ -0,0 +1,265 @@ +package matching + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + serviceerrors "go.temporal.io/server/common/serviceerror" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// setTrailerInOpts finds the grpc.TrailerCallOption in opts and populates it. 
+func setTrailerInOpts(opts []grpc.CallOption, pc PartitionCounts) { + v, _ := pc.encode() + md := metadata.Pairs(partitionCountsTrailerName, v) + for _, opt := range opts { + if t, ok := opt.(grpc.TrailerCallOption); ok { + *t.TrailerAddr = md + return + } + } +} + +type hpcReq struct{} +type hpcRes struct{ value string } + +func newTestCache(t *testing.T) *partitionCache { + cache := newPartitionCache(metrics.NoopMetricsHandler) + cache.Start() + t.Cleanup(cache.Stop) + return cache +} + +func TestInvokeWithPartitionCounts_CacheMissSuccess(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + serverPC := PartitionCounts{Read: 4, Write: 4} + + calls := 0 + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + calls++ + require.Equal(t, PartitionCounts{}, pc) // cache miss + setTrailerInOpts(opts, serverPC) + return &hpcRes{value: "ok"}, nil + } + + res, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + require.Equal(t, "ok", res.value) + require.Equal(t, 1, calls) + + // cache should be updated + require.Equal(t, serverPC, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_CacheHitSuccess(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + cachedPC := PartitionCounts{Read: 4, Write: 4} + cache.put(pkey, cachedPC) + + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + require.Equal(t, cachedPC, pc) + // server confirms same counts + setTrailerInOpts(opts, cachedPC) + return &hpcRes{value: "ok"}, nil + } + + res, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + require.Equal(t, "ok", res.value) + 
require.Equal(t, cachedPC, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_ServerUpdatesCount(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + cache.put(pkey, PartitionCounts{Read: 4, Write: 4}) + newPC := PartitionCounts{Read: 8, Write: 8} + + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + // server returns different counts + setTrailerInOpts(opts, newPC) + return &hpcRes{value: "ok"}, nil + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + + // cache should be updated + require.Equal(t, newPC, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_StaleRetry_Succeeds(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + serverPC := PartitionCounts{Read: 8, Write: 8} + + calls := 0 + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + calls++ + setTrailerInOpts(opts, serverPC) + if calls == 1 { + require.Equal(t, PartitionCounts{}, pc) // cache miss + return nil, serviceerrors.NewStalePartitionCounts("stale") + } + require.Equal(t, serverPC, pc) // retry with updated counts + return &hpcRes{value: "ok"}, nil + } + + res, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + require.Equal(t, "ok", res.value) + require.Equal(t, 2, calls) + require.Equal(t, serverPC, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_StaleRetry_Fails(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + serverPC := PartitionCounts{Read: 8, Write: 8} + + calls := 0 + op := func(ctx context.Context, pc 
PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + calls++ + setTrailerInOpts(opts, serverPC) + if calls == 1 { + return nil, serviceerrors.NewStalePartitionCounts("stale first") + } + // second attempt: different non-stale error + return nil, errors.New("error") + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.Error(t, err) + require.Equal(t, 2, calls) +} + +func TestInvokeWithPartitionCounts_OtherErrorNoRetry(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + serverPC := PartitionCounts{Read: 4, Write: 4} + + calls := 0 + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + calls++ + // even on error, server sends trailer + setTrailerInOpts(opts, serverPC) + return nil, errors.New("error") + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.Error(t, err) + require.Equal(t, 1, calls) // no retry + + // cache should still be updated from trailer + require.Equal(t, serverPC, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_ZeroTrailerRemovesCache(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + cache.put(pkey, PartitionCounts{Read: 4, Write: 4}) + + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + // server signals "dynamic partitioning off" + setTrailerInOpts(opts, PartitionCounts{Read: 0, Write: 0}) + return &hpcRes{value: "ok"}, nil + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + + // cache entry should be removed + require.Equal(t, PartitionCounts{}, cache.lookup(pkey)) +} + +func 
TestInvokeWithPartitionCounts_NoTrailerRemovesCache(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + originalPC := PartitionCounts{Read: 4, Write: 4} + cache.put(pkey, originalPC) + + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + // no trailer set + return &hpcRes{value: "ok"}, nil + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + + // cache entry should be removed + require.Equal(t, PartitionCounts{}, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_ParseErrorRemovesCache(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + originalPC := PartitionCounts{Read: 4, Write: 4} + cache.put(pkey, originalPC) + + op := func(ctx context.Context, pc PartitionCounts, req *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + for _, opt := range opts { + if t, ok := opt.(grpc.TrailerCallOption); ok { + *t.TrailerAddr = metadata.Pairs(partitionCountsTrailerName, "this is an invalid proto message") + } + } + return &hpcRes{value: "ok"}, nil + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) + + // cache entry should be removed + require.Equal(t, PartitionCounts{}, cache.lookup(pkey)) +} + +func TestInvokeWithPartitionCounts_OutgoingContextHasHeader(t *testing.T) { + t.Parallel() + cache := newTestCache(t) + + pkey := cache.makeKey(testNsID, "my-tq", enumspb.TASK_QUEUE_TYPE_WORKFLOW) + cachedPC := PartitionCounts{Read: 6, Write: 4} + cache.put(pkey, cachedPC) + + op := func(ctx context.Context, _ PartitionCounts, _ *hpcReq, opts []grpc.CallOption) (*hpcRes, error) { + // verify the outgoing context has the partition counts header + md, ok := 
metadata.FromOutgoingContext(ctx) + require.True(t, ok) + vals := md.Get(partitionCountsHeaderName) + require.Len(t, vals, 1) + parsed, err := parsePartitionCounts(vals[0]) + require.NoError(t, err) + require.Equal(t, cachedPC, parsed) + + setTrailerInOpts(opts, cachedPC) + return &hpcRes{value: "ok"}, nil + } + + _, err := invokeWithPartitionCounts(context.Background(), log.NewNoopLogger(), cache, pkey, &hpcReq{}, nil, op) + require.NoError(t, err) +} diff --git a/client/matching/retryable_client.go b/client/matching/retryable_client.go index 9b1de22c500..760342cb96a 100644 --- a/client/matching/retryable_client.go +++ b/client/matching/retryable_client.go @@ -1,32 +1,11 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package matching import ( + "errors" + "go.temporal.io/server/api/matchingservice/v1" "go.temporal.io/server/common/backoff" + "go.temporal.io/server/common/tqid" ) var _ matchingservice.MatchingServiceClient = (*retryableClient)(nil) @@ -34,14 +13,32 @@ var _ matchingservice.MatchingServiceClient = (*retryableClient)(nil) type retryableClient struct { client matchingservice.MatchingServiceClient policy backoff.RetryPolicy + pollPolicy backoff.RetryPolicy isRetryable backoff.IsRetryable } // NewRetryableClient creates a new instance of matchingservice.MatchingServiceClient with retry policy -func NewRetryableClient(client matchingservice.MatchingServiceClient, policy backoff.RetryPolicy, isRetryable backoff.IsRetryable) matchingservice.MatchingServiceClient { +func NewRetryableClient( + client matchingservice.MatchingServiceClient, + policy, + pollPolicy backoff.RetryPolicy, + isRetryable backoff.IsRetryable, +) matchingservice.MatchingServiceClient { return &retryableClient{ client: client, policy: policy, + pollPolicy: pollPolicy, isRetryable: isRetryable, } } + +func (c *retryableClient) Route(p tqid.Partition) (string, error) { + // Ideally we wouldn't do a type-check here and require c.client to have + // Route, but it would require changing too many types all over the place. + // This isn't called in a hot path. + rc, ok := c.client.(RoutingClient) + if !ok { + return "", errors.New("not routing client") + } + return rc.Route(p) +} diff --git a/client/matching/retryable_client_gen.go b/client/matching/retryable_client_gen.go index aa636756510..29d183d11ae 100644 --- a/client/matching/retryable_client_gen.go +++ b/client/matching/retryable_client_gen.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by cmd/tools/rpcwrappers. DO NOT EDIT. +// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. package matching @@ -95,30 +71,75 @@ func (c *retryableClient) CancelOutstandingPoll( return resp, err } -func (c *retryableClient) CreateNexusIncomingService( +func (c *retryableClient) CancelOutstandingWorkerPolls( ctx context.Context, - request *matchingservice.CreateNexusIncomingServiceRequest, + request *matchingservice.CancelOutstandingWorkerPollsRequest, opts ...grpc.CallOption, -) (*matchingservice.CreateNexusIncomingServiceResponse, error) { - var resp *matchingservice.CreateNexusIncomingServiceResponse +) (*matchingservice.CancelOutstandingWorkerPollsResponse, error) { + var resp *matchingservice.CancelOutstandingWorkerPollsResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.CreateNexusIncomingService(ctx, request, opts...) 
+ resp, err = c.client.CancelOutstandingWorkerPolls(ctx, request, opts...) return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } -func (c *retryableClient) DeleteNexusIncomingService( +func (c *retryableClient) CheckTaskQueueUserDataPropagation( ctx context.Context, - request *matchingservice.DeleteNexusIncomingServiceRequest, + request *matchingservice.CheckTaskQueueUserDataPropagationRequest, opts ...grpc.CallOption, -) (*matchingservice.DeleteNexusIncomingServiceResponse, error) { - var resp *matchingservice.DeleteNexusIncomingServiceResponse +) (*matchingservice.CheckTaskQueueUserDataPropagationResponse, error) { + var resp *matchingservice.CheckTaskQueueUserDataPropagationResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.DeleteNexusIncomingService(ctx, request, opts...) + resp, err = c.client.CheckTaskQueueUserDataPropagation(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CheckTaskQueueVersionMembership( + ctx context.Context, + request *matchingservice.CheckTaskQueueVersionMembershipRequest, + opts ...grpc.CallOption, +) (*matchingservice.CheckTaskQueueVersionMembershipResponse, error) { + var resp *matchingservice.CheckTaskQueueVersionMembershipResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CheckTaskQueueVersionMembership(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) CreateNexusEndpoint( + ctx context.Context, + request *matchingservice.CreateNexusEndpointRequest, + opts ...grpc.CallOption, +) (*matchingservice.CreateNexusEndpointResponse, error) { + var resp *matchingservice.CreateNexusEndpointResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.CreateNexusEndpoint(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DeleteNexusEndpoint( + ctx context.Context, + request *matchingservice.DeleteNexusEndpointRequest, + opts ...grpc.CallOption, +) (*matchingservice.DeleteNexusEndpointResponse, error) { + var resp *matchingservice.DeleteNexusEndpointResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DeleteNexusEndpoint(ctx, request, opts...) return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) @@ -140,6 +161,51 @@ func (c *retryableClient) DescribeTaskQueue( return resp, err } +func (c *retryableClient) DescribeTaskQueuePartition( + ctx context.Context, + request *matchingservice.DescribeTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*matchingservice.DescribeTaskQueuePartitionResponse, error) { + var resp *matchingservice.DescribeTaskQueuePartitionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeTaskQueuePartition(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DescribeVersionedTaskQueues( + ctx context.Context, + request *matchingservice.DescribeVersionedTaskQueuesRequest, + opts ...grpc.CallOption, +) (*matchingservice.DescribeVersionedTaskQueuesResponse, error) { + var resp *matchingservice.DescribeVersionedTaskQueuesResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeVersionedTaskQueues(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) DescribeWorker( + ctx context.Context, + request *matchingservice.DescribeWorkerRequest, + opts ...grpc.CallOption, +) (*matchingservice.DescribeWorkerResponse, error) { + var resp *matchingservice.DescribeWorkerResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.DescribeWorker(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) DispatchNexusTask( ctx context.Context, request *matchingservice.DispatchNexusTaskRequest, @@ -155,6 +221,21 @@ func (c *retryableClient) DispatchNexusTask( return resp, err } +func (c *retryableClient) ForceLoadTaskQueuePartition( + ctx context.Context, + request *matchingservice.ForceLoadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*matchingservice.ForceLoadTaskQueuePartitionResponse, error) { + var resp *matchingservice.ForceLoadTaskQueuePartitionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ForceLoadTaskQueuePartition(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ForceUnloadTaskQueue( ctx context.Context, request *matchingservice.ForceUnloadTaskQueueRequest, @@ -170,6 +251,21 @@ func (c *retryableClient) ForceUnloadTaskQueue( return resp, err } +func (c *retryableClient) ForceUnloadTaskQueuePartition( + ctx context.Context, + request *matchingservice.ForceUnloadTaskQueuePartitionRequest, + opts ...grpc.CallOption, +) (*matchingservice.ForceUnloadTaskQueuePartitionResponse, error) { + var resp *matchingservice.ForceUnloadTaskQueuePartitionResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ForceUnloadTaskQueuePartition(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) GetBuildIdTaskQueueMapping( ctx context.Context, request *matchingservice.GetBuildIdTaskQueueMappingRequest, @@ -215,15 +311,30 @@ func (c *retryableClient) GetWorkerBuildIdCompatibility( return resp, err } -func (c *retryableClient) ListNexusIncomingServices( +func (c *retryableClient) GetWorkerVersioningRules( ctx context.Context, - request *matchingservice.ListNexusIncomingServicesRequest, + request *matchingservice.GetWorkerVersioningRulesRequest, opts ...grpc.CallOption, -) (*matchingservice.ListNexusIncomingServicesResponse, error) { - var resp *matchingservice.ListNexusIncomingServicesResponse +) (*matchingservice.GetWorkerVersioningRulesResponse, error) { + var resp *matchingservice.GetWorkerVersioningRulesResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.ListNexusIncomingServices(ctx, request, opts...) + resp, err = c.client.GetWorkerVersioningRules(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) ListNexusEndpoints( + ctx context.Context, + request *matchingservice.ListNexusEndpointsRequest, + opts ...grpc.CallOption, +) (*matchingservice.ListNexusEndpointsResponse, error) { + var resp *matchingservice.ListNexusEndpointsResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListNexusEndpoints(ctx, request, opts...) return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) @@ -245,6 +356,21 @@ func (c *retryableClient) ListTaskQueuePartitions( return resp, err } +func (c *retryableClient) ListWorkers( + ctx context.Context, + request *matchingservice.ListWorkersRequest, + opts ...grpc.CallOption, +) (*matchingservice.ListWorkersResponse, error) { + var resp *matchingservice.ListWorkersResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.ListWorkers(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) PollActivityTaskQueue( ctx context.Context, request *matchingservice.PollActivityTaskQueueRequest, @@ -256,7 +382,7 @@ func (c *retryableClient) PollActivityTaskQueue( resp, err = c.client.PollActivityTaskQueue(ctx, request, opts...) return err } - err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + err := backoff.ThrottleRetryContext(ctx, op, c.pollPolicy, c.isRetryable) return resp, err } @@ -271,7 +397,7 @@ func (c *retryableClient) PollNexusTaskQueue( resp, err = c.client.PollNexusTaskQueue(ctx, request, opts...) 
return err } - err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + err := backoff.ThrottleRetryContext(ctx, op, c.pollPolicy, c.isRetryable) return resp, err } @@ -286,7 +412,7 @@ func (c *retryableClient) PollWorkflowTaskQueue( resp, err = c.client.PollWorkflowTaskQueue(ctx, request, opts...) return err } - err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + err := backoff.ThrottleRetryContext(ctx, op, c.pollPolicy, c.isRetryable) return resp, err } @@ -305,6 +431,21 @@ func (c *retryableClient) QueryWorkflow( return resp, err } +func (c *retryableClient) RecordWorkerHeartbeat( + ctx context.Context, + request *matchingservice.RecordWorkerHeartbeatRequest, + opts ...grpc.CallOption, +) (*matchingservice.RecordWorkerHeartbeatResponse, error) { + var resp *matchingservice.RecordWorkerHeartbeatResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.RecordWorkerHeartbeat(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) ReplicateTaskQueueUserData( ctx context.Context, request *matchingservice.ReplicateTaskQueueUserDataRequest, @@ -365,15 +506,60 @@ func (c *retryableClient) RespondQueryTaskCompleted( return resp, err } -func (c *retryableClient) UpdateNexusIncomingService( +func (c *retryableClient) SyncDeploymentUserData( + ctx context.Context, + request *matchingservice.SyncDeploymentUserDataRequest, + opts ...grpc.CallOption, +) (*matchingservice.SyncDeploymentUserDataResponse, error) { + var resp *matchingservice.SyncDeploymentUserDataResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.SyncDeploymentUserData(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateFairnessState( + ctx context.Context, + request *matchingservice.UpdateFairnessStateRequest, + opts ...grpc.CallOption, +) (*matchingservice.UpdateFairnessStateResponse, error) { + var resp *matchingservice.UpdateFairnessStateResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateFairnessState(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateNexusEndpoint( + ctx context.Context, + request *matchingservice.UpdateNexusEndpointRequest, + opts ...grpc.CallOption, +) (*matchingservice.UpdateNexusEndpointResponse, error) { + var resp *matchingservice.UpdateNexusEndpointResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateNexusEndpoint(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) UpdateTaskQueueConfig( ctx context.Context, - request *matchingservice.UpdateNexusIncomingServiceRequest, + request *matchingservice.UpdateTaskQueueConfigRequest, opts ...grpc.CallOption, -) (*matchingservice.UpdateNexusIncomingServiceResponse, error) { - var resp *matchingservice.UpdateNexusIncomingServiceResponse +) (*matchingservice.UpdateTaskQueueConfigResponse, error) { + var resp *matchingservice.UpdateTaskQueueConfigResponse op := func(ctx context.Context) error { var err error - resp, err = c.client.UpdateNexusIncomingService(ctx, request, opts...) + resp, err = c.client.UpdateTaskQueueConfig(ctx, request, opts...) 
return err } err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) @@ -409,3 +595,18 @@ func (c *retryableClient) UpdateWorkerBuildIdCompatibility( err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) return resp, err } + +func (c *retryableClient) UpdateWorkerVersioningRules( + ctx context.Context, + request *matchingservice.UpdateWorkerVersioningRulesRequest, + opts ...grpc.CallOption, +) (*matchingservice.UpdateWorkerVersioningRulesResponse, error) { + var resp *matchingservice.UpdateWorkerVersioningRulesResponse + op := func(ctx context.Context) error { + var err error + resp, err = c.client.UpdateWorkerVersioningRules(ctx, request, opts...) + return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) + return resp, err +} diff --git a/cmd/server/main.go b/cmd/server/main.go index fee18397895..a1f8e8de277 100644 --- a/cmd/server/main.go +++ b/cmd/server/main.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package main import ( @@ -30,11 +6,10 @@ import ( "os" "path" "strings" + "text/template" _ "time/tzdata" // embed tzdata as a fallback "github.com/urfave/cli/v2" - "go.uber.org/automaxprocs/maxprocs" - "go.temporal.io/server/common/authorization" "go.temporal.io/server/common/build" "go.temporal.io/server/common/config" @@ -67,29 +42,34 @@ func buildCLI() *cli.App { Name: "root", Aliases: []string{"r"}, Value: ".", - Usage: "root directory of execution environment", + Usage: "root directory of execution environment (deprecated)", EnvVars: []string{config.EnvKeyRoot}, }, &cli.StringFlag{ Name: "config", Aliases: []string{"c"}, Value: "config", - Usage: "config dir path relative to root", + Usage: "config dir path relative to root (deprecated)", EnvVars: []string{config.EnvKeyConfigDir}, }, &cli.StringFlag{ Name: "env", Aliases: []string{"e"}, Value: "development", - Usage: "runtime environment", + Usage: "runtime environment (deprecated)", EnvVars: []string{config.EnvKeyEnvironment}, }, &cli.StringFlag{ Name: "zone", Aliases: []string{"az"}, - Usage: "availability zone", + Usage: "availability zone (deprecated)", EnvVars: []string{config.EnvKeyAvailabilityZone, config.EnvKeyAvailabilityZoneTypo}, }, + &cli.StringFlag{ + Name: "config-file", + Usage: "path to config file (absolute or relative to current working directory)", + EnvVars: []string{config.EnvKeyConfigFile}, + }, &cli.BoolFlag{ Name: "allow-no-auth", Usage: "allow no authorizer", @@ -98,6 +78,50 @@ func buildCLI() *cli.App { } app.Commands = []*cli.Command{ + { + Name: "validate-dynamic-config", + Usage: "Validate a dynamic config file[s] with known keys and types", + ArgsUsage: " ...", + Action: func(c *cli.Context) error { + total 
:= 0 + for _, fileName := range c.Args().Slice() { + contents, err := os.ReadFile(fileName) + if err != nil { + return err + } + result := dynamicconfig.LoadYamlFile(contents) + total += len(result.Errors) + fmt.Println(fileName) + t := template.Must(template.New("").Parse( + "{{range .Errors}} error: {{.}}\n" + + "{{end}}{{range .Warnings}} warning: {{.}}\n" + + "{{end}}", + )) + _ = t.Execute(os.Stdout, result) + } + if total > 0 { + return fmt.Errorf("%d total errors", total) + } + return nil + }, + }, + { + Name: "render-config", + Usage: "Render server config template", + ArgsUsage: " ", + Action: func(c *cli.Context) error { + cfg, err := config.Load( + config.WithEnv(c.String("env")), + config.WithConfigDir(c.String("config")), + config.WithZone(c.String("zone")), + ) + if err != nil { + return cli.Exit(fmt.Errorf("Unable to load configuration: %w", err), 1) + } + fmt.Println(cfg.String()) + return nil + }, + }, { Name: "start", Usage: "Start Temporal server", @@ -114,6 +138,7 @@ func buildCLI() *cli.App { Aliases: []string{"svc"}, Value: cli.NewStringSlice(temporal.DefaultServices...), Usage: "service(s) to start", + EnvVars: []string{"TEMPORAL_SERVICES"}, }, }, Before: func(c *cli.Context) error { @@ -121,15 +146,12 @@ func buildCLI() *cli.App { return cli.Exit("ERROR: start command doesn't support arguments. 
Use --service flag instead.", 1) } - if _, err := maxprocs.Set(); err != nil { - stdlog.Println(fmt.Sprintf("WARNING: failed to set GOMAXPROCS: %v.", err)) + if c.IsSet("config-file") && (c.IsSet("config") || c.IsSet("env") || c.IsSet("zone") || c.IsSet("root")) { + return cli.Exit("ERROR: can not use --config, --env, --zone, or --root with --config-file", 1) } return nil }, Action: func(c *cli.Context) error { - env := c.String("env") - zone := c.String("zone") - configDir := path.Join(c.String("root"), c.String("config")) services := c.StringSlice("service") allowNoAuth := c.Bool("allow-no-auth") @@ -139,35 +161,39 @@ func buildCLI() *cli.App { services = strings.Split(c.String("services"), ",") } - cfg, err := config.LoadConfig(env, configDir, zone) + var cfg *config.Config + var err error + + switch { + case c.IsSet("config-file"): + cfg, err = config.Load(config.WithConfigFile(c.String("config-file"))) + case c.IsSet("config") || c.IsSet("env") || c.IsSet("zone"): + cfg, err = config.Load( + config.WithEnv(c.String("env")), + config.WithConfigDir(path.Join(c.String("root"), c.String("config"))), + config.WithZone(c.String("zone")), + ) + default: + cfg, err = config.Load(config.WithEmbedded()) + } + if err != nil { return cli.Exit(fmt.Sprintf("Unable to load configuration: %v.", err), 1) } logger := log.NewZapLogger(log.BuildZapLogger(cfg.Log)) logger.Info("Build info.", - tag.NewTimeTag("git-time", build.InfoData.GitTime), - tag.NewStringTag("git-revision", build.InfoData.GitRevision), - tag.NewBoolTag("git-modified", build.InfoData.GitModified), - tag.NewStringTag("go-arch", build.InfoData.GoArch), - tag.NewStringTag("go-os", build.InfoData.GoOs), - tag.NewStringTag("go-version", build.InfoData.GoVersion), - tag.NewBoolTag("cgo-enabled", build.InfoData.CgoEnabled), - tag.NewStringTag("server-version", headers.ServerVersion), - tag.NewBoolTag("debug-mode", debug.Enabled), + tag.Time("git-time", build.InfoData.GitTime), + tag.String("git-revision", 
build.InfoData.GitRevision), + tag.Bool("git-modified", build.InfoData.GitModified), + tag.String("go-arch", build.InfoData.GoArch), + tag.String("go-os", build.InfoData.GoOs), + tag.String("go-version", build.InfoData.GoVersion), + tag.Bool("cgo-enabled", build.InfoData.CgoEnabled), + tag.String("server-version", headers.ServerVersion), + tag.Bool("debug-mode", debug.Enabled), ) - var dynamicConfigClient dynamicconfig.Client - if cfg.DynamicConfigClient != nil { - dynamicConfigClient, err = dynamicconfig.NewFileBasedClient(cfg.DynamicConfigClient, logger, temporal.InterruptCh()) - if err != nil { - return cli.Exit(fmt.Sprintf("Unable to create dynamic config client. Error: %v", err), 1) - } - } else { - dynamicConfigClient = dynamicconfig.NewNoopClient() - logger.Info("Dynamic config client is not configured. Using noop client.") - } - authorizer, err := authorization.GetAuthorizerFromConfig( &cfg.Global.Authorization, ) @@ -182,20 +208,29 @@ func buildCLI() *cli.App { ) } + // Authorization mappers: claim and audience claimMapper, err := authorization.GetClaimMapperFromConfig(&cfg.Global.Authorization, logger) if err != nil { return cli.Exit(fmt.Sprintf("Unable to instantiate claim mapper: %v.", err), 1) } + + audienceMapper, err := authorization.GetAudienceMapperFromConfig(&cfg.Global.Authorization) + if err != nil { + return cli.Exit(fmt.Sprintf("Unable to instantiate audience mapper: %v.", err), 1) + } + s, err := temporal.NewServer( temporal.ForServices(services), temporal.WithConfig(cfg), - temporal.WithDynamicConfigClient(dynamicConfigClient), temporal.WithLogger(logger), temporal.InterruptOn(temporal.InterruptCh()), temporal.WithAuthorizer(authorizer), temporal.WithClaimMapper(func(cfg *config.Config) authorization.ClaimMapper { return claimMapper }), + temporal.WithAudienceGetter(func(cfg *config.Config) authorization.JWTAudienceMapper { + return audienceMapper + }), ) if err != nil { return cli.Exit(fmt.Sprintf("Unable to create server. 
Error: %v.", err), 1) diff --git a/cmd/tools/cassandra/main.go b/cmd/tools/cassandra/main.go index 8870ee82668..26bd4e40d76 100644 --- a/cmd/tools/cassandra/main.go +++ b/cmd/tools/cassandra/main.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package main import ( diff --git a/cmd/tools/check-dependencies/main.go b/cmd/tools/check-dependencies/main.go new file mode 100644 index 00000000000..b9bf5488f15 --- /dev/null +++ b/cmd/tools/check-dependencies/main.go @@ -0,0 +1,225 @@ +// check-dependencies validates that key Go module dependencies (go.temporal.io/api +// and go.temporal.io/sdk) meet version policies for the PR's base branch: +// +// - release/* and cloud/* branches: dependencies must be tagged semver releases. 
+// - main: tagged releases are accepted; pseudo-versions must reference a commit +// on the dependency's default branch. +// - Other branches: no policy enforced. +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "os" + "os/exec" + "strings" + "time" + + "golang.org/x/mod/modfile" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" +) + +const defaultGoModPath = "go.mod" + +type moduleSpec struct { + modulePath string + repoURL string + defaultBranch string +} + +var knownModules = []moduleSpec{ + { + modulePath: "go.temporal.io/api", + repoURL: "https://github.com/temporalio/api-go.git", + defaultBranch: "master", + }, + { + modulePath: "go.temporal.io/sdk", + repoURL: "https://github.com/temporalio/sdk-go.git", + defaultBranch: "master", + }, +} + +func main() { + baseBranch := flag.String("base-branch", "", "PR base branch (e.g. main, release/v1.31)") + goModPath := flag.String("go-mod", defaultGoModPath, "Path to go.mod") + flag.Parse() + + branch := strings.TrimSpace(*baseBranch) + if branch == "" { + fmt.Fprintln(os.Stderr, "Error: base branch is required; pass --base-branch") + os.Exit(1) + } + + modPath := strings.TrimSpace(*goModPath) + goModData, err := os.ReadFile(modPath) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to read %s: %v\n", modPath, err) + os.Exit(1) + } + + modFile, err := modfile.Parse(modPath, goModData, nil) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: failed to parse %s: %v\n", modPath, err) + os.Exit(1) + } + + var validateErr error + switch { + case strings.HasPrefix(branch, "release/") || strings.HasPrefix(branch, "cloud/"): + validateErr = validateReleaseBranch(modFile) + case branch == "main": + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + validateErr = validateMainBranch(ctx, modFile) + default: + fmt.Printf("No dependency policy for base branch %q; skipping validation\n", branch) + } + + if validateErr != nil { + fmt.Fprintf(os.Stderr, "Error: 
%v\n", validateErr) + os.Exit(1) + } +} + +func validateReleaseBranch(modFile *modfile.File) error { + var failures []string + for _, mod := range knownModules { + modVersion, ok := findRequiredModuleVersion(modFile, mod.modulePath) + if !ok { + failures = append(failures, fmt.Sprintf("%s: dependency not found in go.mod", mod.modulePath)) + continue + } + + if !semver.IsValid(modVersion.Version) || module.IsPseudoVersion(modVersion.Version) { + failures = append(failures, fmt.Sprintf("%s: version %q must be a tagged semver release", mod.modulePath, modVersion.Version)) + continue + } + + fmt.Printf(" - %s@%s (ok)\n", mod.modulePath, modVersion.Version) + } + + if len(failures) > 0 { + return fmt.Errorf("release dependency validation failed:\n - %s", strings.Join(failures, "\n - ")) + } + + fmt.Println("All required dependencies use tagged releases") + return nil +} + +func validateMainBranch( + ctx context.Context, + modFile *modfile.File, +) error { + var failures []string + for _, mod := range knownModules { + if err := validateMainModule(ctx, modFile, mod); err != nil { + failures = append(failures, err.Error()) + } + } + + if len(failures) > 0 { + return fmt.Errorf("main branch dependency validation failed:\n - %s", strings.Join(failures, "\n - ")) + } + + fmt.Println("All required dependencies are valid for main branch") + return nil +} + +func validateMainModule( + ctx context.Context, + modFile *modfile.File, + mod moduleSpec, +) error { + modVersion, ok := findRequiredModuleVersion(modFile, mod.modulePath) + if !ok { + return fmt.Errorf("%s: dependency not found in go.mod", mod.modulePath) + } + version := modVersion.Version + + fmt.Printf("Found %s version: %s\n", mod.modulePath, version) + + if !module.IsPseudoVersion(version) { + if !semver.IsValid(version) { + return fmt.Errorf("%s@%s: not a valid semver tag", mod.modulePath, version) + } + fmt.Printf(" - %s@%s is a tagged release (ok)\n", mod.modulePath, version) + return nil + } + + shortHash, err := 
module.PseudoVersionRev(version) + if err != nil { + return fmt.Errorf("%s@%s: failed to parse pseudo-version revision: %v", mod.modulePath, version, err) + } + + onDefault, err := resolveModuleOriginForSpec(ctx, mod, shortHash) + if err != nil { + return fmt.Errorf("%s@%s: failed to resolve module origin: %v", mod.modulePath, version, err) + } + + if !onDefault { + return fmt.Errorf("%s@%s: commit %s is not on the default branch (%s) of %s", + mod.modulePath, version, shortHash, mod.defaultBranch, mod.repoURL) + } + + fmt.Printf(" - %s@%s is on %s (ok)\n", mod.modulePath, version, mod.defaultBranch) + return nil +} + +func findRequiredModuleVersion(modFile *modfile.File, modulePath string) (module.Version, bool) { + for _, req := range modFile.Require { + if req.Mod.Path == modulePath { + return req.Mod, true + } + } + return module.Version{}, false +} + +// resolveModuleOriginForSpec reports whether shortHash is reachable from the +// default branch of mod's repository. +// +// It runs two git commands: +// +// 1. git clone --bare --filter=blob:none --single-branch --branch +// --bare: clone without a working tree; only the git object store and refs +// are written to tmpDir. +// --filter=blob:none: partial clone — fetch commits and trees but skip file +// blobs entirely, since we only need commit graph reachability. +// --single-branch: fetch only the ref for --branch, not all remote branches. +// --branch : which branch to fetch. +// +// 2. git -C merge-base --is-ancestor refs/heads/ +// -C : run in the cloned bare repo. +// merge-base --is-ancestor: tests reachability rather than finding a common +// ancestor — exits 0 if is an ancestor of (or equal to) the +// branch tip, exits 1 if it is not. +// : the abbreviated commit hash extracted from the pseudo-version. +// refs/heads/: the branch tip to check ancestry against. +// Any other exit code indicates an error (e.g. the object does not exist). 
+func resolveModuleOriginForSpec(ctx context.Context, mod moduleSpec, shortHash string) (bool, error) { + tmpRepo, err := os.MkdirTemp("", "check-dependencies-*") + if err != nil { + return false, fmt.Errorf("failed to create temp repo dir: %w", err) + } + defer func() { _ = os.RemoveAll(tmpRepo) }() + + cmd := exec.CommandContext(ctx, "git", "clone", "--bare", "--filter=blob:none", "--single-branch", "--branch", mod.defaultBranch, mod.repoURL, tmpRepo) + out, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("git clone failed: %w: %s", err, strings.TrimSpace(string(out))) + } + + out, err = exec.CommandContext(ctx, "git", "-C", tmpRepo, "merge-base", "--is-ancestor", shortHash, "refs/heads/"+mod.defaultBranch).CombinedOutput() + if err == nil { + return true, nil + } + var exitErr *exec.ExitError + if errors.As(err, &exitErr) && exitErr.ExitCode() == 1 { + return false, nil + } + fmt.Printf("git merge-base --is-ancestor output: %s\n", strings.TrimSpace(string(out))) + return false, fmt.Errorf("git merge-base --is-ancestor failed: %w", err) +} diff --git a/cmd/tools/check-dependencies/main_test.go b/cmd/tools/check-dependencies/main_test.go new file mode 100644 index 00000000000..13353936cda --- /dev/null +++ b/cmd/tools/check-dependencies/main_test.go @@ -0,0 +1,252 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/mod/modfile" +) + +func parseGoMod(t *testing.T, content string) *modfile.File { + t.Helper() + f, err := modfile.Parse("go.mod", []byte(content), nil) + require.NoError(t, err) + return f +} + +func makeGoMod(deps map[string]string) string { + s := "module test\n\ngo 1.21\n\nrequire (\n" + for mod, ver := range deps { + s += fmt.Sprintf("\t%s %s\n", mod, ver) + } + return s + ")\n" +} + +func TestFindRequiredModuleVersion(t *testing.T) { + f := parseGoMod(t, makeGoMod(map[string]string{ + "go.temporal.io/api": "v1.2.3", + 
"go.temporal.io/sdk": "v1.4.0", + })) + + t.Run("found", func(t *testing.T) { + v, ok := findRequiredModuleVersion(f, "go.temporal.io/api") + require.True(t, ok) + require.Equal(t, "v1.2.3", v.Version) + }) + + t.Run("not found", func(t *testing.T) { + _, ok := findRequiredModuleVersion(f, "go.temporal.io/missing") + require.False(t, ok) + }) +} + +func TestValidateReleaseBranch(t *testing.T) { + tests := []struct { + name string + deps map[string]string + wantErr bool + errContains []string + errNotContains []string + }{ + { + name: "tagged semver passes", + deps: map[string]string{ + "go.temporal.io/api": "v1.40.0", + "go.temporal.io/sdk": "v1.31.0", + }, + }, + { + name: "pseudo-version fails", + deps: map[string]string{ + "go.temporal.io/api": "v1.40.1-0.20240101000000-abcdef012345", + "go.temporal.io/sdk": "v1.31.0", + }, + wantErr: true, + errContains: []string{"go.temporal.io/api", "tagged semver release"}, + }, + { + name: "both modules missing fails", + deps: nil, // empty go.mod + wantErr: true, + errContains: []string{"go.temporal.io/api", "go.temporal.io/sdk"}, + }, + { + name: "one pseudo one tagged fails with one error", + deps: map[string]string{ + "go.temporal.io/api": "v1.40.0", + "go.temporal.io/sdk": "v1.31.1-0.20240101000000-abcdef012345", + }, + wantErr: true, + errContains: []string{"go.temporal.io/sdk"}, + errNotContains: []string{"go.temporal.io/api"}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var content string + if tc.deps == nil { + content = "module test\n\ngo 1.21\n" + } else { + content = makeGoMod(tc.deps) + } + f := parseGoMod(t, content) + err := validateReleaseBranch(f) + if !tc.wantErr { + require.NoError(t, err) + return + } + require.Error(t, err) + for _, s := range tc.errContains { + require.Contains(t, err.Error(), s) + } + for _, s := range tc.errNotContains { + require.NotContains(t, err.Error(), s) + } + }) + } +} + +// localRepo is a bare git repo with commits for testing. 
+type localRepo struct { + // Path to the bare repo. + path string + // Hash of the commit on the default branch. + onBranchHash string + // Hash of a commit that exists in the repo but is NOT on the default branch. + offBranchHash string +} + +// initLocalRepo creates a bare git repo with one commit on the default branch +// and one commit on a side branch. Both commits exist as objects in the bare +// repo, but only onBranchHash is reachable from refs/heads/. +func initLocalRepo(t *testing.T, branch string) localRepo { + t.Helper() + + work := t.TempDir() + run := func(args ...string) string { + t.Helper() + cmd := exec.Command("git", args...) + cmd.Dir = work + out, err := cmd.CombinedOutput() + require.NoError(t, err, "git %v: %s", args, out) + return string(out) + } + + run("init", "-b", branch) + run("config", "user.email", "test@test.com") + run("config", "user.name", "Test") + + require.NoError(t, os.WriteFile(filepath.Join(work, "file.txt"), []byte("hello"), 0o600)) + run("add", ".") + run("commit", "-m", "initial") + onHash := run("rev-parse", "HEAD") + + // Create a side branch with its own commit. + run("checkout", "-b", "side") + require.NoError(t, os.WriteFile(filepath.Join(work, "side.txt"), []byte("side"), 0o600)) + run("add", ".") + run("commit", "-m", "side commit") + offHash := run("rev-parse", "HEAD") + run("checkout", branch) + + // Clone to a bare repo without --single-branch so that git fetches all + // branches, making the side-branch commit reachable as an object. This + // mirrors the scenario where a pseudo-version references a commit that + // exists in the repo but is not on the default branch. 
+ bare := t.TempDir() + cmd := exec.Command("git", "clone", "--bare", work, bare) + out, err := cmd.CombinedOutput() + require.NoError(t, err, "git clone --bare: %s", out) + + return localRepo{ + path: bare, + onBranchHash: onHash[:len(onHash)-1], + offBranchHash: offHash[:len(offHash)-1], + } +} + +func TestResolveModuleOriginForSpec(t *testing.T) { + const branch = "master" + repo := initLocalRepo(t, branch) + + spec := moduleSpec{ + modulePath: "go.temporal.io/api", + repoURL: repo.path, + defaultBranch: branch, + } + + tests := []struct { + name string + hash string + onDefault bool + }{ + {"commit on default branch", repo.onBranchHash[:12], true}, + {"commit not on default branch", repo.offBranchHash[:12], false}, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + onDefault, err := resolveModuleOriginForSpec(context.Background(), spec, tc.hash) + require.NoError(t, err) + require.Equal(t, tc.onDefault, onDefault) + }) + } +} + +// setupLocalKnownModules replaces knownModules with specs pointing at the local +// bare repo, restoring the original on cleanup. 
+func setupLocalKnownModules(t *testing.T, repo localRepo, branch string) { + t.Helper() + orig := knownModules + t.Cleanup(func() { knownModules = orig }) + knownModules = []moduleSpec{ + {modulePath: "go.temporal.io/api", repoURL: repo.path, defaultBranch: branch}, + {modulePath: "go.temporal.io/sdk", repoURL: repo.path, defaultBranch: branch}, + } +} + +func TestValidateMainBranch(t *testing.T) { + t.Run("tagged release passes", func(t *testing.T) { + f := parseGoMod(t, makeGoMod(map[string]string{ + "go.temporal.io/api": "v1.40.0", + "go.temporal.io/sdk": "v1.31.0", + })) + require.NoError(t, validateMainBranch(context.Background(), f)) + }) + + t.Run("missing module fails", func(t *testing.T) { + f := parseGoMod(t, "module test\n\ngo 1.21\n") + err := validateMainBranch(context.Background(), f) + require.Error(t, err) + require.Contains(t, err.Error(), "go.temporal.io/api") + }) + + const branch = "master" + repo := initLocalRepo(t, branch) + setupLocalKnownModules(t, repo, branch) + + t.Run("pseudo-version on default branch passes", func(t *testing.T) { + ver := fmt.Sprintf("v0.0.0-20240101000000-%s", repo.onBranchHash[:12]) + f := parseGoMod(t, makeGoMod(map[string]string{ + "go.temporal.io/api": ver, + "go.temporal.io/sdk": ver, + })) + require.NoError(t, validateMainBranch(context.Background(), f)) + }) + + t.Run("pseudo-version not on default branch fails", func(t *testing.T) { + ver := fmt.Sprintf("v0.0.0-20240101000000-%s", repo.offBranchHash[:12]) + f := parseGoMod(t, makeGoMod(map[string]string{ + "go.temporal.io/api": ver, + "go.temporal.io/sdk": ver, + })) + err := validateMainBranch(context.Background(), f) + require.Error(t, err) + require.Contains(t, err.Error(), "not on the default branch") + }) +} diff --git a/cmd/tools/ci-notify/main.go b/cmd/tools/ci-notify/main.go new file mode 100644 index 00000000000..7f7b02a6685 --- /dev/null +++ b/cmd/tools/ci-notify/main.go @@ -0,0 +1,14 @@ +package main + +import ( + "os" + + cinotify 
"go.temporal.io/server/tools/ci-notify" +) + +func main() { + app := cinotify.NewCliApp() + if err := app.Run(os.Args); err != nil { + os.Exit(1) + } +} diff --git a/cmd/tools/codegen/helpers.go b/cmd/tools/codegen/helpers.go new file mode 100644 index 00000000000..846a5cf174f --- /dev/null +++ b/cmd/tools/codegen/helpers.go @@ -0,0 +1,96 @@ +package codegen + +import ( + "go/parser" + "go/token" + "io" + "log" + "os" + "path/filepath" + "strings" + "text/template" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +func GenerateToFile[T any]( + generator func(io.Writer, T) error, + generatorArg T, + outPath string, + outFileName string, +) { + filename := filepath.Join(outPath, outFileName+"_gen.go") + w, err := os.Create(filename) + FatalIfErr(err) + defer func() { + FatalIfErr(w.Close()) + _, err = parser.ParseFile(token.NewFileSet(), filename, nil, parser.SkipObjectResolution) + FatalIfErr(err) + }() + err = generator(w, generatorArg) + FatalIfErr(err) +} + +func GenerateTemplateToWriter(tmpl string, data any, w io.Writer) error { + t, err := template.New("code").Parse(tmpl) + if err != nil { + return err + } + return t.Execute(w, data) +} + +func GenerateTemplateToFile( + tmpl string, + data any, + outPath string, + outFileName string, +) { + GenerateToFile(func(w io.Writer, data any) error { + return GenerateTemplateToWriter(tmpl, data, w) + }, data, outPath, outFileName) +} + +func FatalIfErr(err error) { + if err != nil { + //nolint:revive // okay to call Fatal here since this is part of the build process, not the server. + log.Fatal(err) + } +} + +func Fatalf(format string, v ...any) { + //nolint:revive // okay to call Fatal here since this is part of the build process, not the server. + log.Fatalf(format, v...) 
+} + +func CamelCaseToSnakeCase(s string) string { + if s == "" { + return "" + } + t := make([]rune, 0, len(s)+5) + for i, c := range s { + if IsASCIIUpper(c) { + if i != 0 { + t = append(t, '_') + } + c ^= ' ' // Make it a lower letter. + } + t = append(t, c) + } + return string(t) +} + +func SnakeCaseToPascalCase(s string) string { + var b strings.Builder + // Capitalize the first letter of each word split by underscore + for word := range strings.SplitSeq(s, "_") { + // Convert first rune to upper and the rest to lower case + b.WriteString(cases.Title(language.AmericanEnglish).String(strings.ToLower(word))) + } + // Join them back into a single string + return b.String() +} + +func IsASCIIUpper(c rune) bool { + return 'A' <= c && c <= 'Z' +} diff --git a/cmd/tools/codegen/helpers_test.go b/cmd/tools/codegen/helpers_test.go new file mode 100644 index 00000000000..b4da3781c59 --- /dev/null +++ b/cmd/tools/codegen/helpers_test.go @@ -0,0 +1,33 @@ +package codegen + +import "testing" + +func TestSnakeCaseToPascalCase(t *testing.T) { + tests := []struct { + name string + in string + want string + }{ + {name: "empty", in: "", want: ""}, + {name: "single_lower", in: "a", want: "A"}, + {name: "single_upper", in: "A", want: "A"}, + {name: "simple", in: "hello", want: "Hello"}, + {name: "two_words", in: "hello_world", want: "HelloWorld"}, + {name: "all_caps", in: "HELLO_WORLD", want: "HelloWorld"}, + {name: "leading_underscore", in: "_leading", want: "Leading"}, + {name: "trailing_underscore", in: "trailing_", want: "Trailing"}, + {name: "double_underscore", in: "a__b", want: "AB"}, + {name: "only_underscores", in: "__", want: ""}, + {name: "common_id", in: "user_id", want: "UserId"}, + {name: "with_digits", in: "http_server_v2", want: "HttpServerV2"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := SnakeCaseToPascalCase(tt.in) + if got != tt.want { + t.Fatalf("SnakeCaseToPascalCase(%q) = %q, want %q", tt.in, got, tt.want) + } + }) + } 
+} diff --git a/cmd/tools/copyright/licensegen.go b/cmd/tools/copyright/licensegen.go deleted file mode 100644 index 9bbcd2469b2..00000000000 --- a/cmd/tools/copyright/licensegen.go +++ /dev/null @@ -1,196 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package main - -import ( - "bufio" - "flag" - "fmt" - "os" - "path/filepath" - "strings" -) - -type ( - // task that adds license header to source - // files, if they don't already exist - addLicenseHeaderTask struct { - license string // license header string to add - config *config // root directory of the project source - } - - // command line config params - config struct { - licenseFile string - scanDir string - verifyOnly bool - } -) - -// licenseFileName is the name of the license file -const licenseFileName = "./LICENSE" - -// unique prefix that identifies a license header -const licenseHeaderPrefix = "// The MIT License" - -var ( - // directories to be excluded - dirBlocklist = []string{".gen/", ".git/", ".vscode/", ".idea/"} - // default perms for the newly created files - defaultFilePerms = os.FileMode(0644) -) - -// command line utility that adds license header -// to the source files. Usage as follows: -// -// ./cmd/tools/copyright/licensegen.go -func main() { - var cfg config - flag.StringVar(&cfg.licenseFile, "licenseFile", licenseFileName, "directory to scan") - flag.StringVar(&cfg.scanDir, "scanDir", ".", "directory to scan") - flag.BoolVar(&cfg.verifyOnly, "verifyOnly", false, "don't automatically add headers, just verify all files") - flag.Parse() - - task := newAddLicenseHeaderTask(&cfg) - if err := task.run(); err != nil { - fmt.Println(err) - os.Exit(-1) - } -} - -func newAddLicenseHeaderTask(cfg *config) *addLicenseHeaderTask { - return &addLicenseHeaderTask{ - config: cfg, - } -} - -func (task *addLicenseHeaderTask) run() error { - data, err := os.ReadFile(task.config.licenseFile) - if err != nil { - return fmt.Errorf("error reading license file, errr=%v", err.Error()) - } - - task.license, err = commentOutLines(string(data)) - if err != nil { - return fmt.Errorf("copyright header failed to comment out lines, err=%v", err.Error()) - } - - err = filepath.Walk(task.config.scanDir, task.handleFile) - if err != nil { - return 
fmt.Errorf("copyright header check failed, err=%v", err.Error()) - } - return nil -} - -func (task *addLicenseHeaderTask) handleFile(path string, fileInfo os.FileInfo, err error) error { - if err != nil { - return err - } - - if fileInfo.IsDir() { - return nil - } - - if !mustProcessPath(path) { - return nil - } - - if !strings.HasSuffix(fileInfo.Name(), ".go") { - return nil - } - - // Used as part of the cli to write licence headers on files, does not use user supplied input so marked as nosec - // #nosec - f, err := os.Open(path) - if err != nil { - return err - } - - scanner := bufio.NewScanner(f) - readLineSucc := scanner.Scan() - if !readLineSucc { - return fmt.Errorf("fail to read first line of file %v", path) - } - firstLine := strings.TrimSpace(scanner.Text()) - if err := scanner.Err(); err != nil { - return err - } - err = f.Close() - if err != nil { - return err - } - - if strings.Contains(firstLine, licenseHeaderPrefix) { - return nil // file already has the copyright header - } - - // at this point, src file is missing the header - if task.config.verifyOnly { - if !isFileAutogenerated(path) { - return fmt.Errorf("%v missing license header", path) - } - } - - // Used as part of the cli to write licence headers on files, does not use user supplied input so marked as nosec - // #nosec - data, err := os.ReadFile(path) - if err != nil { - return err - } - - return os.WriteFile(path, []byte(task.license+string(data)), defaultFilePerms) -} - -func isFileAutogenerated(path string) bool { - return false -} - -func mustProcessPath(path string) bool { - for _, d := range dirBlocklist { - if strings.HasPrefix(path, d) { - return false - } - } - return true -} - -func commentOutLines(str string) (string, error) { - var lines []string - scanner := bufio.NewScanner(strings.NewReader(str)) - for scanner.Scan() { - line := scanner.Text() - if line == "" { - lines = append(lines, "//\n") - } else { - lines = append(lines, fmt.Sprintf("// %s\n", line)) - } - } - lines = 
append(lines, "\n") - - if err := scanner.Err(); err != nil { - return "", err - } - return strings.Join(lines, ""), nil -} diff --git a/cmd/tools/elasticsearch/main.go b/cmd/tools/elasticsearch/main.go new file mode 100644 index 00000000000..b5297a8ce0f --- /dev/null +++ b/cmd/tools/elasticsearch/main.go @@ -0,0 +1,13 @@ +package main + +import ( + "os" + + "go.temporal.io/server/tools/elasticsearch" +) + +func main() { + if err := elasticsearch.RunTool(os.Args); err != nil { + os.Exit(1) + } +} diff --git a/cmd/tools/fairsim/main.go b/cmd/tools/fairsim/main.go new file mode 100644 index 00000000000..df64e00157b --- /dev/null +++ b/cmd/tools/fairsim/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "os" + + "go.temporal.io/server/tools/fairsim" +) + +func main() { + if err := fairsim.RunTool(os.Args[1:]); err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + os.Exit(1) + } +} diff --git a/cmd/tools/flakereport/main.go b/cmd/tools/flakereport/main.go new file mode 100644 index 00000000000..874d16589b8 --- /dev/null +++ b/cmd/tools/flakereport/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "os" + + "go.temporal.io/server/tools/flakereport" +) + +func main() { + if err := flakereport.NewCliApp().Run(os.Args); err != nil { + fmt.Fprintf(os.Stderr, "Error running flakereport: %v\n", err) + os.Exit(1) + } +} diff --git a/cmd/tools/gendynamicconfig/dynamic_config.tmpl b/cmd/tools/gendynamicconfig/dynamic_config.tmpl new file mode 100644 index 00000000000..04e135f6a79 --- /dev/null +++ b/cmd/tools/gendynamicconfig/dynamic_config.tmpl @@ -0,0 +1,225 @@ +{{- /*gotype: go.temporal.io/server/cmd/tools/gendynamicconfig.dynamicConfigData*/ -}} +// Code generated by cmd/tools/gendynamicconfig. DO NOT EDIT. 
+ +package dynamicconfig + +import ( + "time" + + enumspb "go.temporal.io/api/enums/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/common/namespace" +) + +const ( + PrecedenceUnknown Precedence = iota +{{- range .Precedences}} + Precedence{{.Name}} +{{- end}} +) +{{$Precedences := .Precedences }} +{{- range $T :=.Types}} +{{- range $P := $Precedences}} +{{ if $T.IsGeneric -}} +type {{$P.Name}}TypedSetting[T any] setting[T, func({{$P.GoArgs}})] +type {{$P.Name}}TypedConstrainedDefaultSetting[T any] constrainedDefaultSetting[T, func({{$P.GoArgs}})] + +// New{{$P.Name}}TypedSetting creates a setting that uses mapstructure to handle complex structured +// values. The value from dynamic config will be _merged_ over a deep copy of 'def'. Be very careful +// when using non-empty maps or slices as defaults, the result may not be what you want. +func New{{$P.Name}}TypedSetting[T any](key string, def T, description string) {{$P.Name}}TypedSetting[T] { + // Warn on any shared structure used with ConvertStructure, even though we handle it by deep copying. + warnDefaultSharedStructure(key, def) + // If even deep copy won't even work, we should panic early. Do that by calling deep copy once here. + _ = deepCopyForMapstructure(def) + + s := {{$P.Name}}TypedSetting[T]{ + key: MakeKey(key), + def: def, + convert: ConvertStructure[T](def), + description: description, + } + register(s) + return s +} + +// New{{$P.Name}}TypedSettingWithConverter creates a setting with a custom converter function. +func New{{$P.Name}}TypedSettingWithConverter[T any](key string, convert func(any) (T, error), def T, description string) {{$P.Name}}TypedSetting[T] { + {{/* Do not warn on shared structure here, it's the converter's concern. 
*/ -}} + s := {{$P.Name}}TypedSetting[T]{ + key: MakeKey(key), + def: def, + convert: convert, + description: description, + } + register(s) + return s +} + +// New{{$P.Name}}TypedSettingWithConstrainedDefault creates a setting with a compound default value. +func New{{$P.Name}}TypedSettingWithConstrainedDefault[T any](key string, convert func(any) (T, error), cdef []TypedConstrainedValue[T], description string) {{$P.Name}}TypedConstrainedDefaultSetting[T] { + {{/* Do not warn on shared structure here, it's the converter's concern. */ -}} + s := {{$P.Name}}TypedConstrainedDefaultSetting[T]{ + key: MakeKey(key), + cdef: cdef, + convert: convert, + description: description, + } + register(s) + return s +} + +func (s {{$P.Name}}TypedSetting[T]) Key() Key { return s.key } +func (s {{$P.Name}}TypedSetting[T]) Precedence() Precedence { return Precedence{{$P.Name}} } +func (s {{$P.Name}}TypedSetting[T]) Validate(v any) error { + _, err := s.convert(v) + return err +} + +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Key() Key { return s.key } +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Precedence() Precedence { return Precedence{{$P.Name}} } +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Validate(v any) error { + _, err := s.convert(v) + return err +} + +func (s {{$P.Name}}TypedSetting[T]) WithDefault(v T) {{$P.Name}}TypedSetting[T] { + newS := s + newS.def = v + {{/* The base setting should be registered so we do not register the return value here */ -}} + return newS +} + +{{if eq $P.Name "Global" -}} +type TypedPropertyFn[T any] func({{$P.GoArgs}}) T +{{- else -}} +type TypedPropertyFnWith{{$P.Name}}Filter[T any] func({{$P.GoArgs}}) T +{{- end}} + +{{if eq $P.Name "Global" -}} +func (s {{$P.Name}}TypedSetting[T]) Get(c *Collection) TypedPropertyFn[T] { +{{- else -}} +func (s {{$P.Name}}TypedSetting[T]) Get(c *Collection) TypedPropertyFnWith{{$P.Name}}Filter[T] { +{{- end}} + return func({{$P.GoArgs}}) T { + prec := {{$P.Expr}} + return 
matchAndConvert( + c, + s.key, + s.def, + s.convert, + prec, + ) + } +} + +{{if eq $P.Name "Global" -}} +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Get(c *Collection) TypedPropertyFn[T] { +{{- else -}} +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Get(c *Collection) TypedPropertyFnWith{{$P.Name}}Filter[T] { +{{- end}} + return func({{$P.GoArgs}}) T { + prec := {{$P.Expr}} + return matchAndConvertWithConstrainedDefault( + c, + s.key, + s.cdef, + s.convert, + prec, + ) + } +} + +{{if eq $P.Name "Global" -}} +type TypedSubscribable[T any] func(callback func(T)) (v T, cancel func()) +{{- else -}} +type TypedSubscribableWith{{$P.Name}}Filter[T any] func({{$P.GoArgs}}, callback func(T)) (v T, cancel func()) +{{- end}} + +{{if eq $P.Name "Global" -}} +func (s {{$P.Name}}TypedSetting[T]) Subscribe(c *Collection) TypedSubscribable[T] { + return func(callback func(T)) (T, func()) { +{{- else -}} +func (s {{$P.Name}}TypedSetting[T]) Subscribe(c *Collection) TypedSubscribableWith{{$P.Name}}Filter[T] { + return func({{$P.GoArgs}}, callback func(T)) (T, func()) { +{{- end}} + prec := {{$P.Expr}} + return subscribe(c, s.key, s.def, s.convert, prec, callback) + } +} + +func (s {{$P.Name}}TypedSetting[T]) dispatchUpdate(c *Collection, sub any, cvs []ConstrainedValue) { + dispatchUpdate( + c, + s.key, + s.convert, + sub.(*subscription[T]), + cvs, + ) +} + +{{if eq $P.Name "Global" -}} +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Subscribe(c *Collection) TypedSubscribable[T] { + return func(callback func(T)) (T, func()) { + prec := {{$P.Expr}} + return subscribeWithConstrainedDefault(c, s.key, s.cdef, s.convert, prec, callback) + } +} +{{- else -}} +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) Subscribe(c *Collection) TypedSubscribableWith{{$P.Name}}Filter[T] { + return func({{$P.GoArgs}}, callback func(T)) (T, func()) { + prec := {{$P.Expr}} + return subscribeWithConstrainedDefault(c, s.key, s.cdef, s.convert, prec, callback) + } +} +{{- end}} 
+ +func (s {{$P.Name}}TypedConstrainedDefaultSetting[T]) dispatchUpdate(c *Collection, sub any, cvs []ConstrainedValue) { + dispatchUpdateWithConstrainedDefault( + c, + s.key, + s.convert, + sub.(*subscription[T]), + cvs, + ) +} + +{{if eq $P.Name "Global" -}} +func GetTypedPropertyFn[T any](value T) TypedPropertyFn[T] { +{{- else -}} +func GetTypedPropertyFnFilteredBy{{$P.Name}}[T any](value T) TypedPropertyFnWith{{$P.Name}}Filter[T] { +{{- end}} + return func({{$P.GoArgs}}) T { + return value + } +} +{{else -}} +type {{$P.Name}}{{$T.Name}}Setting = {{$P.Name}}TypedSetting[{{$T.GoType}}] +type {{$P.Name}}{{$T.Name}}ConstrainedDefaultSetting = {{$P.Name}}TypedConstrainedDefaultSetting[{{$T.GoType}}] + +func New{{$P.Name}}{{$T.Name}}Setting(key string, def {{$T.GoType}}, description string) {{$P.Name}}{{$T.Name}}Setting { + return New{{$P.Name}}TypedSettingWithConverter[{{$T.GoType}}](key, convert{{$T.Name}}, def, description) +} + +func New{{$P.Name}}{{$T.Name}}SettingWithConstrainedDefault(key string, cdef []TypedConstrainedValue[{{$T.GoType}}], description string) {{$P.Name}}{{$T.Name}}ConstrainedDefaultSetting { + return New{{$P.Name}}TypedSettingWithConstrainedDefault[{{$T.GoType}}](key, convert{{$T.Name}}, cdef, description) +} + +{{if eq $P.Name "Global" -}} +type {{$T.Name}}PropertyFn = TypedPropertyFn[{{$T.GoType}}] +{{- else -}} +type {{$T.Name}}PropertyFnWith{{$P.Name}}Filter = TypedPropertyFnWith{{$P.Name}}Filter[{{$T.GoType}}] +{{- end}} + +{{if eq $P.Name "Global" -}} +func Get{{$T.Name}}PropertyFn(value {{$T.GoType}}) {{$T.Name}}PropertyFn { + return GetTypedPropertyFn(value) +} +{{- else -}} +func Get{{$T.Name}}PropertyFnFilteredBy{{$P.Name}}(value {{$T.GoType}}) {{$T.Name}}PropertyFnWith{{$P.Name}}Filter { + return GetTypedPropertyFnFilteredBy{{$P.Name}}(value) +} +{{- end}} +{{end }}{{/* if $T.IsGeneric */}} +{{- end}}{{/* range $T :=.Types */}} +{{- end}}{{/* range $P := $Precedences */}} diff --git a/cmd/tools/gendynamicconfig/main.go 
b/cmd/tools/gendynamicconfig/main.go new file mode 100644 index 00000000000..ef4c127bb58 --- /dev/null +++ b/cmd/tools/gendynamicconfig/main.go @@ -0,0 +1,123 @@ +package main + +import ( + _ "embed" + + "go.temporal.io/server/cmd/tools/codegen" +) + +type ( + settingType struct { + Name string + GoType string + IsGeneric bool + } + settingPrecedence struct { + Name string + GoArgs string + Expr string + } + + dynamicConfigData struct { + Types []settingType + Precedences []settingPrecedence + } +) + +var ( + //go:embed dynamic_config.tmpl + dynamicConfigTemplate string + + data = dynamicConfigData{ + Types: []settingType{ + { + Name: "Bool", + GoType: "bool", + }, + { + Name: "Int", + GoType: "int", + }, + { + Name: "Float", + GoType: "float64", + }, + { + Name: "String", + GoType: "string", + }, + { + Name: "Duration", + GoType: "time.Duration", + }, + { + Name: "Map", + GoType: "map[string]any", + }, + { + Name: "Typed", + GoType: "", + IsGeneric: true, // this one is treated differently + }, + }, + Precedences: []settingPrecedence{ + { + Name: "Global", + GoArgs: "", + Expr: "[]Constraints{{}}", + }, + { + Name: "Namespace", + GoArgs: "namespace string", + Expr: "[]Constraints{{Namespace: namespace}, {}}", + }, + { + Name: "NamespaceID", + GoArgs: "namespaceID namespace.ID", + Expr: "[]Constraints{{NamespaceID: namespaceID.String()}, {}}", + }, + { + Name: "TaskQueue", + GoArgs: "namespace string, taskQueue string, taskQueueType enumspb.TaskQueueType", + // A task-queue-name-only filter applies to a single task queue name across all + // namespaces, with higher precedence than a namespace-only filter. This is intended to + // be used by the default partition count and is probably not useful otherwise. 
+ Expr: `[]Constraints{ + {Namespace: namespace, TaskQueueName: taskQueue, TaskQueueType: taskQueueType}, + {Namespace: namespace, TaskQueueName: taskQueue}, + {TaskQueueName: taskQueue}, + {Namespace: namespace}, + {}, + }`, + }, + { + Name: "ShardID", + GoArgs: "shardID int32", + Expr: "[]Constraints{{ShardID: shardID}, {}}", + }, + { + Name: "TaskType", + GoArgs: "taskType enumsspb.TaskType", + Expr: "[]Constraints{{TaskType: taskType}, {}}", + }, + { + Name: "Destination", + GoArgs: "namespace string, destination string", + Expr: `[]Constraints{ + {Namespace: namespace, Destination: destination}, + {Destination: destination}, + {Namespace: namespace}, + {}, + }`, + }, + { + Name: "ChasmTaskType", + GoArgs: "chasmTaskType string", + Expr: "[]Constraints{{ChasmTaskType: chasmTaskType}, {}}", + }, + }} +) + +func main() { + codegen.GenerateTemplateToFile(dynamicConfigTemplate, data, "", "setting") +} diff --git a/cmd/tools/genroutingkeyextractor/main.go b/cmd/tools/genroutingkeyextractor/main.go new file mode 100644 index 00000000000..57f26a6d4d9 --- /dev/null +++ b/cmd/tools/genroutingkeyextractor/main.go @@ -0,0 +1,169 @@ +package main + +import ( + _ "embed" + "flag" + "fmt" + "regexp" + "slices" + "strings" + + protometapb "go.temporal.io/api/protometa/v1" + _ "go.temporal.io/api/workflowservice/v1" // trigger proto file registration + "go.temporal.io/server/cmd/tools/codegen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +//go:embed template.tmpl +var templateStr string + +const ( + workflowServiceFile = "temporal/api/workflowservice/v1/service.proto" + resourceIDHeader = "temporal-resource-id" + pollerGroupIDField = "poller_group_id" + pollerGroupStrategy = "namespace.RoutingStrategyPollerGroup" +) + +var fieldPathRegex = regexp.MustCompile(`\{([^}]+)\}`) + +type methodEntry struct { + RequestType string + Accessor string + Strategy string +} + +type 
templateData struct { + Methods []methodEntry +} + +func main() { + outFlag := flag.String("out", ".", "output directory") + flag.Parse() + + data := buildTemplateData() + codegen.GenerateTemplateToFile(templateStr, data, *outFlag, "routing_key_extractor") +} + +func buildTemplateData() templateData { + fd, err := protoregistry.GlobalFiles.FindFileByPath(workflowServiceFile) + if err != nil { + codegen.Fatalf("finding %s: %v", workflowServiceFile, err) + } + + if fd.Services().Len() == 0 { + codegen.Fatalf("no services found in %s", workflowServiceFile) + } + svc := fd.Services().Get(0) + + var entries []methodEntry + for i := range svc.Methods().Len() { + method := svc.Methods().Get(i) + if entry, ok := methodEntryFromDescriptor(svc, method); ok { + entries = append(entries, entry) + } + } + + // Sort by request type for deterministic output. + slices.SortFunc(entries, func(a, b methodEntry) int { + return strings.Compare(a.RequestType, b.RequestType) + }) + + return templateData{Methods: entries} +} + +// methodEntryFromDescriptor extracts the routing key accessor for a method. +// Returns (entry, ok=true) if the method has a concrete field path annotation, +// or (zero, ok=false) if the method should be skipped (no annotation). 
+func methodEntryFromDescriptor( + svc protoreflect.ServiceDescriptor, + method protoreflect.MethodDescriptor, +) (methodEntry, bool) { + opts := method.Options() + if opts == nil || !proto.HasExtension(opts, protometapb.E_RequestHeader) { + return methodEntry{}, false + } + + //nolint:revive // unchecked-type-assertion + annotations := proto.GetExtension(opts, protometapb.E_RequestHeader).([]*protometapb.RequestHeaderAnnotation) + for _, ann := range annotations { + if ann.GetHeader() != resourceIDHeader { + continue + } + + matches := fieldPathRegex.FindAllStringSubmatch(ann.GetValue(), -1) + if len(matches) != 1 { + codegen.Fatalf("%s.%s: expected exactly one field interpolation in %s value %q, got %d", + svc.Name(), method.Name(), resourceIDHeader, ann.GetValue(), len(matches)) + } + fieldPath := matches[0][1] + + accessor, err := fieldPathToAccessor(fieldPath, method.Input()) + if err != nil { + codegen.Fatalf("generating accessor for %s.%s field path %q: %v", + svc.Name(), method.Name(), fieldPath, err) + } + + // The resource_id field has the format "prefix:". + // Wrap the accessor to extract just the business ID part. + if fieldPath == "resource_id" { + accessor = "routingIDFromResourceID(" + accessor + ")" + } + + // Fields whose terminal segment is `poller_group_id` route via the + // poller-group strategy; everything else uses the default strategy. + strategy := "" + if terminalField(fieldPath) == pollerGroupIDField { + strategy = pollerGroupStrategy + } + + return methodEntry{ + RequestType: string(method.Input().Name()), + Accessor: accessor, + Strategy: strategy, + }, true + } + + return methodEntry{}, false +} + +func terminalField(fieldPath string) string { + parts := strings.Split(fieldPath, ".") + return parts[len(parts)-1] +} + +// fieldPathToAccessor converts a dot-separated proto field path to a Go getter chain. +// E.g. 
"workflow_execution.workflow_id" → "r.GetWorkflowExecution().GetWorkflowId()"
func fieldPathToAccessor(fieldPath string, msgDesc protoreflect.MessageDescriptor) (string, error) {
	var chain strings.Builder
	chain.WriteString("r")

	cur := msgDesc
	for _, segment := range strings.Split(fieldPath, ".") {
		fd := cur.Fields().ByName(protoreflect.Name(segment))
		if fd == nil {
			return "", fmt.Errorf("field %q not found in message %s", segment, cur.FullName())
		}

		chain.WriteString(".Get" + protoFieldToGoName(segment) + "()")

		// Descend only through message-typed fields; for scalar segments cur
		// stays put (a scalar can only validly be the terminal segment).
		if fd.Kind() == protoreflect.MessageKind && fd.Message() != nil {
			cur = fd.Message()
		}
	}

	return chain.String(), nil
}

// protoFieldToGoName converts a snake_case proto field name to PascalCase Go name.
func protoFieldToGoName(name string) string {
	var b strings.Builder
	for _, segment := range strings.Split(name, "_") {
		if segment == "" {
			continue
		}
		b.WriteString(strings.ToUpper(segment[:1]))
		b.WriteString(segment[1:])
	}
	return b.String()
}
diff --git a/cmd/tools/genroutingkeyextractor/template.tmpl b/cmd/tools/genroutingkeyextractor/template.tmpl
new file mode 100644
index 00000000000..93a0ec55a66
--- /dev/null
+++ b/cmd/tools/genroutingkeyextractor/template.tmpl
@@ -0,0 +1,22 @@
+// Code generated by genroutingkeyextractor. DO NOT EDIT.
+
+package interceptor
+
+import (
+	"go.temporal.io/api/workflowservice/v1"
+	"go.temporal.io/server/common/namespace"
+)
+
+// workflowServiceRequestRoutingKey extracts the routing key
+// from a WorkflowService request using field paths declared in the
+// temporal.api.protometa.v1.request_header proto annotation.
+func workflowServiceRequestRoutingKey(req any) namespace.RoutingKey { + switch r := req.(type) { +{{- range .Methods}} + case *workflowservice.{{.RequestType}}: + return namespace.RoutingKey{ID: {{.Accessor}}{{if .Strategy}}, Strategy: {{.Strategy}}{{end}}} +{{- end}} + default: + return namespace.RoutingKey{} + } +} diff --git a/cmd/tools/genrpcserverinterceptors/main.go b/cmd/tools/genrpcserverinterceptors/main.go new file mode 100644 index 00000000000..5b8cf40d8f9 --- /dev/null +++ b/cmd/tools/genrpcserverinterceptors/main.go @@ -0,0 +1,207 @@ +package main + +import ( + "cmp" + _ "embed" + "flag" + "fmt" + "reflect" + "regexp" + + commonpb "go.temporal.io/api/common/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" + "go.temporal.io/server/cmd/tools/codegen" +) + +const maxMessageDepth = 5 + +type ( + messageData struct { + Type string + + WorkflowIDGetter string + RunIDGetter string + TaskTokenGetter string + ActivityIDGetter string + OperationIDGetter string + ChasmRunIDGetter string + } + + grpcServerData struct { + Server string + Imports []string + Messages []messageData + } +) + +var ( + //go:embed server_interceptors.tmpl + serverInterceptorsTemplate string + + // List of types for which Workflow tag getters are generated. + grpcServers = []reflect.Type{ + reflect.TypeOf((*workflowservice.WorkflowServiceServer)(nil)).Elem(), + reflect.TypeOf((*adminservice.AdminServiceServer)(nil)).Elem(), + reflect.TypeOf((*historyservice.HistoryServiceServer)(nil)).Elem(), + reflect.TypeOf((*matchingservice.MatchingServiceServer)(nil)).Elem(), + } + + // Only request fields that match the pattern are eligible for deeper inspection. 
+ fieldNameRegex = regexp.MustCompile("^(?:.*Request|Completion|UpdateRef|ParentExecution|WorkflowState|ExecutionInfo|ExecutionState)$") + + // These types have task_token field, but it is not of type *tokenspb.Task and doesn't have Workflow tags. + excludeTaskTokenTypes = []reflect.Type{ + reflect.TypeOf((*workflowservice.RespondQueryTaskCompletedRequest)(nil)), + reflect.TypeOf((*workflowservice.RespondNexusTaskCompletedRequest)(nil)), + reflect.TypeOf((*workflowservice.RespondNexusTaskFailedRequest)(nil)), + } + + executionGetterT = reflect.TypeOf((*interface { + GetExecution() *commonpb.WorkflowExecution + })(nil)).Elem() + + workflowExecutionGetterT = reflect.TypeOf((*interface { + GetWorkflowExecution() *commonpb.WorkflowExecution + })(nil)).Elem() + + taskTokenGetterT = reflect.TypeOf((*interface { + GetTaskToken() []byte + })(nil)).Elem() + + workflowIDGetterT = reflect.TypeOf((*interface { + GetWorkflowId() string + })(nil)).Elem() + + runIDGetterT = reflect.TypeOf((*interface { + GetRunId() string + })(nil)).Elem() + + activityIDGetterT = reflect.TypeOf((*interface { + GetActivityId() string + })(nil)).Elem() + + operationIDGetterT = reflect.TypeOf((*interface { + GetOperationId() string + })(nil)).Elem() +) + +func main() { + outPathFlag := flag.String("out", ".", "path to write generated files") + flag.Parse() + + for _, grpcServerT := range grpcServers { + codegen.GenerateTemplateToFile(serverInterceptorsTemplate, getGrpcServerData(grpcServerT), *outPathFlag, codegen.CamelCaseToSnakeCase(grpcServerT.Name())) + } +} + +func getGrpcServerData(grpcServerT reflect.Type) grpcServerData { + sd := grpcServerData{ + Server: grpcServerT.Name(), + Imports: []string{grpcServerT.PkgPath()}, + } + + for i := 0; i < grpcServerT.NumMethod(); i++ { + rpcT := grpcServerT.Method(i).Type + if rpcT.NumIn() < 2 { + continue + } + + requestT := rpcT.In(1) // Assume request is always the second parameter. 
+ requestMd := workflowTagGetters(requestT, 0) + requestMd.Type = requestT.String() + sd.Messages = append(sd.Messages, requestMd) + + respT := rpcT.Out(0) // Assume response is always the first parameter. + responseMd := workflowTagGetters(respT, 0) + responseMd.Type = respT.String() + sd.Messages = append(sd.Messages, responseMd) + } + + return sd +} + +//nolint:revive // cognitive complexity 37 (> max enabled 25) +func workflowTagGetters(messageType reflect.Type, depth int) messageData { + pd := messageData{} + if depth > maxMessageDepth { + return pd + } + + switch { + case messageType.AssignableTo(executionGetterT): + pd.WorkflowIDGetter = "GetExecution().GetWorkflowId()" + pd.RunIDGetter = "GetExecution().GetRunId()" + case messageType.AssignableTo(workflowExecutionGetterT): + pd.WorkflowIDGetter = "GetWorkflowExecution().GetWorkflowId()" + pd.RunIDGetter = "GetWorkflowExecution().GetRunId()" + case messageType.AssignableTo(taskTokenGetterT): + for _, ert := range excludeTaskTokenTypes { + if messageType.AssignableTo(ert) { + return pd + } + } + pd.TaskTokenGetter = "GetTaskToken()" + default: + // Might have any combination of these, or none. + if messageType.AssignableTo(workflowIDGetterT) { + pd.WorkflowIDGetter = "GetWorkflowId()" + } + if messageType.AssignableTo(runIDGetterT) { + pd.RunIDGetter = "GetRunId()" + } + if messageType.AssignableTo(activityIDGetterT) { + pd.ActivityIDGetter = "GetActivityId()" + } + if messageType.AssignableTo(operationIDGetterT) { + pd.OperationIDGetter = "GetOperationId()" + } + } + + // Iterates over fields in order they defined in proto file, not proto index. + // Order is important because the first match wins. 
+ for fieldNum := 0; fieldNum < messageType.Elem().NumField(); fieldNum++ { + nestedRequest := messageType.Elem().Field(fieldNum) + if nestedRequest.Type.Kind() != reflect.Ptr { + continue + } + if nestedRequest.Type.Elem().Kind() != reflect.Struct { + continue + } + if !fieldNameRegex.MatchString(nestedRequest.Name) { + continue + } + + nestedRd := workflowTagGetters(nestedRequest.Type, depth+1) + // First match wins: if getter is already set, it won't be overwritten. + if pd.WorkflowIDGetter == "" && nestedRd.WorkflowIDGetter != "" { + pd.WorkflowIDGetter = fmt.Sprintf("Get%s().%s", nestedRequest.Name, nestedRd.WorkflowIDGetter) + } + if pd.RunIDGetter == "" && nestedRd.RunIDGetter != "" { + pd.RunIDGetter = fmt.Sprintf("Get%s().%s", nestedRequest.Name, nestedRd.RunIDGetter) + } + if pd.TaskTokenGetter == "" && nestedRd.TaskTokenGetter != "" { + pd.TaskTokenGetter = fmt.Sprintf("Get%s().%s", nestedRequest.Name, nestedRd.TaskTokenGetter) + } + if pd.ActivityIDGetter == "" && nestedRd.ActivityIDGetter != "" { + pd.ActivityIDGetter = fmt.Sprintf("Get%s().%s", nestedRequest.Name, nestedRd.ActivityIDGetter) + } + if pd.OperationIDGetter == "" && nestedRd.OperationIDGetter != "" { + pd.OperationIDGetter = fmt.Sprintf("Get%s().%s", nestedRequest.Name, nestedRd.OperationIDGetter) + } + } + + // When a business ID (activity or operation) is present without a workflow ID, + // the run_id is not a workflow run ID. Only apply at the top level. 
+ if depth == 0 { + hasChasmBusinessID := pd.WorkflowIDGetter == "" && cmp.Or(pd.ActivityIDGetter, pd.OperationIDGetter) != "" + if hasChasmBusinessID && pd.RunIDGetter != "" { + pd.ChasmRunIDGetter = pd.RunIDGetter + pd.RunIDGetter = "" + } + } + + return pd +} diff --git a/cmd/tools/genrpcserverinterceptors/main_test.go b/cmd/tools/genrpcserverinterceptors/main_test.go new file mode 100644 index 00000000000..bfd43b3880e --- /dev/null +++ b/cmd/tools/genrpcserverinterceptors/main_test.go @@ -0,0 +1,104 @@ +package main + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" +) + +func TestWorkflowTagGetters(t *testing.T) { + testCases := []struct { + name string + reqT reflect.Type + workflowIDGetter string + runIDGetter string + taskTokenGetter string + activityIDGetter string + operationIDGetter string + chasmRunIDGetter string + }{ + { + name: "Request with only workflowID", + reqT: reflect.TypeOf(&workflowservice.StartWorkflowExecutionRequest{}), + workflowIDGetter: "GetWorkflowId()", + }, + { + name: "Request with workflowID and runID", + reqT: reflect.TypeOf(&workflowservice.RecordActivityTaskHeartbeatByIdRequest{}), + workflowIDGetter: "GetWorkflowId()", + runIDGetter: "GetRunId()", + activityIDGetter: "GetActivityId()", + }, + { + name: "Request with execution", + reqT: reflect.TypeOf(&workflowservice.GetWorkflowExecutionHistoryRequest{}), + workflowIDGetter: "GetExecution().GetWorkflowId()", + runIDGetter: "GetExecution().GetRunId()", + }, + { + name: "Request with workflow_execution", + reqT: reflect.TypeOf(&workflowservice.RequestCancelWorkflowExecutionRequest{}), + workflowIDGetter: "GetWorkflowExecution().GetWorkflowId()", + runIDGetter: "GetWorkflowExecution().GetRunId()", + }, + { + name: "Request with task_token", + reqT: 
reflect.TypeOf(&workflowservice.RespondActivityTaskCompletedRequest{}), + taskTokenGetter: "GetTaskToken()", + }, + { + name: "Special handling for RespondQueryTaskCompletedRequest", + reqT: reflect.TypeOf(&workflowservice.RespondQueryTaskCompletedRequest{}), + }, + { + name: "Matching request", + reqT: reflect.TypeOf(&matchingservice.QueryWorkflowRequest{}), + workflowIDGetter: "GetQueryRequest().GetExecution().GetWorkflowId()", + runIDGetter: "GetQueryRequest().GetExecution().GetRunId()", + }, + { + name: "History request", + reqT: reflect.TypeOf(&historyservice.SignalWorkflowExecutionRequest{}), + workflowIDGetter: "GetSignalRequest().GetWorkflowExecution().GetWorkflowId()", + runIDGetter: "GetSignalRequest().GetWorkflowExecution().GetRunId()", + }, + { + name: "History request overrides", + reqT: reflect.TypeOf(&historyservice.ReplicateWorkflowStateRequest{}), + workflowIDGetter: "GetWorkflowState().GetExecutionInfo().GetWorkflowId()", + runIDGetter: "GetWorkflowState().GetExecutionState().GetRunId()", + }, + { + name: "Chasm activity request with activity_id and run_id", + reqT: reflect.TypeOf(&workflowservice.DescribeActivityExecutionRequest{}), + activityIDGetter: "GetActivityId()", + chasmRunIDGetter: "GetRunId()", + }, + { + name: "Chasm activity request with only activity_id", + reqT: reflect.TypeOf(&workflowservice.StartActivityExecutionRequest{}), + activityIDGetter: "GetActivityId()", + }, + { + name: "History request with nested operation_id", + reqT: reflect.TypeOf(&historyservice.CancelNexusOperationRequest{}), + operationIDGetter: "GetRequest().GetOperationId()", + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + rd := workflowTagGetters(tt.reqT, 0) + assert.Equal(t, tt.workflowIDGetter, rd.WorkflowIDGetter, "WorkflowIDGetter") + assert.Equal(t, tt.runIDGetter, rd.RunIDGetter, "RunIDGetter") + assert.Equal(t, tt.taskTokenGetter, rd.TaskTokenGetter, "TaskTokenGetter") + assert.Equal(t, tt.activityIDGetter, 
rd.ActivityIDGetter, "ActivityIDGetter") + assert.Equal(t, tt.operationIDGetter, rd.OperationIDGetter, "OperationIDGetter") + assert.Equal(t, tt.chasmRunIDGetter, rd.ChasmRunIDGetter, "ChasmRunIDGetter") + }) + } +} diff --git a/cmd/tools/genrpcserverinterceptors/server_interceptors.tmpl b/cmd/tools/genrpcserverinterceptors/server_interceptors.tmpl new file mode 100644 index 00000000000..0c99a67e315 --- /dev/null +++ b/cmd/tools/genrpcserverinterceptors/server_interceptors.tmpl @@ -0,0 +1,41 @@ +{{- /*gotype: go.temporal.io/server/cmd/tools/genrpcserverinterceptors.grpcServerData*/ -}} +// Code generated by cmd/tools/genrpcserverinterceptors. DO NOT EDIT. + +package logtags + +import ( +{{- range .Imports}} + {{printf "%q" .}} +{{- end}} + "go.temporal.io/server/common/log/tag" +) + +func (wt *WorkflowTags) extractFrom{{.Server}}Message(message any) []tag.Tag { + switch r := message.(type) { + {{- range .Messages}} + case {{.Type}}: + {{- if or .TaskTokenGetter .WorkflowIDGetter .RunIDGetter .ActivityIDGetter .OperationIDGetter .ChasmRunIDGetter}} + {{- if .TaskTokenGetter}} + return wt.fromTaskToken(r.{{ .TaskTokenGetter}}) + {{- else}} + return []tag.Tag{ + {{if .WorkflowIDGetter}} tag.WorkflowID(r.{{.WorkflowIDGetter}}), + {{end -}} + {{if .ActivityIDGetter}} tag.ActivityID(r.{{.ActivityIDGetter}}), + {{end -}} + {{if .OperationIDGetter}} tag.OperationID(r.{{.OperationIDGetter}}), + {{end -}} + {{if .RunIDGetter}} tag.WorkflowRunID(r.{{.RunIDGetter}}), + {{end -}} + {{if .ChasmRunIDGetter}} tag.ChasmRunID(r.{{.ChasmRunIDGetter}}), + {{end -}} + } + {{- end}} + {{- else}} + return nil + {{- end -}} + {{- end}} + default: + return nil + } +} diff --git a/cmd/tools/genrpcwrappers/main.go b/cmd/tools/genrpcwrappers/main.go new file mode 100644 index 00000000000..0c18629f84c --- /dev/null +++ b/cmd/tools/genrpcwrappers/main.go @@ -0,0 +1,643 @@ +package main + +import ( + "cmp" + "flag" + "fmt" + "io" + "log" + "reflect" + "slices" + "strings" + + taskqueuepb 
"go.temporal.io/api/taskqueue/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/cmd/tools/codegen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + "go.temporal.io/server/api/adminservice/v1" + "go.temporal.io/server/api/historyservice/v1" + "go.temporal.io/server/api/matchingservice/v1" +) + +type ( + service struct { + name string + clientType reflect.Type + clientGenerator func(io.Writer, service) error + } + + fieldWithPath struct { + field *reflect.StructField + path string + } +) + +func (f fieldWithPath) found() bool { + return f.path != "" +} + +var ( + services = []service{ + { + name: "frontend", + clientType: reflect.TypeOf((*workflowservice.WorkflowServiceClient)(nil)), + clientGenerator: generateFrontendOrAdminClient, + }, + { + name: "admin", + clientType: reflect.TypeOf((*adminservice.AdminServiceClient)(nil)), + clientGenerator: generateFrontendOrAdminClient, + }, + { + name: "history", + clientType: reflect.TypeOf((*historyservice.HistoryServiceClient)(nil)), + clientGenerator: generateHistoryClient, + }, + { + name: "matching", + clientType: reflect.TypeOf((*matchingservice.MatchingServiceClient)(nil)), + clientGenerator: generateMatchingClient, + }, + } + + longPollContext = map[string]bool{ + "client.frontend.ListArchivedWorkflowExecutions": true, + "client.frontend.PollActivityTaskQueue": true, + "client.frontend.PollWorkflowTaskQueue": true, + "client.matching.GetTaskQueueUserData": true, + "client.matching.ListNexusEndpoints": true, + } + largeTimeoutContext = map[string]bool{ + "client.admin.GetReplicationMessages": true, + } + longPollRetryPolicy = map[string]string{ + "retryableClient.matching.PollWorkflowTaskQueue": "pollPolicy", + "retryableClient.matching.PollActivityTaskQueue": "pollPolicy", + "retryableClient.matching.PollNexusTaskQueue": "pollPolicy", + } + ignoreMethod = map[string]bool{ + // TODO stream APIs are not 
supported. do not generate. + "client.admin.StreamWorkflowReplicationMessages": true, + "metricsClient.admin.StreamWorkflowReplicationMessages": true, + "retryableClient.admin.StreamWorkflowReplicationMessages": true, + // TODO(bergundy): Allow specifying custom routing for streaming messages. + "client.history.StreamWorkflowReplicationMessages": true, + "metricsClient.history.StreamWorkflowReplicationMessages": true, + "retryableClient.history.StreamWorkflowReplicationMessages": true, + + // Nexus metrics are an exception since they use the information from the request. + "metricsClient.history.StartNexusOperation": true, + "metricsClient.history.CancelNexusOperation": true, + + // these need to pick a partition. too complicated. + "client.matching.AddActivityTask": true, + "client.matching.AddWorkflowTask": true, + "client.matching.PollActivityTaskQueue": true, + "client.matching.PollWorkflowTaskQueue": true, + "client.matching.QueryWorkflow": true, + "client.matching.DispatchNexusTask": true, + "client.matching.PollNexusTaskQueue": true, + + // these do forwarding stats. too complicated. + "metricsClient.matching.AddActivityTask": true, + "metricsClient.matching.AddWorkflowTask": true, + "metricsClient.matching.PollActivityTaskQueue": true, + "metricsClient.matching.PollWorkflowTaskQueue": true, + "metricsClient.matching.QueryWorkflow": true, + "metricsClient.matching.DispatchNexusTask": true, + "metricsClient.matching.PollNexusTaskQueue": true, + } + // Fields to ignore when looking for the routing fields in a request object. 
+ ignoreField = map[string]bool{ + // this is the workflow that sent a signal + "SignalWorkflowExecutionRequest.ExternalWorkflowExecution": true, + // this is the workflow that sent a cancel request + "RequestCancelWorkflowExecutionRequest.ExternalWorkflowExecution": true, + // this is the workflow that sent a terminate + "TerminateWorkflowExecutionRequest.ExternalWorkflowExecution": true, + // this is the parent for starting a child workflow + "StartWorkflowExecutionRequest.ParentExecutionInfo": true, + // this is the root for starting a child workflow + "StartWorkflowExecutionRequest.RootExecutionInfo": true, + // these get routed to the parent + "RecordChildExecutionCompletedRequest.ChildExecution": true, + "VerifyChildExecutionCompletionRecordedRequest.ChildExecution": true, + } +) + +var historyRoutingProtoExtension = func() protoreflect.ExtensionType { + ext, err := protoregistry.GlobalTypes.FindExtensionByName("temporal.server.api.historyservice.v1.routing") + if err != nil { + log.Fatalf("Error finding extension: %s", err) + } + return ext +}() + +func writeTemplatedCode(w io.Writer, service service, tmpl string) { + codegen.FatalIfErr(codegen.GenerateTemplateToWriter(tmpl, map[string]string{ + "ServiceName": service.name, + "ServicePackagePath": service.clientType.Elem().PkgPath(), + }, w)) +} + +func verifyFieldExists(t reflect.Type, path string) { + pathPrefix := t.String() + parts := strings.Split(path, ".") + for i, part := range parts { + if t.Kind() != reflect.Struct { + codegen.Fatalf("%s is not a struct", pathPrefix) + } + fieldName := codegen.SnakeCaseToPascalCase(part) + f, ok := t.FieldByName(fieldName) + if !ok { + codegen.Fatalf("%s has no field named %s", pathPrefix, fieldName) + } + if i == len(parts)-1 { + return + } + ft := f.Type + if ft.Kind() != reflect.Pointer { + codegen.Fatalf("%s.%s is not a struct pointer", pathPrefix, fieldName) + } + t = ft.Elem() + pathPrefix += "." 
+ fieldName + } +} + +func findNestedField(t reflect.Type, name string, path string, maxDepth int) []fieldWithPath { + if t.Kind() != reflect.Struct || maxDepth <= 0 { + return nil + } + var out []fieldWithPath + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if ignoreField[t.Name()+"."+f.Name] { + continue + } + if f.Name == name { + out = append(out, fieldWithPath{field: &f, path: path + ".Get" + name + "()"}) + } + ft := f.Type + if ft.Kind() == reflect.Pointer { + out = append(out, findNestedField(ft.Elem(), name, path+".Get"+f.Name+"()", maxDepth-1)...) + } + } + return out +} + +func findOneNestedField(t reflect.Type, name string, path string, maxDepth int) fieldWithPath { + fields := findNestedField(t, name, path, maxDepth) + if len(fields) == 0 { + codegen.Fatalf("couldn't find %s in %s", name, t) + } else if len(fields) > 1 { + codegen.Fatalf("found more than one %s in %s (%v)", name, t, fields) + } + return fields[0] +} + +func tryFindOneNestedField(t reflect.Type, name string, path string, maxDepth int) fieldWithPath { + fields := findNestedField(t, name, path, maxDepth) + if len(fields) == 0 { + return fieldWithPath{} + } else if len(fields) > 1 { + codegen.Fatalf("found more than one %s in %s (%v)", name, t, fields) + } + return fields[0] +} + +func historyRoutingOptions(reqType reflect.Type) *historyservice.RoutingOptions { + t := reqType.Elem() // we know it's a pointer + + inst := reflect.New(t) + reflectable, ok := inst.Interface().(interface{ ProtoReflect() protoreflect.Message }) + if !ok { + log.Fatalf("Request has no ProtoReflect method %s", t) + } + opts := reflectable.ProtoReflect().Descriptor().Options() + + // Retrieve the value of the custom option + optionValue := proto.GetExtension(opts, historyRoutingProtoExtension) + if optionValue == nil { + log.Fatalf("Got nil while retrieving extension from options") + } + + routingOptions := optionValue.(*historyservice.RoutingOptions) + if routingOptions == nil { + log.Fatalf("Request has 
no routing options: %s", t)
	}
	return routingOptions
}

// toGetter turns a dot-separated snake_case field path into a chained getter
// expression rooted at "request", e.g. "foo.bar" -> "request.GetFoo().GetBar()".
func toGetter(snake string) string {
	parts := strings.Split(snake, ".")
	for i, part := range parts {
		parts[i] = "Get" + codegen.SnakeCaseToPascalCase(part) + "()"
	}
	return "request." + strings.Join(parts, ".")
}

// makeGetHistoryClient emits the Go statements that compute the target shardID
// for one history RPC, driven by the request's routing annotation. Exactly one
// routing directive (any_host / shard_id / workflow_id / task_token /
// task_infos / chasm_component_ref) must be set on the request message.
func makeGetHistoryClient(reqType reflect.Type, routingOptions *historyservice.RoutingOptions) string {
	t := reqType.Elem() // we know it's a pointer

	// Routing directives are mutually exclusive. BUG FIX: the previous check
	// joined every directive with "&&", so it could only fire when ALL of them
	// were set at once and never caught a request with two directives. Count
	// the set directives instead.
	directives := 0
	if routingOptions.AnyHost {
		directives++
	}
	for _, directive := range []string{
		routingOptions.ShardId,
		routingOptions.WorkflowId,
		routingOptions.TaskToken,
		routingOptions.TaskInfos,
		routingOptions.ChasmComponentRef,
	} {
		if directive != "" {
			directives++
		}
	}
	if directives > 1 {
		log.Fatalf("Found more than one routing directive in %s", t)
	}

	if routingOptions.AnyHost {
		return "shardID := c.getRandomShard()"
	}
	if routingOptions.ShardId != "" {
		verifyFieldExists(t, routingOptions.ShardId)
		return "shardID := " + toGetter(routingOptions.ShardId)
	}
	if routingOptions.WorkflowId != "" {
		// namespace_id is the default companion field unless overridden.
		namespaceIdField := routingOptions.NamespaceId
		if namespaceIdField == "" {
			namespaceIdField = "namespace_id"
		}
		verifyFieldExists(t, namespaceIdField)
		verifyFieldExists(t, routingOptions.WorkflowId)
		return fmt.Sprintf("shardID := c.shardIDFromWorkflowID(%s, %s)", toGetter(namespaceIdField), toGetter(routingOptions.WorkflowId))
	}
	if routingOptions.TaskToken != "" {
		namespaceIdField := routingOptions.NamespaceId
		if namespaceIdField == "" {
			namespaceIdField = "namespace_id"
		}

		verifyFieldExists(t, namespaceIdField)
		verifyFieldExists(t, routingOptions.TaskToken)
		// Tokens carrying a CHASM component ref resolve namespace/business IDs
		// from the ref; plain workflow tokens fall back to the request fields.
		return fmt.Sprintf(`taskToken, err := c.tokenSerializer.Deserialize(%s)
	if err != nil {
		return nil, serviceerror.NewInvalidArgument("error deserializing task token")
	}
	var namespaceID string
	var businessID string
	if len(taskToken.GetComponentRef()) > 0 {
		ref, err := c.tokenSerializer.DeserializeChasmComponentRef(taskToken.GetComponentRef())
		if err != nil {
			return nil, err
		}
		namespaceID = ref.GetNamespaceId()
		businessID = ref.GetBusinessId()
	} else {
		namespaceID = %s
		businessID = taskToken.GetWorkflowId()
	}
	shardID := c.shardIDFromWorkflowID(namespaceID, businessID)
	`, toGetter(routingOptions.TaskToken), toGetter(namespaceIdField))
	}
	if routingOptions.ChasmComponentRef != "" {
		verifyFieldExists(t, routingOptions.ChasmComponentRef)
		return fmt.Sprintf(`ref, err := c.tokenSerializer.DeserializeChasmComponentRef(%s)
	if err != nil {
		return nil, serviceerror.NewInvalidArgument("error deserializing component ref")
	}
	shardID := c.shardIDFromWorkflowID(ref.GetNamespaceId(), ref.GetBusinessId())
	`, toGetter(routingOptions.ChasmComponentRef))
	}
	if routingOptions.TaskInfos != "" {
		verifyFieldExists(t, routingOptions.TaskInfos)
		p := toGetter(routingOptions.TaskInfos)
		// slice needs a tiny bit of extra handling for namespace
		return fmt.Sprintf(`// All workflow IDs are in the same shard per request
	if len(%s) == 0 {
		return nil, serviceerror.NewInvalidArgument("missing TaskInfos")
	}
	shardID := c.shardIDFromWorkflowID(%s[0].NamespaceId, %s[0].WorkflowId)`, p, p, p)
	}

	log.Fatalf("No routing directive specified on %s", t)
	return ""
}

func makeGetMatchingClient(reqType reflect.Type) string {
	// this magically figures out how to get a MatchingServiceClient from a request
	t := reqType.Elem() // we know it's a pointer

	var nsID, tqp, tq, tqt fieldWithPath

	switch t.Name() {
	case "GetBuildIdTaskQueueMappingRequest":
		// Pick a random node for this request, it's not associated with a specific task queue.
+ tq = fieldWithPath{path: "fmt.Sprintf(\"not-applicable-%d\", rand.Int())"} + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} + nsID = findOneNestedField(t, "NamespaceId", "request", 1) + case "UpdateTaskQueueUserDataRequest", + "ReplicateTaskQueueUserDataRequest", + "RecordWorkerHeartbeatRequest", + "ListWorkersRequest", + "DescribeWorkerRequest": + // Always route these requests to the same matching node by namespace. + tq = fieldWithPath{path: "\"not-applicable\""} + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} + nsID = findOneNestedField(t, "NamespaceId", "request", 1) + case "GetWorkerBuildIdCompatibilityRequest", + "UpdateWorkerBuildIdCompatibilityRequest", + "RespondQueryTaskCompletedRequest", + "ListTaskQueuePartitionsRequest", + "SyncDeploymentUserDataRequest", + "CheckTaskQueueUserDataPropagationRequest", + "ApplyTaskQueueUserDataReplicationEventRequest", + "GetWorkerVersioningRulesRequest", + "UpdateWorkerVersioningRulesRequest", + "UpdateFairnessStateRequest", + "UpdateTaskQueueConfigRequest": + tq = findOneNestedField(t, "TaskQueue", "request", 2) + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_WORKFLOW"} + nsID = findOneNestedField(t, "NamespaceId", "request", 1) + case "DispatchNexusTaskRequest", + "PollNexusTaskQueueRequest", + "RespondNexusTaskCompletedRequest", + "RespondNexusTaskFailedRequest": + tq = findOneNestedField(t, "TaskQueue", "request", 2) + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_NEXUS"} + nsID = findOneNestedField(t, "NamespaceId", "request", 1) + case "CreateNexusEndpointRequest", + "UpdateNexusEndpointRequest", + "ListNexusEndpointsRequest", + "DeleteNexusEndpointRequest": + // Always route these requests to the same matching node for all namespaces. 
+ tq = fieldWithPath{path: `"not-applicable"`} + tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} + nsID = fieldWithPath{path: `"not-applicable"`} + default: + tqp = tryFindOneNestedField(t, "TaskQueuePartition", "request", 1) + tq = findOneNestedField(t, "TaskQueue", "request", 2) + tqt = findOneNestedField(t, "TaskQueueType", "request", 2) + nsID = findOneNestedField(t, "NamespaceId", "request", 1) + } + + if !nsID.found() { + codegen.Fatalf("I don't know how to get a client from a %s", t) + } + + if tqp.found() { + return fmt.Sprintf( + `p := tqid.PartitionFromPartitionProto(%s, %s) + + client, err := c.getClientForTaskQueuePartition(p)`, + tqp.path, nsID.path) + } + if tq.found() && tqt.found() { + partitionMaker := fmt.Sprintf("tqid.PartitionFromProto(%s, %s, %s)", tq.path, nsID.path, tqt.path) + // Some task queue fields are full messages, some are just strings + isTaskQueueMessage := tq.field != nil && tq.field.Type == reflect.TypeOf((*taskqueuepb.TaskQueue)(nil)) + if !isTaskQueueMessage { + partitionMaker = fmt.Sprintf("tqid.NormalPartitionFromRpcName(%s, %s, %s)", tq.path, nsID.path, tqt.path) + } + + return fmt.Sprintf( + `p, err := %s + if err != nil { + return nil, err + } + + client, err := c.getClientForTaskQueuePartition(p)`, + partitionMaker) + } + + panic("I don't know how to get a client from a " + t.String()) +} + +func writeTemplatedMethod(w io.Writer, service service, impl string, m reflect.Method, tmpl string) { + key := fmt.Sprintf("%s.%s.%s", impl, service.name, m.Name) + if ignoreMethod[key] { + return + } + + mt := m.Type // should look like: func(context.Context, request reqType, opts []grpc.CallOption) (respType, error) + if !mt.IsVariadic() || + mt.NumIn() != 3 || + mt.NumOut() != 2 || + mt.In(0).String() != "context.Context" || + mt.Out(1).String() != "error" { + panic(key + " doesn't look like a grpc handler method") + } + + reqType := mt.In(1) + respType := mt.Out(0) + + fields := map[string]string{ + "Method": 
m.Name, + "RequestType": reqType.String(), + "ResponseType": respType.String(), + "MetricPrefix": fmt.Sprintf("%s%sClient", strings.ToUpper(service.name[:1]), service.name[1:]), + "RetryPolicy": cmp.Or(longPollRetryPolicy[key], "policy"), + } + if longPollContext[key] { + fields["LongPoll"] = "LongPoll" + } + if largeTimeoutContext[key] { + fields["WithLargeTimeout"] = "WithLargeTimeout" + } + if impl == "client" { + if service.name == "history" { + routingOptions := historyRoutingOptions(reqType) + if routingOptions.Custom { + return + } + fields["GetClient"] = makeGetHistoryClient(reqType, routingOptions) + } else if service.name == "matching" { + fields["GetClient"] = makeGetMatchingClient(reqType) + } + } + + codegen.FatalIfErr(codegen.GenerateTemplateToWriter(tmpl, fields, w)) +} + +func writeTemplatedMethods(w io.Writer, service service, impl string, tmpl string) { + sType := service.clientType.Elem() + for n := 0; n < sType.NumMethod(); n++ { + writeTemplatedMethod(w, service, impl, sType.Method(n), tmpl) + } +} + +func generateFrontendOrAdminClient(w io.Writer, service service) error { + writeTemplatedCode(w, service, `// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. + +package {{.ServiceName}} + +import ( + "context" + + "{{.ServicePackagePath}}" + "google.golang.org/grpc" +) +`) + + writeTemplatedMethods(w, service, "client", ` +func (c *clientImpl) {{.Method}}( + ctx context.Context, + request {{.RequestType}}, + opts ...grpc.CallOption, +) ({{.ResponseType}}, error) { + ctx, cancel := c.create{{or .LongPoll ""}}Context{{or .WithLargeTimeout ""}}(ctx) + defer cancel() + return c.client.{{.Method}}(ctx, request, opts...) +} +`) + return nil +} + +func generateHistoryClient(w io.Writer, service service) error { + writeTemplatedCode(w, service, `// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. 
// generateMatchingClient emits the matching-service client wrappers: each
// generated method runs the routing snippet from makeGetMatchingClient to
// resolve a client for the request's task queue partition, then forwards the
// call under a per-call (optionally long-poll) context.
func generateMatchingClient(w io.Writer, service service) error {
	// File header and imports for the generated package.
	writeTemplatedCode(w, service, `// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT.

package {{.ServiceName}}

import (
	"context"
	"fmt"
	"math/rand"

	enumspb "go.temporal.io/api/enums/v1"
	"{{.ServicePackagePath}}"
	"go.temporal.io/server/common/tqid"
	"google.golang.org/grpc"
)
`)

	// One wrapper per RPC; {{.GetClient}} is the routing snippet produced by
	// makeGetMatchingClient and declares both `client` and `err`.
	writeTemplatedMethods(w, service, "client", `
func (c *clientImpl) {{.Method}}(
	ctx context.Context,
	request {{.RequestType}},
	opts ...grpc.CallOption,
) ({{.ResponseType}}, error) {

	{{.GetClient}}
	if err != nil {
		return nil, err
	}
	ctx, cancel := c.create{{or .LongPoll ""}}Context(ctx)
	defer cancel()
	return client.{{.Method}}(ctx, request, opts...)
}
`)
	return nil
}
+ +package {{.ServiceName}} + +import ( + "context" + + "{{.ServicePackagePath}}" + "google.golang.org/grpc" +) +`) + + writeTemplatedMethods(w, service, "metricsClient", ` +func (c *metricClient) {{.Method}}( + ctx context.Context, + request {{.RequestType}}, + opts ...grpc.CallOption, +) (_ {{.ResponseType}}, retError error) { + + metricsHandler, startTime := c.startMetricsRecording(ctx, "{{.MetricPrefix}}{{.Method}}") + defer func() { + c.finishMetricsRecording(metricsHandler, startTime, retError) + }() + + return c.client.{{.Method}}(ctx, request, opts...) +} +`) + return nil +} + +func generateRetryableClient(w io.Writer, service service) error { + writeTemplatedCode(w, service, `// Code generated by cmd/tools/genrpcwrappers. DO NOT EDIT. + +package {{.ServiceName}} + +import ( + "context" + + "{{.ServicePackagePath}}" + "google.golang.org/grpc" + + "go.temporal.io/server/common/backoff" +) +`) + + writeTemplatedMethods(w, service, "retryableClient", ` +func (c *retryableClient) {{.Method}}( + ctx context.Context, + request {{.RequestType}}, + opts ...grpc.CallOption, +) ({{.ResponseType}}, error) { + var resp {{.ResponseType}} + op := func(ctx context.Context) error { + var err error + resp, err = c.client.{{.Method}}(ctx, request, opts...) 
+ return err + } + err := backoff.ThrottleRetryContext(ctx, op, c.{{.RetryPolicy}}, c.isRetryable) + return resp, err +} +`) + return nil +} + +func main() { + serviceFlag := flag.String("service", "", "which service to generate rpc client wrappers for") + flag.Parse() + + i := slices.IndexFunc(services, func(s service) bool { return s.name == *serviceFlag }) + if i < 0 { + codegen.Fatalf("unknown service: %s", *serviceFlag) + } + svc := services[i] + + codegen.GenerateToFile(svc.clientGenerator, svc, "", "client") + codegen.GenerateToFile(generateMetricClient, svc, "", "metric_client") + codegen.GenerateToFile(generateRetryableClient, svc, "", "retryable_client") +} diff --git a/cmd/tools/gensearchattributehelpers/main.go b/cmd/tools/gensearchattributehelpers/main.go new file mode 100644 index 00000000000..1b386f64f57 --- /dev/null +++ b/cmd/tools/gensearchattributehelpers/main.go @@ -0,0 +1,62 @@ +package main + +import ( + _ "embed" + "flag" + "reflect" + "regexp" + + historypb "go.temporal.io/api/history/v1" + "go.temporal.io/server/cmd/tools/codegen" +) + +type ( + eventData struct { + AttributesTypeName string + } + + searchAttributesHelpersData struct { + Events []eventData + } +) + +var ( + //go:embed search_attribute_helpers.tmpl + searchAttributeHelpersTemplate string + + // Is used to find attribute getters and extract the event type (match[1]). 
+ attributesGetterRegex = regexp.MustCompile("^Get(.+EventAttributes)$") +) + +func main() { + outPathFlag := flag.String("out", ".", "path to write generated files") + flag.Parse() + + codegen.GenerateTemplateToFile(searchAttributeHelpersTemplate, getSearchAttributesHelpersData(), *outPathFlag, "event") +} + +func getSearchAttributesHelpersData() searchAttributesHelpersData { + sahd := searchAttributesHelpersData{} + + historyEventT := reflect.TypeOf((*historypb.HistoryEvent)(nil)) + + for i := 0; i < historyEventT.NumMethod(); i++ { + attributesGetter := historyEventT.Method(i) + matches := attributesGetterRegex.FindStringSubmatch(attributesGetter.Name) + if len(matches) < 2 { + continue + } + if attributesGetter.Type.NumOut() != 1 { + continue + } + if _, found := attributesGetter.Type.Out(0).MethodByName("GetSearchAttributes"); !found { + continue + } + + ed := eventData{ + AttributesTypeName: matches[1], + } + sahd.Events = append(sahd.Events, ed) + } + return sahd +} diff --git a/cmd/tools/gensearchattributehelpers/search_attribute_helpers.tmpl b/cmd/tools/gensearchattributehelpers/search_attribute_helpers.tmpl new file mode 100644 index 00000000000..a8a8e4209ec --- /dev/null +++ b/cmd/tools/gensearchattributehelpers/search_attribute_helpers.tmpl @@ -0,0 +1,32 @@ +{{- /*gotype: go.temporal.io/server/cmd/tools/gensearchattributehelpers.searchAttributesHelpersData*/ -}} +// Code generated by cmd/tools/gensearchattributehelpers. DO NOT EDIT. 
+ +package searchattribute + +import ( + commonpb "go.temporal.io/api/common/v1" + historypb "go.temporal.io/api/history/v1" +) + +func SetToEvent(event *historypb.HistoryEvent, sas *commonpb.SearchAttributes) bool { + switch e := event.GetAttributes().(type) { + {{- range .Events}} + case *historypb.HistoryEvent_{{.AttributesTypeName}}: + e.{{.AttributesTypeName}}.SearchAttributes = sas + return true + {{- end}} + default: + return false + } +} + +func GetFromEvent(event *historypb.HistoryEvent) (*commonpb.SearchAttributes, bool) { + switch e := event.GetAttributes().(type) { + {{- range .Events}} + case *historypb.HistoryEvent_{{.AttributesTypeName}}: + return e.{{.AttributesTypeName}}.GetSearchAttributes(), true + {{- end}} + default: + return nil, false + } +} diff --git a/cmd/tools/getproto/files.go b/cmd/tools/getproto/files.go new file mode 100644 index 00000000000..e333a06f297 --- /dev/null +++ b/cmd/tools/getproto/files.go @@ -0,0 +1,93 @@ +// Code generated by getproto. DO NOT EDIT. +// If you get build errors in this file, just delete it. It will be regenerated. 
+ +package main + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + + activity "go.temporal.io/api/activity/v1" + batch "go.temporal.io/api/batch/v1" + callback "go.temporal.io/api/callback/v1" + command "go.temporal.io/api/command/v1" + common "go.temporal.io/api/common/v1" + compute "go.temporal.io/api/compute/v1" + deployment "go.temporal.io/api/deployment/v1" + enums "go.temporal.io/api/enums/v1" + failure "go.temporal.io/api/failure/v1" + filter "go.temporal.io/api/filter/v1" + history "go.temporal.io/api/history/v1" + namespace "go.temporal.io/api/namespace/v1" + nexus "go.temporal.io/api/nexus/v1" + protocol "go.temporal.io/api/protocol/v1" + query "go.temporal.io/api/query/v1" + replication "go.temporal.io/api/replication/v1" + rules "go.temporal.io/api/rules/v1" + schedule "go.temporal.io/api/schedule/v1" + sdk "go.temporal.io/api/sdk/v1" + taskqueue "go.temporal.io/api/taskqueue/v1" + update "go.temporal.io/api/update/v1" + version "go.temporal.io/api/version/v1" + worker "go.temporal.io/api/worker/v1" + workflow "go.temporal.io/api/workflow/v1" + workflowservice "go.temporal.io/api/workflowservice/v1" + anypb "google.golang.org/protobuf/types/known/anypb" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + wrapperspb "google.golang.org/protobuf/types/known/wrapperspb" +) + +func init() { + importMap = make(map[string]protoreflect.FileDescriptor) + importMap["google/protobuf/any.proto"] = anypb.File_google_protobuf_any_proto + importMap["google/protobuf/duration.proto"] = durationpb.File_google_protobuf_duration_proto + importMap["google/protobuf/empty.proto"] = emptypb.File_google_protobuf_empty_proto + importMap["google/protobuf/field_mask.proto"] = fieldmaskpb.File_google_protobuf_field_mask_proto + 
importMap["google/protobuf/timestamp.proto"] = timestamppb.File_google_protobuf_timestamp_proto + importMap["google/protobuf/wrappers.proto"] = wrapperspb.File_google_protobuf_wrappers_proto + importMap["temporal/api/activity/v1/message.proto"] = activity.File_temporal_api_activity_v1_message_proto + importMap["temporal/api/batch/v1/message.proto"] = batch.File_temporal_api_batch_v1_message_proto + importMap["temporal/api/callback/v1/message.proto"] = callback.File_temporal_api_callback_v1_message_proto + importMap["temporal/api/command/v1/message.proto"] = command.File_temporal_api_command_v1_message_proto + importMap["temporal/api/common/v1/message.proto"] = common.File_temporal_api_common_v1_message_proto + importMap["temporal/api/compute/v1/config.proto"] = compute.File_temporal_api_compute_v1_config_proto + importMap["temporal/api/compute/v1/provider.proto"] = compute.File_temporal_api_compute_v1_provider_proto + importMap["temporal/api/compute/v1/scaler.proto"] = compute.File_temporal_api_compute_v1_scaler_proto + importMap["temporal/api/deployment/v1/message.proto"] = deployment.File_temporal_api_deployment_v1_message_proto + importMap["temporal/api/enums/v1/activity.proto"] = enums.File_temporal_api_enums_v1_activity_proto + importMap["temporal/api/enums/v1/batch_operation.proto"] = enums.File_temporal_api_enums_v1_batch_operation_proto + importMap["temporal/api/enums/v1/command_type.proto"] = enums.File_temporal_api_enums_v1_command_type_proto + importMap["temporal/api/enums/v1/common.proto"] = enums.File_temporal_api_enums_v1_common_proto + importMap["temporal/api/enums/v1/deployment.proto"] = enums.File_temporal_api_enums_v1_deployment_proto + importMap["temporal/api/enums/v1/event_type.proto"] = enums.File_temporal_api_enums_v1_event_type_proto + importMap["temporal/api/enums/v1/failed_cause.proto"] = enums.File_temporal_api_enums_v1_failed_cause_proto + importMap["temporal/api/enums/v1/namespace.proto"] = 
enums.File_temporal_api_enums_v1_namespace_proto + importMap["temporal/api/enums/v1/nexus.proto"] = enums.File_temporal_api_enums_v1_nexus_proto + importMap["temporal/api/enums/v1/query.proto"] = enums.File_temporal_api_enums_v1_query_proto + importMap["temporal/api/enums/v1/reset.proto"] = enums.File_temporal_api_enums_v1_reset_proto + importMap["temporal/api/enums/v1/schedule.proto"] = enums.File_temporal_api_enums_v1_schedule_proto + importMap["temporal/api/enums/v1/task_queue.proto"] = enums.File_temporal_api_enums_v1_task_queue_proto + importMap["temporal/api/enums/v1/update.proto"] = enums.File_temporal_api_enums_v1_update_proto + importMap["temporal/api/enums/v1/workflow.proto"] = enums.File_temporal_api_enums_v1_workflow_proto + importMap["temporal/api/failure/v1/message.proto"] = failure.File_temporal_api_failure_v1_message_proto + importMap["temporal/api/filter/v1/message.proto"] = filter.File_temporal_api_filter_v1_message_proto + importMap["temporal/api/history/v1/message.proto"] = history.File_temporal_api_history_v1_message_proto + importMap["temporal/api/namespace/v1/message.proto"] = namespace.File_temporal_api_namespace_v1_message_proto + importMap["temporal/api/nexus/v1/message.proto"] = nexus.File_temporal_api_nexus_v1_message_proto + importMap["temporal/api/protocol/v1/message.proto"] = protocol.File_temporal_api_protocol_v1_message_proto + importMap["temporal/api/query/v1/message.proto"] = query.File_temporal_api_query_v1_message_proto + importMap["temporal/api/replication/v1/message.proto"] = replication.File_temporal_api_replication_v1_message_proto + importMap["temporal/api/rules/v1/message.proto"] = rules.File_temporal_api_rules_v1_message_proto + importMap["temporal/api/schedule/v1/message.proto"] = schedule.File_temporal_api_schedule_v1_message_proto + importMap["temporal/api/sdk/v1/task_complete_metadata.proto"] = sdk.File_temporal_api_sdk_v1_task_complete_metadata_proto + importMap["temporal/api/sdk/v1/user_metadata.proto"] = 
sdk.File_temporal_api_sdk_v1_user_metadata_proto + importMap["temporal/api/sdk/v1/worker_config.proto"] = sdk.File_temporal_api_sdk_v1_worker_config_proto + importMap["temporal/api/taskqueue/v1/message.proto"] = taskqueue.File_temporal_api_taskqueue_v1_message_proto + importMap["temporal/api/update/v1/message.proto"] = update.File_temporal_api_update_v1_message_proto + importMap["temporal/api/version/v1/message.proto"] = version.File_temporal_api_version_v1_message_proto + importMap["temporal/api/worker/v1/message.proto"] = worker.File_temporal_api_worker_v1_message_proto + importMap["temporal/api/workflow/v1/message.proto"] = workflow.File_temporal_api_workflow_v1_message_proto + importMap["temporal/api/workflowservice/v1/request_response.proto"] = workflowservice.File_temporal_api_workflowservice_v1_request_response_proto +} diff --git a/cmd/tools/getproto/main.go b/cmd/tools/getproto/main.go new file mode 100644 index 00000000000..f1e7113ca40 --- /dev/null +++ b/cmd/tools/getproto/main.go @@ -0,0 +1,184 @@ +package main + +import ( + "flag" + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + + expmaps "golang.org/x/exp/maps" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/types/descriptorpb" +) + +var ( + matchImport = regexp.MustCompile(`^\s*import\s+"([^"]+\.proto)"\s*;\s*$`) + versionSuffix = regexp.MustCompile(`^(.*)/v\d+$`) + + // set by files.go if present + importMap map[string]protoreflect.FileDescriptor +) + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} + +func findProtoImports() []string { + importMap := make(map[string]struct{}) + fatalIfErr(filepath.WalkDir("proto/internal", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.Type().IsRegular() && strings.HasSuffix(path, ".proto") { + protoFile, err := os.ReadFile(path) + 
// mangle converts a proto file path into the Go identifier that
// protoc-gen-go emits for the file's descriptor, e.g.
// "temporal/api/common/v1/message.proto" ->
// "File_temporal_api_common_v1_message_proto".
func mangle(p string) string {
	r := strings.NewReplacer("/", "_", ".", "_")
	return "File_" + r.Replace(p)
}
+ +package main + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + +`) + for _, i := range goImports { + fmt.Fprintf(out, "\t%s %q\n", goImportsMap[i], i) + } + fmt.Fprintf(out, `) + +func init() { + importMap = make(map[string]protoreflect.FileDescriptor) +`) + for _, i := range protoImports { + fmt.Fprintf(out, "\timportMap[%q] = %s.%s\n", i, protoToPackage[i], mangle(i)) + } + out.WriteString("}\n") +} + +func addImports(missing []string) { + newImportMap := make(map[string]struct{}) + for i, _ := range importMap { + newImportMap[i] = struct{}{} + } + for _, i := range missing { + newImportMap[i] = struct{}{} + } + + genFileList(expmaps.Keys(newImportMap)) + fmt.Println("") + os.Exit(0) +} + +func initSeeds() { + genFileList(findProtoImports()) + fmt.Println("") + os.Exit(0) +} + +func checkImports(files map[string]protoreflect.FileDescriptor) { + missing := make(map[string]struct{}) + for _, fd := range files { + imports := fd.Imports() + num := imports.Len() + for i := range num { + imp := imports.Get(i).Path() + if strings.HasPrefix(imp, "temporal/api/") || strings.HasPrefix(imp, "google/") { + if _, ok := files[imp]; !ok { + missing[imp] = struct{}{} + } + } + } + } + if len(missing) > 0 { + addImports(expmaps.Keys(missing)) // doesn't return + } +} + +func main() { + out := flag.String("out", "", "where to put the serialized FileDescriptorSet") + flag.Parse() + + if *out == "" { + flag.Usage() + os.Exit(1) + } + + if len(importMap) == 0 { + initSeeds() // doesn't return + } + + checkImports(importMap) // doesn't return if any errors + + set := &descriptorpb.FileDescriptorSet{} + for _, fd := range importMap { + set.File = append(set.File, protodesc.ToFileDescriptorProto(fd)) + } + + b, err := proto.Marshal(set) + fatalIfErr(err) + fatalIfErr(os.WriteFile(*out, b, 0644)) +} diff --git a/cmd/tools/getproto/run.sh b/cmd/tools/getproto/run.sh new file mode 100755 index 00000000000..b5cb1efca89 --- /dev/null +++ b/cmd/tools/getproto/run.sh @@ -0,0 
+1,12 @@ +#!/bin/sh + +set -e + +# Run getproto (go) in a loop until it successfully resolves all imports +while :; do + out=$(go run ./cmd/tools/getproto "$@") + ret=$? + if [ "$out" != "" ]; then + exit $ret + fi +done diff --git a/cmd/tools/optimize-test-sharding/main.go b/cmd/tools/optimize-test-sharding/main.go new file mode 100644 index 00000000000..de1c765d348 --- /dev/null +++ b/cmd/tools/optimize-test-sharding/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "os" + + optimizetestsharding "go.temporal.io/server/tools/optimize-test-sharding" +) + +func main() { + if err := optimizetestsharding.Main(); err != nil { + fmt.Fprintln(os.Stderr, "Error:", err) + os.Exit(1) + } +} diff --git a/cmd/tools/parallelize/main.go b/cmd/tools/parallelize/main.go new file mode 100644 index 00000000000..2dc661d68c2 --- /dev/null +++ b/cmd/tools/parallelize/main.go @@ -0,0 +1,15 @@ +package main + +import ( + "fmt" + "os" + + "go.temporal.io/server/tools/parallelize" +) + +func main() { + if err := parallelize.Main(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/cmd/tools/protoc-gen-go-chasm/main.go b/cmd/tools/protoc-gen-go-chasm/main.go new file mode 100644 index 00000000000..feb90ba1b2e --- /dev/null +++ b/cmd/tools/protoc-gen-go-chasm/main.go @@ -0,0 +1,318 @@ +package main + +import ( + "errors" + "fmt" + "io" + "slices" + "strings" + + routingspb "go.temporal.io/server/api/routing/v1" + "go.temporal.io/server/cmd/tools/codegen" + "google.golang.org/protobuf/compiler/protogen" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/descriptorpb" + "google.golang.org/protobuf/types/pluginpb" +) + +const generatedFilenameExtension = "_client.pb.go" + +type writer struct { + builder strings.Builder + indentation int +} + +func (w *writer) print(f string, args ...any) { + // Ignoring error as strings.Builder.WriteString never returns an error. 
+ for i := 0; i < w.indentation; i++ { + _, _ = w.builder.WriteString("\t") + } + _, _ = fmt.Fprintf(&w.builder, f, args...) +} + +func (w *writer) println(f string, args ...any) { + w.print(f, args...) + _, _ = w.builder.WriteString("\n") +} + +func (w *writer) indent() { + w.indentation++ +} + +func (w *writer) unindent() { + if w.indentation <= 0 { + // nolint: forbidigo + panic("unmatched unindent") + } + w.indentation-- +} + +type Plugin struct { + *protogen.Plugin +} + +func New() *Plugin { + p := &Plugin{} + + return p +} + +func (p *Plugin) Run(plugin *protogen.Plugin) error { + plugin.SupportedFeatures = uint64(pluginpb.CodeGeneratorResponse_FEATURE_SUPPORTS_EDITIONS | pluginpb.CodeGeneratorResponse_FEATURE_PROTO3_OPTIONAL) + plugin.SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO3 + plugin.SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023 + p.Plugin = plugin + + for _, file := range plugin.Files { + if !file.Generate { + continue + } + if !strings.Contains(string(file.GoImportPath), "go.temporal.io/server/chasm/lib") { + continue + } + if len(file.Services) == 0 { + continue + } + // create the file + generatedFile := p.NewGeneratedFile(file.GeneratedFilenamePrefix+generatedFilenameExtension, file.GoImportPath) + + w := &writer{} + + w.println("// Code generated by protoc-gen-go-chasm. 
DO NOT EDIT.") + w.println("package %s", file.GoPackageName) + w.println("") + w.println("import (") + w.indent() + w.println(`"context"`) + w.println(`"time"`) + w.println("\n") + w.println(`"go.temporal.io/server/client/history"`) + w.println(`"go.temporal.io/server/common"`) + w.println(`"go.temporal.io/server/common/backoff"`) + w.println(`"go.temporal.io/server/common/config"`) + w.println(`"go.temporal.io/server/common/dynamicconfig"`) + w.println(`"go.temporal.io/server/common/headers"`) + w.println(`"go.temporal.io/server/common/log"`) + w.println(`"go.temporal.io/server/common/membership"`) + w.println(`"go.temporal.io/server/common/metrics"`) + w.println(`"google.golang.org/grpc"`) + w.unindent() + w.println(")") + + for _, svc := range file.Services { + if err := p.genClient(w, svc); err != nil { + return err + } + } + + if _, err := io.WriteString(generatedFile, w.builder.String()); err != nil { + return err + } + } + + return nil +} + +func genAssignShard(m *protogen.Method) (string, error) { + opts, err := routingOptions(m) + if err != nil { + return "", err + } + if opts == nil { + return "", fmt.Errorf("no routing directive specified on %s", m.Desc.FullName()) + } + if opts.Random && (opts.NamespaceId != "" || opts.BusinessId != "") { + return "", fmt.Errorf("random directive cannot be combined with namespace_id or business_id on %s", m.Desc.FullName()) + } + if opts.Random { + return "shardID := int32(rand.Intn(int(c.numShards)) + 1)", nil + } + if opts.BusinessId == "" { + return "", fmt.Errorf("business_id directive empty on %s", m.Desc.FullName()) + } + if opts.Random { + return "", fmt.Errorf("random directive cannot be combined with namespace_id or business_id on %s", m.Desc.FullName()) + } + + namespaceIDField := opts.NamespaceId + if namespaceIDField == "" { + namespaceIDField = "namespace_id" + } + + namespaceIDFieldGetter, err := goFieldPath(m, namespaceIDField) + if err != nil { + return "", fmt.Errorf("unable to resolve namespace_id 
field path %q: %w", namespaceIDField, err) + } + businessIDFieldGetter, err := goFieldPath(m, opts.BusinessId) + if err != nil { + return "", fmt.Errorf("unable to resolve business_id field path %q: %w", opts.BusinessId, err) + } + + return fmt.Sprintf("shardID := common.WorkflowIDToHistoryShard(request%s, request%s, c.numShards)", namespaceIDFieldGetter, businessIDFieldGetter), nil +} + +func goFieldPath(m *protogen.Method, path string) (string, error) { + parts := strings.Split(path, ".") + field := m.Input + goPath := "" + for _, part := range parts { + fieldName := codegen.SnakeCaseToPascalCase(part) + i := slices.IndexFunc(field.Fields, func(f *protogen.Field) bool { + return f.GoName == fieldName + }) + if i < 0 { + return "", fmt.Errorf("field %s not found in %s", part, field.Desc.FullName()) + } + field = field.Fields[i].Message + // Convert to getter form + goPath += "." + "Get" + fieldName + "()" + } + return goPath, nil +} + +func routingOptions(m *protogen.Method) (*routingspb.RoutingOptions, error) { + opts, ok := proto.GetExtension(m.Desc.Options(), routingspb.E_Routing).(*routingspb.RoutingOptions) + if !ok { + return nil, errors.New("no routing options extension found") + } + return opts, nil +} + +func (p *Plugin) genClient(w *writer, svc *protogen.Service) error { + structName := fmt.Sprintf("%sLayeredClient", svc.GoName) + w.println("// %s is a client for %s.", structName, svc.GoName) + w.println("type %s struct {", structName) + w.indent() + w.println("metricsHandler metrics.Handler") + w.println("numShards int32") + w.println("redirector history.Redirector[%sClient]", svc.GoName) + w.println("retryPolicy backoff.RetryPolicy") + w.unindent() + w.println("}") + + ctorName := fmt.Sprintf("New%s", structName) + w.println("// %s initializes a new %s.", ctorName, structName) + w.println("func %s(", ctorName) + w.indent() + w.println("dc *dynamicconfig.Collection,") + w.println("rpcFactory common.RPCFactory,") + w.println("monitor 
membership.Monitor,") + w.println("config *config.Persistence,") + w.println("logger log.Logger,") + w.println("metricsHandler metrics.Handler,") + w.unindent() + w.println(") (%sClient, error) {", svc.GoName) + w.indent() // start ctor body + w.println("resolver, err := monitor.GetResolver(primitives.HistoryService)") + w.println("if err != nil {") + w.indent() + w.println("return nil, err") + w.unindent() + w.println("}") + w.println("connections := history.NewConnectionPool(resolver, rpcFactory, New%sClient)", svc.GoName) + w.println("var redirector history.Redirector[%sClient]", svc.GoName) + w.println("if dynamicconfig.HistoryClientOwnershipCachingEnabled.Get(dc)() {") + w.indent() // start if + w.println("redirector = history.NewCachingRedirector(") + w.indent() // start args + w.println("connections,") + w.println("resolver,") + w.println("logger,") + w.println("dynamicconfig.HistoryClientOwnershipCachingStaleTTL.Get(dc),") + w.unindent() // close args + w.println(")") + w.unindent() // close if + w.println("} else {") + w.indent() // start else + w.println("redirector = history.NewBasicRedirector(connections, resolver)") + w.unindent() // close else + w.println("}") + w.println("return &%s{", structName) + w.indent() // start struct literal + w.println("metricsHandler: metricsHandler,") + w.println("redirector: redirector,") + w.println("numShards: config.NumHistoryShards,") + w.println("retryPolicy: common.CreateHistoryClientRetryPolicy(),") + w.unindent() // close struct literal + w.println("}, nil") + w.unindent() // close ctor body + w.println("}") + + for _, method := range svc.Methods { + w.println("func (c *%s) call%sNoRetry(", structName, method.GoName) + w.indent() + w.println("ctx context.Context,") + w.println("request *%s,", method.Input.GoIdent.GoName) + w.println("opts ...grpc.CallOption,") + w.unindent() + w.println(") (*%s, error) {", method.Output.GoIdent.GoName) + w.indent() + w.println("var response *%s", method.Output.GoIdent.GoName) + 
w.println("var err error") + w.println("startTime := time.Now().UTC()") + w.println("// the caller is a namespace, hence the tag below.") + w.println("caller := headers.GetCallerInfo(ctx).CallerName") + w.println("metricsHandler := c.metricsHandler.WithTags(") + w.indent() // start args + w.println(`metrics.OperationTag("%s.%s"),`, svc.GoName, method.GoName) + w.println("metrics.NamespaceTag(caller),") + w.println("metrics.ServiceRoleTag(metrics.HistoryRoleTagValue),") + w.unindent() // close args + w.println(")") + w.println("metrics.ClientRequests.With(metricsHandler).Record(1)") + w.println("defer func() {") + w.indent() // start defer + w.println("if err != nil {") + w.indent() // start if + w.println("metrics.ClientFailures.With(metricsHandler).Record(1, metrics.ServiceErrorTypeTag(err))") + w.unindent() // close if + w.println("}") + w.println("metrics.ClientLatency.With(metricsHandler).Record(time.Since(startTime))") + w.unindent() // close defer + w.println("}()") + assignShard, err := genAssignShard(method) + if err != nil { + return err + } + w.println("%s", assignShard) + w.println("op := func(ctx context.Context, client %sClient) error {", svc.GoName) + w.indent() + w.println("var err error") + w.println("ctx, cancel := context.WithTimeout(ctx, history.DefaultTimeout)") + w.println("defer cancel()") + w.println("response, err = client.%s(ctx, request, opts...)", method.GoName) + w.println("return err") + w.unindent() + w.println("}") + w.println("err = c.redirector.Execute(ctx, shardID, op)") + w.println("return response, err") + w.unindent() + w.println("}") + + w.println("func (c *%s) %s(", structName, method.GoName) + w.indent() + w.println("ctx context.Context,") + w.println("request *%s,", method.Input.GoIdent.GoName) + w.println("opts ...grpc.CallOption,") + w.unindent() + w.println(") (*%s, error) {", method.Output.GoIdent.GoName) + w.indent() + w.println("call := func(ctx context.Context) (*%s, error) {", method.Output.GoIdent.GoName) + 
w.indent() + w.println("return c.call%sNoRetry(ctx, request, opts...)", method.GoName) + w.unindent() + w.println("}") + w.println("return backoff.ThrottleRetryContextWithReturn(ctx, call, c.retryPolicy, common.IsServiceClientTransientError)") + w.unindent() + w.println("}") + } + return nil +} + +func main() { + p := New() + + opts := protogen.Options{} + + opts.Run(p.Run) +} diff --git a/cmd/tools/protogen/main.go b/cmd/tools/protogen/main.go new file mode 100644 index 00000000000..cf2c00b2e6e --- /dev/null +++ b/cmd/tools/protogen/main.go @@ -0,0 +1,477 @@ +package main + +import ( + "context" + "errors" + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + + "github.com/fatih/color" +) + +var cyan = color.New(color.FgHiCyan, color.Bold) + +func info(format string, args ...any) { + log.Println(cyan.Sprintf(format, args...)) +} + +func runCommand(ctx context.Context, command string, args ...string) error { + cmd := exec.CommandContext(ctx, command, args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func copyRecursive(src, dst string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return fmt.Errorf("error stating source %s: %w", src, err) + } + + if !srcInfo.IsDir() { + return fmt.Errorf("source %s is not a directory", src) + } + + err = os.MkdirAll(dst, srcInfo.Mode()) + if err != nil { + return fmt.Errorf("error creating destination directory %s: %w", dst, err) + } + + entries, err := os.ReadDir(src) + if err != nil { + return fmt.Errorf("error reading source directory %s: %w", src, err) + } + + for _, entry := range entries { + srcPath := filepath.Join(src, entry.Name()) + dstPath := filepath.Join(dst, entry.Name()) + srcInfo, err := entry.Info() + if err != nil { + return fmt.Errorf("error getting info for %s: %w", srcPath, err) + } + + if entry.IsDir() { + if err := copyRecursive(srcPath, dstPath); err != nil { + return err + } + } else { + // Fix imports in the generated mock file + content, err := os.ReadFile(srcPath) + if err != nil { + return fmt.Errorf("error reading file %s: %w", srcPath, err) + } + + if err := os.WriteFile(dstPath, []byte(content), srcInfo.Mode()); err != nil { + return fmt.Errorf("error writing file %s: %w", dstPath, err) + } + } + } + + return nil +} + +// exists checks if a path exists +func exists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func existsDir(path string) bool { + stat, err := os.Stat(path) + return err == nil && stat.IsDir() +} + +// replaceInFile replaces text in a file using sed-like patterns +func replaceInFile(filePath, oldPattern, newPattern string) error { + content, err := os.ReadFile(filePath) + if err != nil { + return err + } + + // Convert sed pattern to Go regex + re, err := regexp.Compile(oldPattern) + if err != nil { + return err + } + + newContent := re.ReplaceAllString(string(content), newPattern) + return os.WriteFile(filePath, []byte(newContent), 0644) +} + +type generator struct { + 
rootDir string + tempOut string + tempProtoRoot string + protoOut string + protoBackup string + apiBinpb string + protoRoot string + protogenBin string + goimportsBin string + mockgenBin string + protocGenGoBin string + protocGenGoGrpcBin string + protocGenGoHelpersBin string + protocGenGoChasmBin string + chasmLibDirs []string +} + +func newGenerator() (*generator, error) { + var gen generator + flag.StringVar(&gen.protoOut, "proto-out", "", "Proto output directory (required)") + flag.StringVar(&gen.protoRoot, "proto-root", "", "Proto root directory (required)") + flag.StringVar(&gen.rootDir, "root", "", "Root directory (required)") + flag.StringVar(&gen.apiBinpb, "api-binpb", "", "Path to API binpb file (required)") + flag.StringVar(&gen.protogenBin, "protogen-bin", "", "Path to protogen binary (required)") + flag.StringVar(&gen.goimportsBin, "goimports-bin", "", "Path to goimports binary (required)") + flag.StringVar(&gen.mockgenBin, "mockgen-bin", "", "Path to mockgen binary (required)") + flag.StringVar(&gen.protocGenGoBin, "protoc-gen-go-bin", "", "Path to protoc-gen-go binary (required)") + flag.StringVar(&gen.protocGenGoGrpcBin, "protoc-gen-go-grpc-bin", "", "Path to protoc-gen-go-grpc binary (required)") + flag.StringVar(&gen.protocGenGoChasmBin, "protoc-gen-go-chasm-bin", "", "Path to protoc-gen-go-chasm binary (required)") + flag.StringVar(&gen.protocGenGoHelpersBin, "protoc-gen-go-helpers-bin", "", "Path to protoc-gen-go-helpers binary (required)") + flag.Parse() + + // Validate required flags + if gen.protoOut == "" || gen.protoRoot == "" || gen.rootDir == "" || gen.apiBinpb == "" || + gen.protogenBin == "" || gen.goimportsBin == "" || gen.mockgenBin == "" || + gen.protocGenGoBin == "" || gen.protocGenGoGrpcBin == "" || gen.protocGenGoHelpersBin == "" || + gen.protocGenGoChasmBin == "" { + flag.Usage() + return nil, errors.New("all flags are required") + } + + chasmDir := filepath.Join(gen.rootDir, "chasm", "lib") + + ls, err := os.ReadDir(chasmDir) + 
if err != nil { + return nil, fmt.Errorf("error reading chasm/lib directory %s: %w", chasmDir, err) + } + gen.chasmLibDirs = make([]string, 0, len(ls)) + for _, entry := range ls { + if !entry.IsDir() { + continue + } + gen.chasmLibDirs = append(gen.chasmLibDirs, filepath.Join(chasmDir, entry.Name())) + } + gen.tempOut = gen.protoOut + ".new" + gen.tempProtoRoot = gen.protoRoot + ".tmp" + gen.protoBackup = gen.protoOut + ".old" + return &gen, nil +} + +func (g *generator) removeExistingGenDirs() error { + for _, dir := range g.chasmLibDirs { + genDir := filepath.Join(dir, "gen") + if err := os.RemoveAll(genDir); err != nil { + return fmt.Errorf("error removing directory %s: %w", genDir, err) + } + } + return nil +} + +func (g *generator) backupProtos() error { + if exists(g.protoOut) { + if err := os.Rename(g.protoOut, g.protoBackup); err != nil { + return fmt.Errorf("error backing up proto output directory %s to %s: %w", g.protoOut, g.protoBackup, err) + } + } + return nil +} + +func (g *generator) prepareTempDirs() error { + // Remove and create new directory + if err := os.RemoveAll(g.tempOut); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing directory %s: %w", g.tempOut, err) + } + if err := os.MkdirAll(g.tempOut, 0755); err != nil { + return fmt.Errorf("error creating directory %s: %w", g.tempOut, err) + } + if err := os.RemoveAll(g.tempProtoRoot); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing directory %s: %w", g.tempOut, err) + } + if err := copyRecursive(g.protoRoot, g.tempProtoRoot); err != nil { + return fmt.Errorf("error copying proto root %s to %s: %w", g.protoRoot, g.tempProtoRoot, err) + } + return nil +} + +func (g *generator) copyChasmLibProtos() error { + for _, dir := range g.chasmLibDirs { + protoDir := filepath.Join(dir, "proto") + if !existsDir(protoDir) { + continue + } + destDir := filepath.Join(g.tempProtoRoot, "internal", "temporal", "server", "chasm", "lib", filepath.Base(dir)) + if err := 
os.MkdirAll(destDir, 0755); err != nil { + return fmt.Errorf("error creating directory %s: %w", destDir, err) + } + if err := copyRecursive(protoDir, filepath.Join(destDir, "proto")); err != nil { + return fmt.Errorf("error copying proto files from %s to %s: %w", protoDir, destDir, err) + } + protos, err := filepath.Glob(filepath.Join(destDir, "proto", "**", "*.proto")) + if err != nil { + return fmt.Errorf("error finding proto files in %s: %w", destDir, err) + } + for _, proto := range protos { + if err := replaceInFile(proto, `import "chasm`, `import "temporal/server/chasm`); err != nil { + return fmt.Errorf("error updating import path in %s: %w", proto, err) + } + } + } + return nil +} + +func (g *generator) runProtogen(ctx context.Context) error { + // Run protogen + protoArgs := []string{ + "--descriptor_set_in=" + g.apiBinpb, + "--root=" + filepath.Join(g.tempProtoRoot, "internal"), + "--rewrite-enum=BuildId_State:BuildId", + "--output=" + g.tempOut, + "-p", "plugin=protoc-gen-go=" + g.protocGenGoBin, + "-p", "plugin=protoc-gen-go-grpc=" + g.protocGenGoGrpcBin, + "-p", "plugin=protoc-gen-go-helpers=" + g.protocGenGoHelpersBin, + "-p", "plugin=protoc-gen-go-chasm=" + g.protocGenGoChasmBin, + "-p", "go-grpc_out=paths=source_relative:" + g.tempOut, + "-p", "go-helpers_out=paths=source_relative:" + g.tempOut, + "-p", "go-chasm_out=paths=source_relative:" + g.tempOut, + } + if err := runCommand(ctx, g.protogenBin, protoArgs...); err != nil { + return fmt.Errorf("error running protogen: %w", err) + } + return nil +} + +func (g *generator) runGoImports(ctx context.Context) error { + if err := runCommand(ctx, g.goimportsBin, "-w", g.tempOut); err != nil { + return fmt.Errorf("error running goimports: %w", err) + } + return nil +} + +func (g *generator) generateProtoMocks(ctx context.Context) error { + // Find service.pb.go and service_grpc.pb.go files + return filepath.Walk(g.tempOut, func(path string, info os.FileInfo, err error) error { + if err != nil { + return 
err + } + // Don't generate mocks for chasm lib files, it's not needed and broken at the moment. + if strings.HasPrefix(path, "api.new/temporal/server/chasm/lib") { + return nil + } + if strings.HasSuffix(path, "service.pb.go") || strings.HasSuffix(path, "service_grpc.pb.go") { + + // Convert service/ to servicemock/ and .go to .mock.go + dst := strings.ReplaceAll(path, "service/", "servicemock/") + dst = strings.ReplaceAll(dst, ".go", ".mock.go") + + // Get package name + dstDir := filepath.Dir(dst) + pkg := filepath.Base(filepath.Dir(dstDir)) + + // Create destination directory + if err := os.MkdirAll(dstDir, 0755); err != nil { + return fmt.Errorf("error creating directory %s: %v", dstDir, err) + } + + // Run mockgen + mockgenArgs := []string{ + "-package", pkg, + "-source", path, + "-destination", dst, + } + + if err := runCommand(ctx, g.mockgenBin, mockgenArgs...); err != nil { + return fmt.Errorf("error running mockgen: %v", err) + } + + // Fix imports in the generated mock file + content, err := os.ReadFile(dst) + if err != nil { + return fmt.Errorf("error reading file %s: %v", dst, err) + } + + // Replace the incorrect import path + newContent := strings.ReplaceAll(string(content), g.tempOut+"/temporal/server/", "") + + if err := os.WriteFile(dst, []byte(newContent), 0644); err != nil { + return fmt.Errorf("error writing file %s: %v", dst, err) + } + } + return nil + }) +} + +func (g *generator) modifyHistoryServiceFile() error { + historyServiceFile := filepath.Join(g.tempOut, "temporal", "server", "api", "historyservice", "v1", "service_grpc.pb.go") + + // Replace GetWorkflowExecutionHistory method signature + err := replaceInFile(historyServiceFile, + `GetWorkflowExecutionHistory\(context\.Context, \*GetWorkflowExecutionHistoryRequest\) \(\*GetWorkflowExecutionHistoryResponse, error\)`, + `GetWorkflowExecutionHistory(context.Context, *GetWorkflowExecutionHistoryRequest) (*GetWorkflowExecutionHistoryResponseWithRaw, error)`) + if err != nil { + return 
fmt.Errorf("error modifying GetWorkflowExecutionHistory: %w", err) + } + + // Replace RecordWorkflowTaskStarted method signature to return WithRawHistory response + err = replaceInFile(historyServiceFile, + `RecordWorkflowTaskStarted\(context\.Context, \*RecordWorkflowTaskStartedRequest\) \(\*RecordWorkflowTaskStartedResponse, error\)`, + `RecordWorkflowTaskStarted(context.Context, *RecordWorkflowTaskStartedRequest) (*RecordWorkflowTaskStartedResponseWithRawHistory, error)`) + if err != nil { + return fmt.Errorf("error modifying RecordWorkflowTaskStarted: %w", err) + } + + return nil +} + +func (g *generator) modifyMatchingServiceFile() error { + matchingServiceFile := filepath.Join(g.tempOut, "temporal", "server", "api", "matchingservice", "v1", "service_grpc.pb.go") + + // Replace PollWorkflowTaskQueue method signature to return WithRawHistory response + err := replaceInFile(matchingServiceFile, + `PollWorkflowTaskQueue\(context\.Context, \*PollWorkflowTaskQueueRequest\) \(\*PollWorkflowTaskQueueResponse, error\)`, + `PollWorkflowTaskQueue(context.Context, *PollWorkflowTaskQueueRequest) (*PollWorkflowTaskQueueResponseWithRawHistory, error)`) + if err != nil { + return fmt.Errorf("error modifying PollWorkflowTaskQueue: %w", err) + } + + return nil +} + +func (g *generator) moveProtoFiles() error { + sourceApiDir := filepath.Join(g.tempOut, "temporal", "server", "api") + return os.Rename(sourceApiDir, g.protoOut) +} + +func (g *generator) moveGeneratedChasmFiles() error { + sourceChasmDir := filepath.Join(g.tempOut, "temporal", "server", "chasm") + + return filepath.Walk(sourceChasmDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error walking source chasm directory %s: %w", sourceChasmDir, err) + } + + // Calculate relative path from sourceChasmDir + relPath, err := filepath.Rel(sourceChasmDir, path) + if err != nil { + return err + } + // Transform relPath to match the destination structure + // e.g., 
"lib//proto/v1/*" -> "lib//gen/pb/*" + re := regexp.MustCompile(`^lib/([^/]+)/proto/(v\d+)/(.*)$`) + parts := re.FindStringSubmatch(relPath) + if len(parts) < 4 { + return nil + } + dstPath := filepath.Join("chasm", "lib", parts[1], "gen", parts[1]+"pb", parts[2], parts[3]) + dirName := filepath.Dir(dstPath) // Ensure the destination path is a base path + + if err := os.MkdirAll(dirName, 0755); err != nil { + return fmt.Errorf("error creating directory %s: %w", dirName, err) + } + + if err := os.Rename(path, dstPath); err != nil { + return fmt.Errorf("error moving file %s to %s: %w", path, dstPath, err) + } + return nil + }) +} + +func (g *generator) cleanup(restoreOld bool) error { + // Remove temporary directories + if err := os.RemoveAll(g.tempProtoRoot); err != nil { + return fmt.Errorf("error removing temporary proto root %s: %w", g.tempProtoRoot, err) + } + if err := os.RemoveAll(g.tempOut); err != nil { + return fmt.Errorf("error removing temporary output directory %s: %w", g.tempOut, err) + } + if restoreOld { + // Restore the old proto output directory + if exists(g.protoBackup) { + if err := os.Rename(g.protoBackup, g.protoOut); err != nil { + return fmt.Errorf("error restoring proto output directory from %s to %s: %w", g.protoBackup, g.protoOut, err) + } + } + } else { + // Remove the old proto output directory if it exists + if err := os.RemoveAll(g.protoBackup); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing backup proto output directory %s: %w", g.protoBackup, err) + } + } + + return nil +} + +func main() { + ctx := context.Background() + gen, err := newGenerator() + if err != nil { + log.Fatalln("Error creating generator:", err) + } + + info("Removing existing CHASM gen directories...") + if err := gen.removeExistingGenDirs(); err != nil { + log.Fatalln("Error removing existing CHASM gen directories:", err) + } + + info("Backing up existing proto output directory...") + if err := gen.backupProtos(); err != nil { + 
log.Fatalln("Error backing up proto output directory:", err) + } + + genErr := generate(ctx, gen) + cleanupErr := gen.cleanup(genErr != nil) + if cleanupErr != nil { + cleanupErr = fmt.Errorf("error cleaning up after generation: %w", cleanupErr) + } + err = errors.Join(genErr, cleanupErr) + if err != nil { + log.Fatalln("Generation failed:", err) + } +} + +func generate(ctx context.Context, gen *generator) error { + info("Preparing temp directories...") + if err := gen.prepareTempDirs(); err != nil { + return fmt.Errorf("error preparing new gen directory: %w", err) + } + info("Copying CHASM lib protos...") + if err := gen.copyChasmLibProtos(); err != nil { + return fmt.Errorf("error copying CHASM lib protos: %w", err) + } + info("Running protoc for proto files...") + if err := gen.runProtogen(ctx); err != nil { + return fmt.Errorf("error running protogen: %w", err) + } + info("Running goimports for proto files...") + if err := gen.runGoImports(ctx); err != nil { + return fmt.Errorf("error running goimports: %w", err) + } + info("Generating proto mocks...") + if err := gen.generateProtoMocks(ctx); err != nil { + return fmt.Errorf("error generating mock files: %w", err) + } + info("Modifying history service server interface...") + if err := gen.modifyHistoryServiceFile(); err != nil { + return fmt.Errorf("error modifying history service file: %w", err) + } + info("Modifying matching service server interface...") + if err := gen.modifyMatchingServiceFile(); err != nil { + return fmt.Errorf("error modifying matching service file: %w", err) + } + info("Moving proto files into place...") + if err := gen.moveProtoFiles(); err != nil { + return fmt.Errorf("error moving proto files: %w", err) + } + info("Moving generated CHASM files into place...") + if err := gen.moveGeneratedChasmFiles(); err != nil { + return fmt.Errorf("error moving CHASM files: %w", err) + } + return nil +} diff --git a/cmd/tools/rpcwrappers/main.go b/cmd/tools/rpcwrappers/main.go deleted file mode 
100644 index 9be8e818ed9..00000000000 --- a/cmd/tools/rpcwrappers/main.go +++ /dev/null @@ -1,536 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package main - -import ( - "flag" - "fmt" - "io" - "os" - "reflect" - "strings" - "text/template" - - "go.temporal.io/api/taskqueue/v1" - "go.temporal.io/api/workflowservice/v1" - "golang.org/x/exp/slices" - - "go.temporal.io/server/api/adminservice/v1" - "go.temporal.io/server/api/historyservice/v1" - "go.temporal.io/server/api/matchingservice/v1" -) - -type ( - service struct { - name string - clientType reflect.Type - clientGenerator func(io.Writer, service) - } - - fieldWithPath struct { - field *reflect.StructField - path string - } -) - -var ( - services = []service{ - { - name: "frontend", - clientType: reflect.TypeOf((*workflowservice.WorkflowServiceClient)(nil)), - clientGenerator: generateFrontendOrAdminClient, - }, - { - name: "admin", - clientType: reflect.TypeOf((*adminservice.AdminServiceClient)(nil)), - clientGenerator: generateFrontendOrAdminClient, - }, - { - name: "history", - clientType: reflect.TypeOf((*historyservice.HistoryServiceClient)(nil)), - clientGenerator: generateHistoryClient, - }, - { - name: "matching", - clientType: reflect.TypeOf((*matchingservice.MatchingServiceClient)(nil)), - clientGenerator: generateMatchingClient, - }, - } - - longPollContext = map[string]bool{ - "client.frontend.ListArchivedWorkflowExecutions": true, - "client.frontend.PollActivityTaskQueue": true, - "client.frontend.PollWorkflowTaskQueue": true, - "client.matching.GetTaskQueueUserData": true, - "client.matching.ListNexusIncomingServices": true, - } - largeTimeoutContext = map[string]bool{ - "client.admin.GetReplicationMessages": true, - } - ignoreMethod = map[string]bool{ - // TODO stream APIs are not supported. do not generate. 
- "client.admin.StreamWorkflowReplicationMessages": true, - "metricsClient.admin.StreamWorkflowReplicationMessages": true, - "retryableClient.admin.StreamWorkflowReplicationMessages": true, - "client.history.StreamWorkflowReplicationMessages": true, - "metricsClient.history.StreamWorkflowReplicationMessages": true, - "retryableClient.history.StreamWorkflowReplicationMessages": true, - - // these are non-standard implementations. do not generate. - "client.history.DescribeHistoryHost": true, - "client.history.GetReplicationMessages": true, - "client.history.GetReplicationStatus": true, - "client.history.GetDLQTasks": true, - "client.history.DeleteDLQTasks": true, - "client.history.ListQueues": true, - "client.history.ListTasks": true, - // these need to pick a partition. too complicated. - "client.matching.AddActivityTask": true, - "client.matching.AddWorkflowTask": true, - "client.matching.PollActivityTaskQueue": true, - "client.matching.PollWorkflowTaskQueue": true, - "client.matching.QueryWorkflow": true, - // these do forwarding stats. too complicated. - "metricsClient.matching.AddActivityTask": true, - "metricsClient.matching.AddWorkflowTask": true, - "metricsClient.matching.PollActivityTaskQueue": true, - "metricsClient.matching.PollWorkflowTaskQueue": true, - "metricsClient.matching.QueryWorkflow": true, - } - // Fields to ignore when looking for the routing fields in a request object. 
- ignoreField = map[string]bool{ - // this is the workflow that sent a signal - "SignalWorkflowExecutionRequest.ExternalWorkflowExecution": true, - // this is the workflow that sent a cancel request - "RequestCancelWorkflowExecutionRequest.ExternalWorkflowExecution": true, - // this is the workflow that sent a terminate - "TerminateWorkflowExecutionRequest.ExternalWorkflowExecution": true, - // this is the parent for starting a child workflow - "StartWorkflowExecutionRequest.ParentExecutionInfo": true, - // these get routed to the parent - "RecordChildExecutionCompletedRequest.ChildExecution": true, - "VerifyChildExecutionCompletionRecordedRequest.ChildExecution": true, - } -) - -func panicIfErr(err error) { - if err != nil { - panic(err) - } -} - -func writeTemplatedCode(w io.Writer, service service, text string) { - panicIfErr(template.Must(template.New("code").Parse(text)).Execute(w, map[string]string{ - "ServiceName": service.name, - "ServicePackagePath": service.clientType.Elem().PkgPath(), - })) -} - -func findNestedField(t reflect.Type, name string, path string, maxDepth int) []fieldWithPath { - if t.Kind() != reflect.Struct || maxDepth <= 0 { - return nil - } - var out []fieldWithPath - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if ignoreField[t.Name()+"."+f.Name] { - continue - } - if f.Name == name { - out = append(out, fieldWithPath{field: &f, path: path + ".Get" + name + "()"}) - } - ft := f.Type - if ft.Kind() == reflect.Pointer { - out = append(out, findNestedField(ft.Elem(), name, path+".Get"+f.Name+"()", maxDepth-1)...) 
- } - } - return out -} - -func findOneNestedField(t reflect.Type, name string, path string, maxDepth int) fieldWithPath { - fields := findNestedField(t, name, path, maxDepth) - if len(fields) == 0 { - panic(fmt.Sprintf("Couldn't find %s in %s", name, t)) - } else if len(fields) > 1 { - panic(fmt.Sprintf("Found more than one %s in %s (%v)", name, t, fields)) - } - return fields[0] -} - -func makeGetHistoryClient(reqType reflect.Type) string { - // this magically figures out how to get a HistoryServiceClient from a request - t := reqType.Elem() // we know it's a pointer - - shardIdField := findNestedField(t, "ShardId", "request", 1) - workflowIdField := findNestedField(t, "WorkflowId", "request", 4) - taskTokenField := findNestedField(t, "TaskToken", "request", 2) - taskInfosField := findNestedField(t, "TaskInfos", "request", 1) - - found := len(shardIdField) + len(workflowIdField) + len(taskTokenField) + len(taskInfosField) - if found < 1 { - panic(fmt.Sprintf("Found no routing fields in %s", t)) - } else if found > 1 { - panic(fmt.Sprintf("Found more than one routing field in %s (%v, %v, %v, %v)", - t, shardIdField, workflowIdField, taskTokenField, taskInfosField)) - } - - switch { - case len(shardIdField) == 1: - return fmt.Sprintf("shardID := %s", shardIdField[0].path) - case len(workflowIdField) == 1: - return fmt.Sprintf("shardID := c.shardIDFromWorkflowID(request.NamespaceId, %s)", workflowIdField[0].path) - case len(taskTokenField) == 1: - return fmt.Sprintf(`taskToken, err := c.tokenSerializer.Deserialize(%s) - if err != nil { - return nil, err - } - shardID := c.shardIDFromWorkflowID(request.NamespaceId, taskToken.GetWorkflowId()) -`, taskTokenField[0].path) - case len(taskInfosField) == 1: - p := taskInfosField[0].path - // slice needs a tiny bit of extra handling for namespace - return fmt.Sprintf(`// All workflow IDs are in the same shard per request - if len(%s) == 0 { - return nil, serviceerror.NewInvalidArgument("missing TaskInfos") - } - shardID := 
c.shardIDFromWorkflowID(%s[0].NamespaceId, %s[0].WorkflowId)`, p, p, p) - default: - panic("not reached") - } -} - -func makeGetMatchingClient(reqType reflect.Type) string { - // this magically figures out how to get a MatchingServiceClient from a request - t := reqType.Elem() // we know it's a pointer - - var nsID, tq, tqt fieldWithPath - - switch t.Name() { - case "GetBuildIdTaskQueueMappingRequest": - // Pick a random node for this request, it's not associated with a specific task queue. - tq = fieldWithPath{path: "&taskqueuepb.TaskQueue{Name: fmt.Sprintf(\"not-applicable-%d\", rand.Int())}"} - tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} - nsID = findOneNestedField(t, "NamespaceId", "request", 1) - case "UpdateTaskQueueUserDataRequest", - "ReplicateTaskQueueUserDataRequest": - // Always route these requests to the same matching node by namespace. - tq = fieldWithPath{path: "&taskqueuepb.TaskQueue{Name: \"not-applicable\"}"} - tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} - nsID = findOneNestedField(t, "NamespaceId", "request", 1) - case "GetWorkerBuildIdCompatibilityRequest", - "UpdateWorkerBuildIdCompatibilityRequest", - "RespondQueryTaskCompletedRequest", - "ListTaskQueuePartitionsRequest", - "ApplyTaskQueueUserDataReplicationEventRequest": - tq = findOneNestedField(t, "TaskQueue", "request", 2) - tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_WORKFLOW"} - nsID = findOneNestedField(t, "NamespaceId", "request", 1) - case "DispatchNexusTaskRequest", - "PollNexusTaskQueueRequest", - "RespondNexusTaskCompletedRequest", - "RespondNexusTaskFailedRequest": - tq = findOneNestedField(t, "TaskQueue", "request", 2) - tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_NEXUS"} - nsID = findOneNestedField(t, "NamespaceId", "request", 1) - case "CreateNexusIncomingServiceRequest", - "UpdateNexusIncomingServiceRequest", - "ListNexusIncomingServicesRequest", - "DeleteNexusIncomingServiceRequest": - // Always route these requests to 
the same matching node by namespace. - tq = fieldWithPath{path: "&taskqueuepb.TaskQueue{Name: \"not-applicable\"}"} - tqt = fieldWithPath{path: "enumspb.TASK_QUEUE_TYPE_UNSPECIFIED"} - nsID = fieldWithPath{path: `"not-applicable"`} - default: - tq = findOneNestedField(t, "TaskQueue", "request", 2) - tqt = findOneNestedField(t, "TaskQueueType", "request", 2) - nsID = findOneNestedField(t, "NamespaceId", "request", 1) - } - - if nsID.path != "" && tq.path != "" && tqt.path != "" { - if tq.field != nil { - // Some task queue fields are full messages, some are just strings - isTaskQueueMessage := tq.field.Type == reflect.TypeOf((*taskqueue.TaskQueue)(nil)) - if !isTaskQueueMessage { - tq.path = fmt.Sprintf("&taskqueuepb.TaskQueue{Name: %s}", tq.path) - } - } - return fmt.Sprintf("client, err := c.getClientForTaskqueue(%s, %s, %s)", nsID.path, tq.path, tqt.path) - } - - panic("I don't know how to get a client from a " + t.String()) -} - -func writeTemplatedMethod(w io.Writer, service service, impl string, m reflect.Method, text string) { - key := fmt.Sprintf("%s.%s.%s", impl, service.name, m.Name) - if ignoreMethod[key] { - return - } - - mt := m.Type // should look like: func(context.Context, request reqType, opts []grpc.CallOption) (respType, error) - if !mt.IsVariadic() || - mt.NumIn() != 3 || - mt.NumOut() != 2 || - mt.In(0).String() != "context.Context" || - mt.Out(1).String() != "error" { - panic(m.Name + " doesn't look like a grpc handler method") - } - - reqType := mt.In(1) - respType := mt.Out(0) - - fields := map[string]string{ - "Method": m.Name, - "RequestType": reqType.String(), - "ResponseType": respType.String(), - "MetricPrefix": fmt.Sprintf("%s%sClient", strings.ToUpper(service.name[:1]), service.name[1:]), - } - if longPollContext[key] { - fields["LongPoll"] = "LongPoll" - } - if largeTimeoutContext[key] { - fields["WithLargeTimeout"] = "WithLargeTimeout" - } - if impl == "client" { - if service.name == "history" { - fields["GetClient"] = 
makeGetHistoryClient(reqType) - } else if service.name == "matching" { - fields["GetClient"] = makeGetMatchingClient(reqType) - } - } - - panicIfErr(template.Must(template.New("code").Parse(text)).Execute(w, fields)) -} - -func writeTemplatedMethods(w io.Writer, service service, impl string, text string) { - sType := service.clientType.Elem() - for n := 0; n < sType.NumMethod(); n++ { - writeTemplatedMethod(w, service, impl, sType.Method(n), text) - } -} - -func generateFrontendOrAdminClient(w io.Writer, service service) { - writeTemplatedCode(w, service, ` -package {{.ServiceName}} - -import ( - "context" - - "{{.ServicePackagePath}}" - "google.golang.org/grpc" -) -`) - - writeTemplatedMethods(w, service, "client", ` -func (c *clientImpl) {{.Method}}( - ctx context.Context, - request {{.RequestType}}, - opts ...grpc.CallOption, -) ({{.ResponseType}}, error) { - ctx, cancel := c.create{{or .LongPoll ""}}Context{{or .WithLargeTimeout ""}}(ctx) - defer cancel() - return c.client.{{.Method}}(ctx, request, opts...) -} -`) -} - -func generateHistoryClient(w io.Writer, service service) { - writeTemplatedCode(w, service, ` -package {{.ServiceName}} - -import ( - "context" - - "go.temporal.io/api/serviceerror" - "{{.ServicePackagePath}}" - "google.golang.org/grpc" -) -`) - - writeTemplatedMethods(w, service, "client", ` -func (c *clientImpl) {{.Method}}( - ctx context.Context, - request {{.RequestType}}, - opts ...grpc.CallOption, -) ({{.ResponseType}}, error) { - {{.GetClient}} - var response {{.ResponseType}} - op := func(ctx context.Context, client historyservice.HistoryServiceClient) error { - var err error - ctx, cancel := c.createContext(ctx) - defer cancel() - response, err = client.{{.Method}}(ctx, request, opts...) - return err - } - if err := c.executeWithRedirect(ctx, shardID, op); err != nil { - return nil, err - } - return response, nil -} -`) - // TODO: some methods call client.{{.Method}} directly and do not use executeWithRedirect. should we preserve this? 
- // GetDLQReplicationMessages - // GetDLQMessages - // PurgeDLQMessages - // MergeDLQMessages -} - -func generateMatchingClient(w io.Writer, service service) { - writeTemplatedCode(w, service, ` -package {{.ServiceName}} - -import ( - "context" - "fmt" - "math/rand" - - enumspb "go.temporal.io/api/enums/v1" - taskqueuepb "go.temporal.io/api/taskqueue/v1" - "{{.ServicePackagePath}}" - "google.golang.org/grpc" -) -`) - - writeTemplatedMethods(w, service, "client", ` -func (c *clientImpl) {{.Method}}( - ctx context.Context, - request {{.RequestType}}, - opts ...grpc.CallOption, -) ({{.ResponseType}}, error) { - - {{.GetClient}} - if err != nil { - return nil, err - } - ctx, cancel := c.create{{or .LongPoll ""}}Context(ctx) - defer cancel() - return client.{{.Method}}(ctx, request, opts...) -} -`) -} - -func generateMetricClient(w io.Writer, service service) { - writeTemplatedCode(w, service, ` -package {{.ServiceName}} - -import ( - "context" - - "{{.ServicePackagePath}}" - "google.golang.org/grpc" -) -`) - - writeTemplatedMethods(w, service, "metricsClient", ` -func (c *metricClient) {{.Method}}( - ctx context.Context, - request {{.RequestType}}, - opts ...grpc.CallOption, -) (_ {{.ResponseType}}, retError error) { - - metricsHandler, startTime := c.startMetricsRecording(ctx, "{{.MetricPrefix}}{{.Method}}") - defer func() { - c.finishMetricsRecording(metricsHandler, startTime, retError) - }() - - return c.client.{{.Method}}(ctx, request, opts...) 
-} -`) -} - -func generateRetryableClient(w io.Writer, service service) { - writeTemplatedCode(w, service, ` -package {{.ServiceName}} - -import ( - "context" - - "{{.ServicePackagePath}}" - "google.golang.org/grpc" - - "go.temporal.io/server/common/backoff" -) -`) - - writeTemplatedMethods(w, service, "retryableClient", ` -func (c *retryableClient) {{.Method}}( - ctx context.Context, - request {{.RequestType}}, - opts ...grpc.CallOption, -) ({{.ResponseType}}, error) { - var resp {{.ResponseType}} - op := func(ctx context.Context) error { - var err error - resp, err = c.client.{{.Method}}(ctx, request, opts...) - return err - } - err := backoff.ThrottleRetryContext(ctx, op, c.policy, c.isRetryable) - return resp, err -} -`) -} - -func callWithFile(f func(io.Writer, service), service service, filename string, licenseText string) { - w, err := os.Create(filename + "_gen.go") - if err != nil { - panic(err) - } - defer func() { - panicIfErr(w.Close()) - }() - if _, err := fmt.Fprintf(w, "%s\n// Code generated by cmd/tools/rpcwrappers. 
DO NOT EDIT.\n", licenseText); err != nil { - panic(err) - } - f(w, service) -} - -func readLicenseFile(path string) string { - text, err := os.ReadFile(path) - if err != nil { - panic(err) - } - var lines []string - for _, line := range strings.Split(string(text), "\n") { - lines = append(lines, strings.TrimRight("// "+line, " ")) - } - return strings.Join(lines, "\n") + "\n" -} - -func main() { - serviceFlag := flag.String("service", "", "which service to generate rpc client wrappers for") - licenseFlag := flag.String("licence_file", "../../LICENSE", "path to license to copy into header") - flag.Parse() - - i := slices.IndexFunc(services, func(s service) bool { return s.name == *serviceFlag }) - if i < 0 { - panic("unknown service") - } - svc := services[i] - - licenseText := readLicenseFile(*licenseFlag) - - callWithFile(svc.clientGenerator, svc, "client", licenseText) - callWithFile(generateMetricClient, svc, "metric_client", licenseText) - callWithFile(generateRetryableClient, svc, "retryable_client", licenseText) -} diff --git a/cmd/tools/sql/main.go b/cmd/tools/sql/main.go index 032671985d2..0856bf84859 100644 --- a/cmd/tools/sql/main.go +++ b/cmd/tools/sql/main.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package main import ( diff --git a/cmd/tools/tdbg/main.go b/cmd/tools/tdbg/main.go index e8660bfbb4a..f24c53f1323 100644 --- a/cmd/tools/tdbg/main.go +++ b/cmd/tools/tdbg/main.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package main import ( diff --git a/cmd/tools/test-runner/main.go b/cmd/tools/test-runner/main.go new file mode 100644 index 00000000000..68e51529f09 --- /dev/null +++ b/cmd/tools/test-runner/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "go.temporal.io/server/tools/testrunner" +) + +func main() { + testrunner.Main() +} diff --git a/cmd/tools/test/find_altered_tests.go b/cmd/tools/test/find_altered_tests.go new file mode 100644 index 00000000000..600f5f9870b --- /dev/null +++ b/cmd/tools/test/find_altered_tests.go @@ -0,0 +1,167 @@ +package main + +import ( + "flag" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" +) + +// find_altered_tests.go +// +// This program identifies altered or added test suite names based on the provided categories. +// It accepts the following inputs via command-line flags: +// +// - Category (-c): The category of tests to find (e.g., unit, integration, functional, functional_ndc). +// - Source Git Reference (-s): The source Git reference (commit SHA, branch, etc.). +// - Target Git Reference (-t): The target Git reference (commit SHA, branch, etc.). +// +// The program outputs environment variable assignments in the format `key=value`, +// where each key corresponds to a modified test suite category, and the value is a +// pipe-separated list of altered test suites. +// +// Usage: +// go run find_altered_tests.go -c -c ... 
-s -t + +// CategoryDirs maps test categories to their corresponding directories +// If you update these, please also update the corresponding dirs in the Makefile +// FUNCTIONAL_TEST_ROOT +// FUNCTIONAL_TEST_XDC_ROOT +// FUNCTIONAL_TEST_NDC_ROOT +// INTEGRATION_TEST_DIRS +// UNIT_TEST_DIRS +var CategoryDirs = map[string][]string{ + "unit": {"./client", "./common", "./internal", "./service", "./temporal", "./tools", "./cmd"}, + "integration": {"./common/persistence/tests", "./tools/tests", "./temporaltest"}, + "functional": {"./tests"}, + "functional_ndc": {"./tests/ndc"}, + "functional_xdc": {"./tests/xdc"}, +} + +func main() { + var categories multiFlag + var sourceRef string + var targetRef string + + flag.Var(&categories, "c", "Category of altered tests to find (can specify multiple)") + flag.StringVar(&sourceRef, "s", "", "Source Git reference (commit SHA, branch, etc.)") + flag.StringVar(&targetRef, "t", "", "Target Git reference (commit SHA, branch, etc.)") + flag.Parse() + + if len(categories) == 0 || sourceRef == "" || targetRef == "" { + log.Fatalf("Usage: find_altered_tests -c -c ... 
-s -t ") + } + + uniqCategories := make(map[string]struct{}) + for _, category := range categories { + if _, exists := CategoryDirs[category]; !exists { + log.Fatalf("Unknown category: %s", category) + } + uniqCategories[category] = struct{}{} + } + + modifiedFiles, err := getModifiedTestFiles(sourceRef, targetRef) + if err != nil { + log.Fatalf("Error getting modified test files: %v", err) + } + + for category := range uniqCategories { + dirs := CategoryDirs[category] + suites, err := findAlteredTestSuites(modifiedFiles, dirs) + if err != nil { + log.Fatalf("Error finding altered test suites for category %s: %v", category, err) + } + + // Join suites and output the result directly + joinedSuites := strings.Join(suites, "|") + fmt.Printf("modified_%s_test_suites=%s\n", category, joinedSuites) + } +} + +// multiFlag allows multiple instances of a flag +type multiFlag []string + +func (m *multiFlag) String() string { + return strings.Join(*m, ",") +} + +func (m *multiFlag) Set(value string) error { + *m = append(*m, value) + return nil +} + +// getModifiedTestFiles runs 'git diff' to find modified '_test.go' files between two references +func getModifiedTestFiles(sourceRef, targetRef string) ([]string, error) { + cmd := exec.Command("git", "diff", "--name-only", sourceRef, targetRef) + output, err := cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return nil, fmt.Errorf("git diff error: %w", exitErr) + } + return nil, fmt.Errorf("error running git diff: %w", err) + } + lines := strings.Split(strings.TrimSpace(string(output)), "\n") + var testFiles []string + for _, line := range lines { + if strings.HasSuffix(line, "_test.go") { + testFiles = append(testFiles, line) + } + } + return testFiles, nil +} + +// findAlteredTestSuites filters files by the test directories and extracts test suite names from them +func findAlteredTestSuites(files []string, testDirs []string) ([]string, error) { + var testSuites []string + suiteSet := 
make(map[string]struct{}) + + // to be detected, _test.go file must contain TestXXXXSuite function + testSuiteRegex := regexp.MustCompile(`func\s+(Test[a-zA-Z0-9_]*Suite)\s*\(`) + + for _, file := range files { + if _, err := os.Stat(file); err != nil { + continue + } + + filePath := filepath.Clean(file) + + if !isInAnyPath(filePath, testDirs) { + continue + } + + content, err := os.ReadFile(file) + if err != nil { + return nil, err + } + + // Find test suite names in the file + matches := testSuiteRegex.FindAllStringSubmatch(string(content), -1) + for _, match := range matches { + suiteName := match[1] + if _, exists := suiteSet[suiteName]; !exists { + suiteSet[suiteName] = struct{}{} + testSuites = append(testSuites, suiteName) + } + } + } + + return testSuites, nil +} + +// isInAnyPath checks if the file is within any of the specified paths +func isInAnyPath(file string, paths []string) bool { + for _, path := range paths { + cleanPath := filepath.Clean(path) + if !strings.HasSuffix(cleanPath, string(os.PathSeparator)) { + cleanPath += string(os.PathSeparator) + } + if strings.HasPrefix(file, cleanPath) { + return true + } + } + return false +} diff --git a/common/aggregate/bench_test.go b/common/aggregate/bench_test.go index 3500b789e29..1c0a3bbf7e7 100644 --- a/common/aggregate/bench_test.go +++ b/common/aggregate/bench_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package aggregate import ( diff --git a/common/aggregate/moving_window_average.go b/common/aggregate/moving_window_average.go index ad5d93cfc0c..3338346dc56 100644 --- a/common/aggregate/moving_window_average.go +++ b/common/aggregate/moving_window_average.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package aggregate import ( diff --git a/common/aggregate/noop_moving_window_average.go b/common/aggregate/noop_moving_window_average.go index 060c3d922e3..e4966bf53d7 100644 --- a/common/aggregate/noop_moving_window_average.go +++ b/common/aggregate/noop_moving_window_average.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package aggregate var NoopMovingWindowAverage MovingWindowAverage = newNoopMovingWindowAverage() diff --git a/common/api/metadata.go b/common/api/metadata.go index ef3fe4c03dd..bb584d38000 100644 --- a/common/api/metadata.go +++ b/common/api/metadata.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package api import "strings" @@ -36,11 +12,16 @@ type ( // against roles in claims. Access int32 + // Describes if the method supports long-polled requests. + Polling int32 + MethodMetadata struct { // Describes the scope of a method (whole cluster or inividual namespace). Scope Scope // Describes what level of access is needed for a method (advisory). Access Access + // Describes if long polling is supported by the method. + Polling Polling } ) @@ -64,98 +45,171 @@ const ( AccessAdmin ) +const ( + // Represents a missing Polling value. + PollingUnknown Polling = iota + // Method isn't capable of long-polling. + PollingNone + // Method can optionally return long-polled responses. + PollingCapable + // Method responses are always long-polled. 
+ PollingAlways +) + const ( WorkflowServicePrefix = "/temporal.api.workflowservice.v1.WorkflowService/" OperatorServicePrefix = "/temporal.api.operatorservice.v1.OperatorService/" + HistoryServicePrefix = "/temporal.server.api.historyservice.v1.HistoryService/" AdminServicePrefix = "/temporal.server.api.adminservice.v1.AdminService/" + MatchingServicePrefix = "/temporal.server.api.matchingservice.v1.MatchingService/" // Technically not a gRPC service, but still using this format for metadata. NexusServicePrefix = "/temporal.api.nexusservice.v1.NexusService/" ) var ( workflowServiceMetadata = map[string]MethodMetadata{ - "RegisterNamespace": {Scope: ScopeNamespace, Access: AccessAdmin}, - "DescribeNamespace": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "ListNamespaces": {Scope: ScopeCluster, Access: AccessReadOnly}, - "UpdateNamespace": {Scope: ScopeNamespace, Access: AccessAdmin}, - "DeprecateNamespace": {Scope: ScopeNamespace, Access: AccessAdmin}, - "StartWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "GetWorkflowExecutionHistory": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "GetWorkflowExecutionHistoryReverse": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "PollWorkflowTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondWorkflowTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondWorkflowTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite}, - "PollActivityTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite}, - "RecordActivityTaskHeartbeat": {Scope: ScopeNamespace, Access: AccessWrite}, - "RecordActivityTaskHeartbeatById": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondActivityTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondActivityTaskCompletedById": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondActivityTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondActivityTaskFailedById": {Scope: ScopeNamespace, Access: 
AccessWrite}, - "RespondActivityTaskCanceled": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondActivityTaskCanceledById": {Scope: ScopeNamespace, Access: AccessWrite}, - "PollNexusTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondNexusTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite}, - "RespondNexusTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite}, - "RequestCancelWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "SignalWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "SignalWithStartWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "ResetWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "TerminateWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "DeleteWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "ListOpenWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "ListClosedWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "ListWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "ListArchivedWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "ScanWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "CountWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "GetSearchAttributes": {Scope: ScopeCluster, Access: AccessReadOnly}, - "RespondQueryTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite}, - "ResetStickyTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite}, - "QueryWorkflow": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "DescribeWorkflowExecution": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "DescribeTaskQueue": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "GetClusterInfo": {Scope: ScopeCluster, Access: AccessReadOnly}, - "GetSystemInfo": {Scope: ScopeCluster, Access: AccessReadOnly}, - "ListTaskQueuePartitions": {Scope: ScopeNamespace, Access: AccessReadOnly}, - 
"CreateSchedule": {Scope: ScopeNamespace, Access: AccessWrite}, - "DescribeSchedule": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "UpdateSchedule": {Scope: ScopeNamespace, Access: AccessWrite}, - "PatchSchedule": {Scope: ScopeNamespace, Access: AccessWrite}, - "ListScheduleMatchingTimes": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "DeleteSchedule": {Scope: ScopeNamespace, Access: AccessWrite}, - "ListSchedules": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "UpdateWorkerBuildIdCompatibility": {Scope: ScopeNamespace, Access: AccessWrite}, - "GetWorkerBuildIdCompatibility": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "GetWorkerTaskReachability": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "UpdateWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite}, - "PollWorkflowExecutionUpdate": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "StartBatchOperation": {Scope: ScopeNamespace, Access: AccessWrite}, - "StopBatchOperation": {Scope: ScopeNamespace, Access: AccessWrite}, - "DescribeBatchOperation": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "ListBatchOperations": {Scope: ScopeNamespace, Access: AccessReadOnly}, + "RegisterNamespace": {Scope: ScopeNamespace, Access: AccessAdmin, Polling: PollingNone}, + "DescribeNamespace": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "ListNamespaces": {Scope: ScopeCluster, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateNamespace": {Scope: ScopeNamespace, Access: AccessAdmin, Polling: PollingNone}, + "DeprecateNamespace": {Scope: ScopeNamespace, Access: AccessAdmin, Polling: PollingNone}, + "StartWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "GetWorkflowExecutionHistory": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingCapable}, + "GetWorkflowExecutionHistoryReverse": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "PollWorkflowTaskQueue": {Scope: ScopeNamespace, 
Access: AccessWrite, Polling: PollingAlways}, + "RespondWorkflowTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondWorkflowTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PollActivityTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingAlways}, + "RecordActivityTaskHeartbeat": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RecordActivityTaskHeartbeatById": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondActivityTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondActivityTaskCompletedById": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondActivityTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondActivityTaskFailedById": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondActivityTaskCanceled": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondActivityTaskCanceledById": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "CountActivityExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "CountNexusOperationExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DeleteActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DeleteNexusOperationExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DescribeActivityExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingCapable}, + "DescribeNexusOperationExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingCapable}, + "PollActivityExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingAlways}, + "PollNexusOperationExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingAlways}, + 
"ListActivityExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "ListNexusOperationExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "RequestCancelActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RequestCancelNexusOperationExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "StartActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "StartNexusOperationExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "TerminateActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "TerminateNexusOperationExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PollNexusTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingAlways}, + "RespondNexusTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RespondNexusTaskFailed": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RequestCancelWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "SignalWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "SignalWithStartWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ResetWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "TerminateWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DeleteWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ListOpenWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "ListClosedWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "ListWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, 
Polling: PollingNone}, + "ListArchivedWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "ScanWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "CountWorkflowExecutions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "GetSearchAttributes": {Scope: ScopeCluster, Access: AccessReadOnly, Polling: PollingNone}, + "RespondQueryTaskCompleted": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ResetStickyTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ShutdownWorker": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ExecuteMultiOperation": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "QueryWorkflow": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DescribeWorkflowExecution": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DescribeTaskQueue": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "GetClusterInfo": {Scope: ScopeCluster, Access: AccessReadOnly, Polling: PollingNone}, + "GetSystemInfo": {Scope: ScopeCluster, Access: AccessReadOnly, Polling: PollingNone}, + "ListTaskQueuePartitions": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "CreateSchedule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DescribeSchedule": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateSchedule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PatchSchedule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ListScheduleMatchingTimes": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DeleteSchedule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ListSchedules": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + 
"CountSchedules": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateWorkerBuildIdCompatibility": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "GetWorkerBuildIdCompatibility": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateWorkerVersioningRules": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "GetWorkerVersioningRules": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "GetWorkerTaskReachability": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PollWorkflowExecutionUpdate": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingAlways}, + "StartBatchOperation": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "StopBatchOperation": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DescribeBatchOperation": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "ListBatchOperations": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateActivityOptions": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PauseActivity": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UnpauseActivity": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ResetActivity": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UpdateActivityExecutionOptions": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PauseActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UnpauseActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ResetActivityExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UpdateWorkflowExecutionOptions": {Scope: ScopeNamespace, 
Access: AccessWrite, Polling: PollingNone}, + "DescribeDeployment": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, // [cleanup-wv-pre-release] + "ListDeployments": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, // [cleanup-wv-pre-release] + "GetDeploymentReachability": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, // [cleanup-wv-pre-release] + "GetCurrentDeployment": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, // [cleanup-wv-pre-release] + "SetCurrentDeployment": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, // [cleanup-wv-pre-release] + "DescribeWorkerDeploymentVersion": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DescribeWorkerDeployment": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "SetWorkerDeploymentCurrentVersion": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "SetWorkerDeploymentRampingVersion": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "SetWorkerDeploymentManager": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "CreateWorkerDeployment": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DeleteWorkerDeployment": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "CreateWorkerDeploymentVersion": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UpdateWorkerDeploymentVersionComputeConfig": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ValidateWorkerDeploymentVersionComputeConfig": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DeleteWorkerDeploymentVersion": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UpdateWorkerDeploymentVersionMetadata": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ListWorkerDeployments": {Scope: ScopeNamespace, Access: 
AccessReadOnly, Polling: PollingNone}, + "CreateWorkflowRule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DescribeWorkflowRule": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DeleteWorkflowRule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ListWorkflowRules": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "TriggerWorkflowRule": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "RecordWorkerHeartbeat": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "ListWorkers": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DescribeWorker": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateTaskQueueConfig": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "FetchWorkerConfig": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "UpdateWorkerConfig": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "PauseWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "UnpauseWorkflowExecution": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, } operatorServiceMetadata = map[string]MethodMetadata{ - "AddSearchAttributes": {Scope: ScopeNamespace, Access: AccessAdmin}, - "RemoveSearchAttributes": {Scope: ScopeNamespace, Access: AccessAdmin}, - "ListSearchAttributes": {Scope: ScopeNamespace, Access: AccessReadOnly}, - "DeleteNamespace": {Scope: ScopeNamespace, Access: AccessAdmin}, - "AddOrUpdateRemoteCluster": {Scope: ScopeCluster, Access: AccessAdmin}, - "RemoveRemoteCluster": {Scope: ScopeCluster, Access: AccessAdmin}, - "ListClusters": {Scope: ScopeCluster, Access: AccessAdmin}, - "CreateNexusIncomingService": {Scope: ScopeCluster, Access: AccessAdmin}, - "UpdateNexusIncomingService": {Scope: ScopeCluster, Access: AccessAdmin}, - "DeleteNexusIncomingService": {Scope: 
ScopeCluster, Access: AccessAdmin}, - "GetNexusIncomingService": {Scope: ScopeCluster, Access: AccessAdmin}, - "ListNexusIncomingServices": {Scope: ScopeCluster, Access: AccessAdmin}, - "GetNexusOutgoingService": {Scope: ScopeNamespace, Access: AccessAdmin}, - "CreateNexusOutgoingService": {Scope: ScopeNamespace, Access: AccessAdmin}, - "UpdateNexusOutgoingService": {Scope: ScopeNamespace, Access: AccessAdmin}, - "DeleteNexusOutgoingService": {Scope: ScopeNamespace, Access: AccessAdmin}, - "ListNexusOutgoingServices": {Scope: ScopeNamespace, Access: AccessAdmin}, + "AddSearchAttributes": {Scope: ScopeNamespace, Access: AccessAdmin, Polling: PollingNone}, + "RemoveSearchAttributes": {Scope: ScopeNamespace, Access: AccessAdmin, Polling: PollingNone}, + "ListSearchAttributes": {Scope: ScopeNamespace, Access: AccessReadOnly, Polling: PollingNone}, + "DeleteNamespace": {Scope: ScopeNamespace, Access: AccessAdmin, Polling: PollingNone}, + "AddOrUpdateRemoteCluster": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "RemoveRemoteCluster": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "ListClusters": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "CreateNexusEndpoint": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "UpdateNexusEndpoint": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "DeleteNexusEndpoint": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "GetNexusEndpoint": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, + "ListNexusEndpoints": {Scope: ScopeCluster, Access: AccessAdmin, Polling: PollingNone}, } nexusServiceMetadata = map[string]MethodMetadata{ - "DispatchNexusTask": {Scope: ScopeNamespace, Access: AccessWrite}, + "DispatchNexusTask": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "DispatchByNamespaceAndTaskQueue": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + 
"DispatchByEndpoint": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "CompleteNexusOperation": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, + "CompleteNexusOperationChasm": {Scope: ScopeNamespace, Access: AccessWrite, Polling: PollingNone}, } ) @@ -176,7 +230,7 @@ func GetMethodMetadata(fullApiName string) MethodMetadata { } } -// BaseName returns just the method name from a fullly qualified name. +// MethodName returns just the method name from a fully qualified name. func MethodName(fullApiName string) string { index := strings.LastIndex(fullApiName, "/") if index > -1 { @@ -184,3 +238,11 @@ func MethodName(fullApiName string) string { } return fullApiName } + +func ServiceName(fullApiName string) string { + index := strings.LastIndex(fullApiName, "/") + if index > -1 { + return fullApiName[:index+1] + } + return "" +} diff --git a/common/api/metadata_test.go b/common/api/metadata_test.go index de0f408ddda..2861a504470 100644 --- a/common/api/metadata_test.go +++ b/common/api/metadata_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package api import ( @@ -33,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "go.temporal.io/api/operatorservice/v1" "go.temporal.io/api/workflowservice/v1" - "golang.org/x/exp/maps" + expmaps "golang.org/x/exp/maps" ) var publicMethodRgx = regexp.MustCompile("^[A-Z]") @@ -50,7 +26,7 @@ func TestOperatorServiceMetadata(t *testing.T) { func checkService(t *testing.T, tp reflect.Type, m map[string]MethodMetadata) { methods := getMethodNames(tp) - require.ElementsMatch(t, methods, maps.Keys(m), + require.ElementsMatch(t, methods, expmaps.Keys(m), "If you're adding a new method to Workflow/OperatorService, please add metadata for it in metadata.go") for _, method := range methods { @@ -125,3 +101,10 @@ func getMethodNames(tp reflect.Type) []string { } return out } + +func TestServiceName(t *testing.T) { + require.Equal(t, WorkflowServicePrefix, ServiceName(WorkflowServicePrefix+"SomeAPI")) + require.Equal(t, AdminServicePrefix, ServiceName(AdminServicePrefix+"SomeAPI")) + require.Equal(t, "", ServiceName("SomeAPI")) + require.Equal(t, "", ServiceName("")) +} diff --git a/common/archiver/README.md b/common/archiver/README.md index 925a14327e7..be0a6fdea14 100644 --- a/common/archiver/README.md +++ b/common/archiver/README.md @@ -2,14 +2,22 @@ This README explains how to add new Archiver implementations. -## Steps +There are two approaches: + +1. **Built-in implementation** — add the archiver directly to this repository (e.g., `filestore`, `gcloud`, `s3store`).
**We are not currently accepting contributions for new built-in archiver implementations.** Maintaining a growing set of built-in implementations places an ongoing maintenance burden on the team, so new implementations should use Option 2 instead. + +2. **Custom implementation via server option** — implement the archiver in an external package and inject it into the server at startup using `WithCustomHistoryArchiverFactory` / `WithCustomVisibilityArchiverFactory`. This is the recommended approach for all new archiver implementations. + +--- + +## Option 1: Built-in implementation (in-repo) **Step 1: Create a new package for your implementation** Create a new directory in the `archiver` folder. The structure should look like the following: ``` ./common/archiver - - filestore/ -- Filestore implementation + - filestore/ -- Filestore implementation - provider/ - provider.go -- Provider of archiver instances - yourImplementation/ @@ -28,17 +36,17 @@ type HistoryArchiver interface { // the resource that histories should be archived into. The implementor gets to determine how to interpret the URI. // The Archive method may or may not be automatically retried by the caller. The ArchiveOptions are used // to interact with these retries including giving the implementor the ability to cancel retries and record progress - // between retry attempts. + // between retry attempts. // This method will be invoked after a workflow passes its retention period. // It's possible that this method will be invoked for one workflow multiple times and potentially concurrently, // implementation should correctly handle the workflow not exist case and return nil error. Archive(context.Context, URI, *ArchiveHistoryRequest, ...ArchiveOption) error - + // Get is used to access an archived history. When context expires method should stop trying to fetch history. // The URI identifies the resource from which history should be accessed and it is up to the implementor to interpret this URI. 
// This method should thrift errors - see filestore as an example. Get(context.Context, URI, *GetHistoryRequest) (*GetHistoryResponse, error) - + // ValidateURI is used to define what a valid URI for an implementation is. ValidateURI(URI) error } @@ -48,17 +56,17 @@ type HistoryArchiver interface { ```go type VisibilityArchiver interface { - // Archive is used to archive one workflow visibility record. - // Check the Archive() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. - // The only difference is that the ArchiveOption parameter won't include an option for recording process. - // Please make sure your implementation is lossless. If any in-memory batching mechanism is used, then those batched records will be lost during server restarts. + // Archive is used to archive one workflow visibility record. + // Check the Archive() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. + // The only difference is that the ArchiveOption parameter won't include an option for recording process. + // Please make sure your implementation is lossless. If any in-memory batching mechanism is used, then those batched records will be lost during server restarts. // This method will be invoked when workflow closes. Note that because of conflict resolution, it is possible for a workflow to through the closing process multiple times, which means that this method can be invoked more than once after a workflow closes. Archive(context.Context, URI, *ArchiveVisibilityRequest, ...ArchiveOption) error - - // Query is used to retrieve archived visibility records. + + // Query is used to retrieve archived visibility records. // Check the Get() method of the HistoryArchiver interface in Step 2 for parameters' meaning and requirements. - // The request includes a string field called query, which describes what kind of visibility records should be returned. 
For example, it can be some SQL-like syntax query string. - // Your implementation is responsible for parsing and validating the query, and also returning all visibility records that match the query. + // The request includes a string field called query, which describes what kind of visibility records should be returned. For example, it can be some SQL-like syntax query string. + // Your implementation is responsible for parsing and validating the query, and also returning all visibility records that match the query. // Currently the maximum context timeout passed into the method is 3 minutes, so it's ok if this method takes a long time to run. Query(context.Context, URI, *QueryVisibilityRequest) (*QueryVisibilityResponse, error) @@ -69,21 +77,135 @@ type VisibilityArchiver interface { **Step 4: Update provider to provide access to your implementation** -Modify the `./provider/provider.go` file so that the `ArchiverProvider` knows how to create an instance of your archiver. -Also, add configs for you archiver to static yaml config files and modify the `HistoryArchiverProvider` +Modify the `./provider/provider.go` file so that the `ArchiverProvider` knows how to create an instance of your archiver. +Also, add configs for your archiver to static yaml config files and modify the `HistoryArchiverProvider` and `VisibilityArchiverProvider` struct in the `../common/service/config.go` accordingly. +--- + +## Option 2: Custom implementation via server option (external package) + +This approach lets you define archiver implementations in your own codebase and inject them into the Temporal server at startup, without modifying the server source. + +**Step 1: Implement the HistoryArchiver and VisibilityArchiver interfaces** + +Same interfaces as Steps 2 and 3 above. + +**Step 2: Implement CustomHistoryArchiverFactory and/or CustomVisibilityArchiverFactory** + +```go +// CustomHistoryArchiverFactory constructs a history archiver for a given URI scheme.
+// Return provider.ErrUnknownScheme to fall back to the built-in implementation for that scheme. +// If a non-nil archiver is returned, it takes precedence over built-in archiver implementations. +type CustomHistoryArchiverFactory interface { + NewCustomHistoryArchiver(provider.NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) +} + +// CustomVisibilityArchiverFactory constructs a visibility archiver for a given URI scheme. +// Return provider.ErrUnknownScheme to fall back to the built-in implementation for that scheme. +// If a non-nil archiver is returned, it takes precedence over built-in archiver implementations. +type CustomVisibilityArchiverFactory interface { + NewCustomVisibilityArchiver(provider.NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) +} +``` + +The params structs provide everything your factory needs to construct an archiver: + +```go +type NewCustomHistoryArchiverParams struct { + Scheme string + ExecutionManager persistence.ExecutionManager + Logger log.Logger + MetricsHandler metrics.Handler + Configs map[string]any // from archival.history.provider.customStores. in config yaml +} + +type NewCustomVisibilityArchiverParams struct { + Scheme string + Logger log.Logger + MetricsHandler metrics.Handler + Configs map[string]any // from archival.visibility.provider.customStores. 
in config yaml +} +``` + +Example factory implementation using the functional adapter types: + +```go +historyFactory := provider.CustomHistoryArchiverFactoryFunc(func(params provider.NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) { + if params.Scheme != "myscheme" { + return nil, provider.ErrUnknownScheme + } + return mypackage.NewHistoryArchiver(params.ExecutionManager, params.Logger, params.MetricsHandler, params.Configs) +}) + +visibilityFactory := provider.CustomVisibilityArchiverFactoryFunc(func(params provider.NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) { + if params.Scheme != "myscheme" { + return nil, provider.ErrUnknownScheme + } + return mypackage.NewVisibilityArchiver(params.Logger, params.MetricsHandler, params.Configs) +}) +``` + +**Step 3: Register the factories with the server** + +Pass the factories as server options when constructing the Temporal server: + +```go +s, err := temporal.NewServer( + temporal.WithConfig(cfg), + temporal.WithCustomHistoryArchiverFactory(historyFactory), + temporal.WithCustomVisibilityArchiverFactory(visibilityFactory), + // ... other options +) +``` + +**Step 4: Configure archival in your YAML config** + +Enable archival and configure the URI scheme for your implementation. 
Use `customStores` to pass arbitrary config key-values to your factory: + +```yaml +archival: + history: + state: "enabled" + enableRead: true + provider: + customStores: + myscheme: # must match the scheme in your URIs + endpoint: "https://my-storage.example.com" + bucketName: "temporal-history" + visibility: + state: "enabled" + enableRead: true + provider: + customStores: + myscheme: + endpoint: "https://my-storage.example.com" + bucketName: "temporal-visibility" + +namespaceDefaults: + archival: + history: + state: "enabled" + URI: "myscheme://temporal-history" + visibility: + state: "enabled" + URI: "myscheme://temporal-visibility" +``` + +The `customStores.` map is passed as `Configs` in the params to your factory. Built-in schemes (`filestore`, `gstorage`, `s3store`) continue to use their own config sections unless your factory handles them (see FAQ below). + +--- ## FAQ **If my Archive method can automatically be retried by caller how can I record and access progress between retries?** -ArchiverOptions is used to handle this. The following shows and example: +ArchiverOptions is used to handle this. The following shows an example: ```go func (a *Archiver) Archive( - ctx context.Context, - URI string, - request *ArchiveRequest, - opts ...ArchiveOption, + ctx context.Context, + URI string, + request *ArchiveRequest, + opts ...ArchiveOption, ) error { featureCatalog := GetFeatureCatalog(opts...) // this function is defined in options.go @@ -101,7 +223,7 @@ func (a *Archiver) Archive( // Record current progress if featureCatalog.ProgressManager != nil { if err := featureCatalog.ProgressManager.RecordProgress(ctx, progress); err != nil { - // log some error message and return error if needed. + // log some error message and return error if needed.
} } } @@ -111,17 +233,17 @@ func (a *Archiver) Archive( ```go func (a *Archiver) Archive( - ctx context.Context, - URI string, - request *ArchiveRequest, - opts ...ArchiveOption, + ctx context.Context, + URI string, + request *ArchiveRequest, + opts ...ArchiveOption, ) error { featureCatalog := GetFeatureCatalog(opts...) // this function is defined in options.go err := youArchiverImpl() if nonRetryableErr(err) { if featureCatalog.NonRetryableError != nil { - return featureCatalog.NonRetryableError() // when the caller gets this error type back it will not retry anymore. + return featureCatalog.NonRetryableError() // when the caller gets this error type back it will not retry anymore. } } } @@ -129,12 +251,17 @@ func (a *Archiver) Archive( **How does my history archiver implementation read history?** -The `archiver` package provides a utility class called `HistoryIterator` which is a wrapper of `ExecutionManager`. -Its usage is simpler than the `ExecutionManager` given in the `BootstrapContainer`, -so archiver implementations can choose to use it when reading workflow histories. -See the `historyIterator.go` file for more details. +The `archiver` package provides a utility class called `HistoryIterator` which is a wrapper of `ExecutionManager`. +Its usage is simpler than `ExecutionManager`, so archiver implementations can choose to use it when reading workflow histories. +See the `historyIterator.go` file for more details. Sample usage can be found in the filestore historyArchiver implementation. +**Can a custom factory override a built-in scheme like `filestore`?** + +Yes. The custom factory is always consulted first. If your factory returns a non-nil archiver for a scheme that is also built-in (e.g., `filestore`), your implementation takes precedence and the built-in one is never used. Only return `ErrUnknownScheme` for schemes you want to delegate to the built-in implementations. 
+ +Note that when overriding a built-in scheme, the `Configs` field in the params is populated from `customStores.` — not from the built-in config section (e.g., `filestore:`). If you need those config values, read them from `customStores` instead. + **Should my archiver define all its own error types?** Each archiver is free to define and return any errors it wants. However many common errors which diff --git a/common/archiver/archival_metadata.go b/common/archiver/archival_metadata.go index e072db61193..0761b61ea91 100644 --- a/common/archiver/archival_metadata.go +++ b/common/archiver/archival_metadata.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination archival_metadata_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination archival_metadata_mock.go package archiver @@ -31,9 +7,7 @@ import ( "strings" enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/server/common/config" - "go.temporal.io/server/common/dynamicconfig" ) @@ -97,16 +71,16 @@ func NewArchivalMetadata( ) ArchivalMetadata { historyConfig := NewArchivalConfig( historyState, - dc.GetStringProperty(dynamicconfig.HistoryArchivalState, historyState), - dc.GetBoolProperty(dynamicconfig.EnableReadFromHistoryArchival, historyReadEnabled), + dynamicconfig.HistoryArchivalState.WithDefault(historyState).Get(dc), + dynamicconfig.EnableReadFromHistoryArchival.WithDefault(historyReadEnabled).Get(dc), namespaceDefaults.History.State, namespaceDefaults.History.URI, ) visibilityConfig := NewArchivalConfig( visibilityState, - dc.GetStringProperty(dynamicconfig.VisibilityArchivalState, visibilityState), - dc.GetBoolProperty(dynamicconfig.EnableReadFromVisibilityArchival, visibilityReadEnabled), + dynamicconfig.VisibilityArchivalState.WithDefault(visibilityState).Get(dc), + dynamicconfig.EnableReadFromVisibilityArchival.WithDefault(visibilityReadEnabled).Get(dc), namespaceDefaults.Visibility.State, namespaceDefaults.Visibility.URI, ) diff --git a/common/archiver/archival_metadata_mock.go b/common/archiver/archival_metadata_mock.go index 09954cbae26..45c2e883806 100644 --- a/common/archiver/archival_metadata_mock.go +++ b/common/archiver/archival_metadata_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: archival_metadata.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package archiver -source archival_metadata.go -destination archival_metadata_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: archival_metadata.go // Package archiver is a generated GoMock package. package archiver @@ -31,14 +12,15 @@ package archiver import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/api/enums/v1" + enums "go.temporal.io/api/enums/v1" + gomock "go.uber.org/mock/gomock" ) // MockArchivalMetadata is a mock of ArchivalMetadata interface. type MockArchivalMetadata struct { ctrl *gomock.Controller recorder *MockArchivalMetadataMockRecorder + isgomock struct{} } // MockArchivalMetadataMockRecorder is the mock recorder for MockArchivalMetadata. 
@@ -90,6 +72,7 @@ func (mr *MockArchivalMetadataMockRecorder) GetVisibilityConfig() *gomock.Call { type MockArchivalConfig struct { ctrl *gomock.Controller recorder *MockArchivalConfigMockRecorder + isgomock struct{} } // MockArchivalConfigMockRecorder is the mock recorder for MockArchivalConfig. @@ -138,10 +121,10 @@ func (mr *MockArchivalConfigMockRecorder) GetClusterState() *gomock.Call { } // GetNamespaceDefaultState mocks base method. -func (m *MockArchivalConfig) GetNamespaceDefaultState() v1.ArchivalState { +func (m *MockArchivalConfig) GetNamespaceDefaultState() enums.ArchivalState { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetNamespaceDefaultState") - ret0, _ := ret[0].(v1.ArchivalState) + ret0, _ := ret[0].(enums.ArchivalState) return ret0 } diff --git a/common/archiver/constants.go b/common/archiver/constants.go index 117b6500222..a09a282fd04 100644 --- a/common/archiver/constants.go +++ b/common/archiver/constants.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package archiver import ( @@ -40,8 +16,6 @@ const ( ErrReasonInvalidURI = "URI is invalid" // ErrReasonInvalidArchiveRequest is the error reason for invalid archive request ErrReasonInvalidArchiveRequest = "archive request is invalid" - // ErrReasonConstructHistoryIterator is the error reason for failing to construct history iterator - ErrReasonConstructHistoryIterator = "failed to construct history iterator" // ErrReasonReadHistory is the error reason for failing to read history ErrReasonReadHistory = "failed to read history batches" // ErrReasonHistoryMutated is the error reason for mutated history @@ -55,8 +29,6 @@ var ( ErrURISchemeMismatch = errors.New("URI scheme does not match the archiver") // ErrHistoryMutated is the error for mutated history ErrHistoryMutated = errors.New("history was mutated") - // ErrContextTimeout is the error for context timeout - ErrContextTimeout = errors.New("archive aborted because context timed out") // ErrInvalidGetHistoryRequest is the error for invalid GetHistory request ErrInvalidGetHistoryRequest = errors.New("get archived history request is invalid") // ErrInvalidQueryVisibilityRequest is the error for invalid Query Visibility request diff --git a/common/archiver/filestore/history_archiver.go b/common/archiver/filestore/history_archiver.go index 37e750b589e..0dc0b98a3e4 100644 --- a/common/archiver/filestore/history_archiver.go +++ b/common/archiver/filestore/history_archiver.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Filestore History Archiver will archive workflow histories to local disk. 
// Each Archive() request results in a file named in the format of @@ -47,13 +23,14 @@ import ( historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/codec" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" ) const ( @@ -74,9 +51,11 @@ var ( type ( historyArchiver struct { - container *archiver.HistoryBootstrapContainer - fileMode os.FileMode - dirMode os.FileMode + executionManager persistence.ExecutionManager + logger log.Logger + metricsHandler metrics.Handler + fileMode os.FileMode + dirMode os.FileMode // only set in test code historyIterator archiver.HistoryIterator @@ -90,14 +69,18 @@ type ( // NewHistoryArchiver creates a new archiver.HistoryArchiver based on filestore func NewHistoryArchiver( - container *archiver.HistoryBootstrapContainer, + executionManager persistence.ExecutionManager, + logger log.Logger, + metricsHandler metrics.Handler, config *config.FilestoreArchiver, ) (archiver.HistoryArchiver, error) { - return newHistoryArchiver(container, config, nil) + return newHistoryArchiver(executionManager, logger, metricsHandler, config, nil) } func newHistoryArchiver( - container *archiver.HistoryBootstrapContainer, + executionManager persistence.ExecutionManager, + logger log.Logger, + metricsHandler metrics.Handler, config *config.FilestoreArchiver, historyIterator archiver.HistoryIterator, ) (*historyArchiver, error) { @@ -110,10 +93,12 @@ func newHistoryArchiver( return nil, errInvalidDirMode } return &historyArchiver{ - container: container, - fileMode: os.FileMode(fileMode), - dirMode: os.FileMode(dirMode), - historyIterator: historyIterator, + executionManager: executionManager, + logger: logger, + metricsHandler: metricsHandler, + fileMode: os.FileMode(fileMode), + dirMode: 
os.FileMode(dirMode), + historyIterator: historyIterator, }, nil } @@ -130,7 +115,7 @@ func (h *historyArchiver) Archive( } }() - logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) + logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.logger, request, URI.String()) if err := h.ValidateURI(URI); err != nil { logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) @@ -144,7 +129,7 @@ func (h *historyArchiver) Archive( historyIterator := h.historyIterator if historyIterator == nil { // will only be set by testing code - historyIterator = archiver.NewHistoryIterator(request, h.container.ExecutionManager, targetHistoryBlobSize) + historyIterator = archiver.NewHistoryIterator(request, h.executionManager, targetHistoryBlobSize) } var historyBatches []*historypb.History diff --git a/common/archiver/filestore/history_archiver_test.go b/common/archiver/filestore/history_archiver_test.go index 19527cf3ad7..a3ae84f811f 100644 --- a/common/archiver/filestore/history_archiver_test.go +++ b/common/archiver/filestore/history_archiver_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package filestore import ( @@ -32,22 +8,21 @@ import ( "testing" "time" - enumspb "go.temporal.io/api/enums/v1" - "google.golang.org/protobuf/types/known/timestamppb" - - "go.temporal.io/server/tests/testutils" - - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" + "go.temporal.io/server/tests/testutils" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -71,7 +46,9 @@ type historyArchiverSuite struct { *require.Assertions suite.Suite - container *archiver.HistoryBootstrapContainer + logger log.Logger + executionManager persistence.ExecutionManager + metricsHandler metrics.Handler testArchivalURI archiver.URI testGetDirectory string historyBatchesV1 []*historypb.History @@ -99,9 +76,8 @@ func (s *historyArchiverSuite) TearDownSuite() { func (s *historyArchiverSuite) SetupTest() { s.Assertions = require.New(s.T()) - s.container = &archiver.HistoryBootstrapContainer{ - Logger: log.NewNoopLogger(), - } + s.logger = log.NewNoopLogger() + s.metricsHandler = metrics.NoopMetricsHandler } func 
(s *historyArchiverSuite) TestValidateURI() { @@ -192,7 +168,14 @@ func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { historyIterator := archiver.NewMockHistoryIterator(mockCtrl) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), + historyIterator.EXPECT().Next(gomock.Any()).Return( + nil, + &serviceerror.ResourceExhausted{ + Cause: enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, + Scope: enumspb.RESOURCE_EXHAUSTED_SCOPE_NAMESPACE, + Message: "", + }, + ), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) @@ -576,9 +559,9 @@ func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.H FileMode: testFileModeStr, DirMode: testDirModeStr, } - archiver, err := newHistoryArchiver(s.container, config, historyIterator) + a, err := newHistoryArchiver(s.executionManager, s.logger, s.metricsHandler, config, historyIterator) s.NoError(err) - return archiver + return a } func (s *historyArchiverSuite) setupHistoryDirectory() { diff --git a/common/archiver/filestore/query_parser.go b/common/archiver/filestore/query_parser.go index f2e96786cc9..ad780e99e66 100644 --- a/common/archiver/filestore/query_parser.go +++ b/common/archiver/filestore/query_parser.go @@ -1,43 +1,17 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser +//go:generate mockgen -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser package filestore import ( "errors" "fmt" - "strconv" "strings" "time" "github.com/temporalio/sqlparser" enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/server/common/convert" - "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/sqlquery" "go.temporal.io/server/common/util" ) @@ -70,12 +44,6 @@ const ( ExecutionStatus = "ExecutionStatus" ) -const ( - queryTemplate = "select * from dummy where %s" - - defaultDateTimeFormat = time.RFC3339 -) - // NewQueryParser creates a new query parser for filestore func NewQueryParser() QueryParser { return &queryParser{} @@ -89,7 +57,7 @@ func (p *queryParser) Parse(query string) (*parsedQuery, error) { if strings.TrimSpace(query) == "" { return parsedQuery, nil } - stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + stmt, err := sqlparser.Parse(fmt.Sprintf(sqlquery.QueryTemplate, query)) if err != nil { return nil, err } @@ -143,7 +111,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, switch colNameStr { case WorkflowID: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -156,7 +124,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.workflowID = util.Ptr(val) case RunID: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -169,7 +137,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.runID = util.Ptr(val) case WorkflowType: - val, err := extractStringValue(valStr) + val, err := 
sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -182,7 +150,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.workflowTypeName = util.Ptr(val) case ExecutionStatus: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { // if failed to extract string value, it means user input close status as a number val = valStr @@ -200,7 +168,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.status = &status case CloseTime: - timestamp, err := convertToTime(valStr) + timestamp, err := sqlquery.ConvertToTime(valStr) if err != nil { return err } @@ -235,22 +203,6 @@ func (p *queryParser) convertCloseTime(timestamp time.Time, op string, parsedQue return nil } -func convertToTime(timeStr string) (time.Time, error) { - ts, err := strconv.ParseInt(timeStr, 10, 64) - if err == nil { - return timestamp.UnixOrZeroTime(ts), nil - } - timestampStr, err := extractStringValue(timeStr) - if err != nil { - return time.Time{}, err - } - parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) - if err != nil { - return time.Time{}, err - } - return parsedTime, nil -} - func convertStatusStr(statusStr string) (enumspb.WorkflowExecutionStatus, error) { statusStr = strings.ToLower(strings.TrimSpace(statusStr)) switch statusStr { @@ -270,10 +222,3 @@ func convertStatusStr(statusStr string) (enumspb.WorkflowExecutionStatus, error) return 0, fmt.Errorf("unknown workflow close status: %s", statusStr) } } - -func extractStringValue(s string) (string, error) { - if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { - return s[1 : len(s)-1], nil - } - return "", fmt.Errorf("value %s is not a string value", s) -} diff --git a/common/archiver/filestore/query_parser_mock.go b/common/archiver/filestore/query_parser_mock.go index ff6f9314af9..917efcd1626 100644 --- a/common/archiver/filestore/query_parser_mock.go +++ 
b/common/archiver/filestore/query_parser_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: query_parser.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package filestore -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: query_parser.go // Package filestore is a generated GoMock package. package filestore @@ -31,13 +12,14 @@ package filestore import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockQueryParser is a mock of QueryParser interface. 
type MockQueryParser struct { ctrl *gomock.Controller recorder *MockQueryParserMockRecorder + isgomock struct{} } // MockQueryParserMockRecorder is the mock recorder for MockQueryParser. @@ -67,7 +49,7 @@ func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { } // Parse indicates an expected call of Parse. -func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { +func (mr *MockQueryParserMockRecorder) Parse(query any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) } diff --git a/common/archiver/filestore/query_parser_test.go b/common/archiver/filestore/query_parser_test.go index 6dd37acffa5..1b5c8ab607b 100644 --- a/common/archiver/filestore/query_parser_test.go +++ b/common/archiver/filestore/query_parser_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package filestore import ( @@ -31,7 +7,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" enumspb "go.temporal.io/api/enums/v1" - "go.temporal.io/server/common/util" ) diff --git a/common/archiver/filestore/util.go b/common/archiver/filestore/util.go index 11ade3d4969..c73b0429ec3 100644 --- a/common/archiver/filestore/util.go +++ b/common/archiver/filestore/util.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package filestore import ( @@ -35,12 +11,11 @@ import ( "github.com/dgryski/go-farm" historypb "go.temporal.io/api/history/v1" - "go.uber.org/multierr" - "google.golang.org/protobuf/proto" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/codec" + "go.uber.org/multierr" + "google.golang.org/protobuf/proto" ) var ( @@ -164,7 +139,7 @@ func decodeVisibilityRecord(data []byte) (*archiverspb.VisibilityRecord, error) return record, nil } -func serializeToken(token interface{}) ([]byte, error) { +func serializeToken(token any) ([]byte, error) { if token == nil { return nil, nil } diff --git a/common/archiver/filestore/util_test.go b/common/archiver/filestore/util_test.go index f32e81488ae..ff994157f50 100644 --- a/common/archiver/filestore/util_test.go +++ b/common/archiver/filestore/util_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package filestore import ( @@ -34,12 +10,11 @@ import ( "github.com/stretchr/testify/suite" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" - "google.golang.org/protobuf/types/known/timestamppb" - "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/codec" "go.temporal.io/server/tests/testutils" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( diff --git a/common/archiver/filestore/visibility_archiver.go b/common/archiver/filestore/visibility_archiver.go index 1ac1dd0e9b3..4a4cd2a3c23 100644 --- a/common/archiver/filestore/visibility_archiver.go +++ b/common/archiver/filestore/visibility_archiver.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package filestore import ( @@ -37,11 +13,12 @@ import ( commonpb "go.temporal.io/api/common/v1" "go.temporal.io/api/serviceerror" workflowpb "go.temporal.io/api/workflow/v1" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" ) @@ -52,10 +29,11 @@ const ( type ( visibilityArchiver struct { - container *archiver.VisibilityBootstrapContainer - fileMode os.FileMode - dirMode os.FileMode - queryParser QueryParser + logger log.Logger + metricsHandler metrics.Handler + fileMode os.FileMode + dirMode os.FileMode + queryParser QueryParser } queryVisibilityToken struct { @@ -73,7 +51,8 @@ type ( // NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on filestore func NewVisibilityArchiver( - container *archiver.VisibilityBootstrapContainer, + logger log.Logger, + metricsHandler metrics.Handler, config *config.FilestoreArchiver, ) (archiver.VisibilityArchiver, error) { fileMode, err := strconv.ParseUint(config.FileMode, 0, 32) @@ -85,10 +64,11 @@ func NewVisibilityArchiver( return nil, errInvalidDirMode } return &visibilityArchiver{ - container: container, - fileMode: os.FileMode(fileMode), - dirMode: os.FileMode(dirMode), - queryParser: NewQueryParser(), + logger: logger, + metricsHandler: metricsHandler, + fileMode: os.FileMode(fileMode), + dirMode: os.FileMode(dirMode), + queryParser: NewQueryParser(), }, nil } @@ -105,7 +85,7 @@ func (v *visibilityArchiver) Archive( } }() - logger := 
archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) + logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.logger, request, URI.String()) if err := v.ValidateURI(URI); err != nil { logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) @@ -356,12 +336,13 @@ func convertToExecutionInfo(record *archiverspb.VisibilityRecord, saTypeMap sear Type: &commonpb.WorkflowType{ Name: record.WorkflowTypeName, }, - StartTime: record.StartTime, - ExecutionTime: record.ExecutionTime, - CloseTime: record.CloseTime, - Status: record.Status, - HistoryLength: record.HistoryLength, - Memo: record.Memo, - SearchAttributes: searchAttributes, + StartTime: record.StartTime, + ExecutionTime: record.ExecutionTime, + CloseTime: record.CloseTime, + ExecutionDuration: record.ExecutionDuration, + Status: record.Status, + HistoryLength: record.HistoryLength, + Memo: record.Memo, + SearchAttributes: searchAttributes, }, nil } diff --git a/common/archiver/filestore/visibility_archiver_test.go b/common/archiver/filestore/visibility_archiver_test.go index 57820a9d235..3c3311097a1 100644 --- a/common/archiver/filestore/visibility_archiver_test.go +++ b/common/archiver/filestore/visibility_archiver_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package filestore import ( @@ -32,25 +8,25 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" workflowpb "go.temporal.io/api/workflow/v1" - "google.golang.org/protobuf/types/known/timestamppb" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/codec" "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/payload" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/common/util" "go.temporal.io/server/tests/testutils" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -61,7 +37,8 @@ type visibilityArchiverSuite struct { *require.Assertions suite.Suite - container *archiver.VisibilityBootstrapContainer + logger log.Logger + metricsHandler metrics.Handler testArchivalURI archiver.URI testQueryDirectory string visibilityRecords []*archiverspb.VisibilityRecord @@ -90,9 +67,8 @@ func (s *visibilityArchiverSuite) TearDownSuite() { func (s *visibilityArchiverSuite) SetupTest() { s.Assertions = require.New(s.T()) - s.container = &archiver.VisibilityBootstrapContainer{ - Logger: log.NewNoopLogger(), - } + s.logger = log.NewNoopLogger() + s.metricsHandler = metrics.NoopMetricsHandler s.controller = gomock.NewController(s.T()) } @@ -341,14 +317,14 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidURI() { NamespaceID: testNamespaceID, PageSize: 1, } - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) 
} func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidRequest() { visibilityArchiver := s.newTestVisibilityArchiver() - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -362,7 +338,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { NamespaceID: "some random namespaceID", PageSize: 10, Query: "some invalid query", - }, searchattribute.TestNameTypeMap) + }, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -380,7 +356,7 @@ func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { Query: "parsed by mockParser", PageSize: 1, } - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Empty(response.Executions) @@ -401,7 +377,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { PageSize: 1, NextPageToken: []byte{1, 2, 3}, } - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -422,12 +398,12 @@ func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { } URI, err := archiver.NewURI("file://" + s.testQueryDirectory) s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, 
searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Len(response.Executions, 1) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[0]) } @@ -448,25 +424,25 @@ func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { } URI, err := archiver.NewURI("file://" + s.testQueryDirectory) s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.NotNil(response.NextPageToken) s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[1]) request.NextPageToken = response.NextPageToken - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Len(response.Executions, 1) - ei, err = convertToExecutionInfo(s.visibilityRecords[3], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[3], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[0]) } @@ -496,17 +472,17 @@ func (s 
*visibilityArchiverSuite) TestArchiveAndQuery() { } executions := []*workflowpb.WorkflowExecutionInfo{} for len(executions) == 0 || request.NextPageToken != nil { - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) executions = append(executions, response.Executions...) request.NextPageToken = response.NextPageToken } s.Len(executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[1]) } @@ -528,7 +504,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { NextPageToken: nil, Query: "", } - _, err := visibilityArchiver.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + _, err := visibilityArchiver.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap()) var svcErr *serviceerror.InvalidArgument @@ -544,7 +520,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { NextPageToken: nil, Query: "", } - _, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, req, searchattribute.TestNameTypeMap) + _, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, req, searchattribute.TestNameTypeMap()) var svcErr *serviceerror.InvalidArgument @@ -569,7 +545,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { } var executions []*workflowpb.WorkflowExecutionInfo for len(executions) == 0 || request.NextPageToken != nil { - 
response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) executions = append(executions, response.Executions...) @@ -583,9 +559,9 @@ func (s *visibilityArchiverSuite) newTestVisibilityArchiver() *visibilityArchive FileMode: testFileModeStr, DirMode: testDirModeStr, } - archiver, err := NewVisibilityArchiver(s.container, config) + a, err := NewVisibilityArchiver(s.logger, s.metricsHandler, config) s.NoError(err) - return archiver.(*visibilityArchiver) + return a.(*visibilityArchiver) } func (s *visibilityArchiverSuite) setupVisibilityDirectory() { diff --git a/common/archiver/gcloud/connector/client.go b/common/archiver/gcloud/connector/client.go index 79a556f840a..f6f8dbfe78b 100644 --- a/common/archiver/gcloud/connector/client.go +++ b/common/archiver/gcloud/connector/client.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination client_mock.go package connector @@ -34,11 +10,10 @@ import ( "os" "cloud.google.com/go/storage" - "go.uber.org/multierr" - "google.golang.org/api/iterator" - "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/config" + "go.uber.org/multierr" + "google.golang.org/api/iterator" ) var ( @@ -50,7 +25,7 @@ var ( type ( // Precondition is a function that allow you to filter a query result. // If subject match params conditions then return true, else return false. - Precondition func(subject interface{}) bool + Precondition func(subject any) bool // Client is a wrapper around Google cloud storages client library. Client interface { @@ -98,11 +73,10 @@ func NewClientWithParams(clientD GcloudStorageClient) (Client, error) { func (s *storageWrapper) Upload(ctx context.Context, URI archiver.URI, fileName string, file []byte) (err error) { bucket := s.client.Bucket(URI.Hostname()) writer := bucket.Object(formatSinkPath(URI.Path()) + "/" + fileName).NewWriter(ctx) + defer func() { + err = multierr.Combine(err, writer.Close()) + }() _, err = io.Copy(writer, bytes.NewReader(file)) - if err == nil { - err = writer.Close() - } - return err } @@ -157,7 +131,7 @@ func (s *storageWrapper) Query(ctx context.Context, URI archiver.URI, fileNamePr } -// QueryWithFilter, retieves filenames that match filter parameters. PageSize is optional, 0 means all records. +// QueryWithFilters, retieves filenames that match filter parameters. PageSize is optional, 0 means all records. 
func (s *storageWrapper) QueryWithFilters(ctx context.Context, URI archiver.URI, fileNamePrefix string, pageSize, offset int, filters []Precondition) ([]string, bool, int, error) { var err error currentPos := offset diff --git a/common/archiver/gcloud/connector/client_delegate.go b/common/archiver/gcloud/connector/client_delegate.go index 1049bdf54dd..9b68b76c6e7 100644 --- a/common/archiver/gcloud/connector/client_delegate.go +++ b/common/archiver/gcloud/connector/client_delegate.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination client_delegate_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination client_delegate_mock.go package connector @@ -110,7 +86,6 @@ func newDefaultClientDelegate(ctx context.Context) (*clientDelegate, error) { } func newClientDelegateWithCredentials(ctx context.Context, credentialsPath string) (*clientDelegate, error) { - jsonKey, err := os.ReadFile(credentialsPath) if err != nil { return newDefaultClientDelegate(ctx) diff --git a/common/archiver/gcloud/connector/client_delegate_mock.go b/common/archiver/gcloud/connector/client_delegate_mock.go index 7560ff86a19..c27360b80ec 100644 --- a/common/archiver/gcloud/connector/client_delegate_mock.go +++ b/common/archiver/gcloud/connector/client_delegate_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: client_delegate.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
+// mockgen -package connector -source client_delegate.go -destination client_delegate_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: client_delegate.go // Package connector is a generated GoMock package. package connector @@ -33,13 +14,14 @@ import ( reflect "reflect" storage "cloud.google.com/go/storage" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockGcloudStorageClient is a mock of GcloudStorageClient interface. type MockGcloudStorageClient struct { ctrl *gomock.Controller recorder *MockGcloudStorageClientMockRecorder + isgomock struct{} } // MockGcloudStorageClientMockRecorder is the mock recorder for MockGcloudStorageClient. @@ -68,7 +50,7 @@ func (m *MockGcloudStorageClient) Bucket(URI string) BucketHandleWrapper { } // Bucket indicates an expected call of Bucket. -func (mr *MockGcloudStorageClientMockRecorder) Bucket(URI interface{}) *gomock.Call { +func (mr *MockGcloudStorageClientMockRecorder) Bucket(URI any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bucket", reflect.TypeOf((*MockGcloudStorageClient)(nil).Bucket), URI) } @@ -77,6 +59,7 @@ func (mr *MockGcloudStorageClientMockRecorder) Bucket(URI interface{}) *gomock.C type MockBucketHandleWrapper struct { ctrl *gomock.Controller recorder *MockBucketHandleWrapperMockRecorder + isgomock struct{} } // MockBucketHandleWrapperMockRecorder is the mock recorder for MockBucketHandleWrapper. 
@@ -106,7 +89,7 @@ func (m *MockBucketHandleWrapper) Attrs(ctx context.Context) (*storage.BucketAtt } // Attrs indicates an expected call of Attrs. -func (mr *MockBucketHandleWrapperMockRecorder) Attrs(ctx interface{}) *gomock.Call { +func (mr *MockBucketHandleWrapperMockRecorder) Attrs(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attrs", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Attrs), ctx) } @@ -120,7 +103,7 @@ func (m *MockBucketHandleWrapper) Object(name string) ObjectHandleWrapper { } // Object indicates an expected call of Object. -func (mr *MockBucketHandleWrapperMockRecorder) Object(name interface{}) *gomock.Call { +func (mr *MockBucketHandleWrapperMockRecorder) Object(name any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Object", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Object), name) } @@ -134,7 +117,7 @@ func (m *MockBucketHandleWrapper) Objects(ctx context.Context, q *storage.Query) } // Objects indicates an expected call of Objects. -func (mr *MockBucketHandleWrapperMockRecorder) Objects(ctx, q interface{}) *gomock.Call { +func (mr *MockBucketHandleWrapperMockRecorder) Objects(ctx, q any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Objects", reflect.TypeOf((*MockBucketHandleWrapper)(nil).Objects), ctx, q) } @@ -143,6 +126,7 @@ func (mr *MockBucketHandleWrapperMockRecorder) Objects(ctx, q interface{}) *gomo type MockObjectHandleWrapper struct { ctrl *gomock.Controller recorder *MockObjectHandleWrapperMockRecorder + isgomock struct{} } // MockObjectHandleWrapperMockRecorder is the mock recorder for MockObjectHandleWrapper. @@ -172,7 +156,7 @@ func (m *MockObjectHandleWrapper) Attrs(ctx context.Context) (*storage.ObjectAtt } // Attrs indicates an expected call of Attrs. 
-func (mr *MockObjectHandleWrapperMockRecorder) Attrs(ctx interface{}) *gomock.Call { +func (mr *MockObjectHandleWrapperMockRecorder) Attrs(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Attrs", reflect.TypeOf((*MockObjectHandleWrapper)(nil).Attrs), ctx) } @@ -187,7 +171,7 @@ func (m *MockObjectHandleWrapper) NewReader(ctx context.Context) (ReaderWrapper, } // NewReader indicates an expected call of NewReader. -func (mr *MockObjectHandleWrapperMockRecorder) NewReader(ctx interface{}) *gomock.Call { +func (mr *MockObjectHandleWrapperMockRecorder) NewReader(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewReader", reflect.TypeOf((*MockObjectHandleWrapper)(nil).NewReader), ctx) } @@ -201,7 +185,7 @@ func (m *MockObjectHandleWrapper) NewWriter(ctx context.Context) WriterWrapper { } // NewWriter indicates an expected call of NewWriter. -func (mr *MockObjectHandleWrapperMockRecorder) NewWriter(ctx interface{}) *gomock.Call { +func (mr *MockObjectHandleWrapperMockRecorder) NewWriter(ctx any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewWriter", reflect.TypeOf((*MockObjectHandleWrapper)(nil).NewWriter), ctx) } @@ -210,6 +194,7 @@ func (mr *MockObjectHandleWrapperMockRecorder) NewWriter(ctx interface{}) *gomoc type MockWriterWrapper struct { ctrl *gomock.Controller recorder *MockWriterWrapperMockRecorder + isgomock struct{} } // MockWriterWrapperMockRecorder is the mock recorder for MockWriterWrapper. @@ -252,7 +237,7 @@ func (m *MockWriterWrapper) CloseWithError(err error) error { } // CloseWithError indicates an expected call of CloseWithError. 
-func (mr *MockWriterWrapperMockRecorder) CloseWithError(err interface{}) *gomock.Call { +func (mr *MockWriterWrapperMockRecorder) CloseWithError(err any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloseWithError", reflect.TypeOf((*MockWriterWrapper)(nil).CloseWithError), err) } @@ -267,7 +252,7 @@ func (m *MockWriterWrapper) Write(p []byte) (int, error) { } // Write indicates an expected call of Write. -func (mr *MockWriterWrapperMockRecorder) Write(p interface{}) *gomock.Call { +func (mr *MockWriterWrapperMockRecorder) Write(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockWriterWrapper)(nil).Write), p) } @@ -276,6 +261,7 @@ func (mr *MockWriterWrapperMockRecorder) Write(p interface{}) *gomock.Call { type MockReaderWrapper struct { ctrl *gomock.Controller recorder *MockReaderWrapperMockRecorder + isgomock struct{} } // MockReaderWrapperMockRecorder is the mock recorder for MockReaderWrapper. @@ -319,7 +305,7 @@ func (m *MockReaderWrapper) Read(p []byte) (int, error) { } // Read indicates an expected call of Read. -func (mr *MockReaderWrapperMockRecorder) Read(p interface{}) *gomock.Call { +func (mr *MockReaderWrapperMockRecorder) Read(p any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockReaderWrapper)(nil).Read), p) } @@ -328,6 +314,7 @@ func (mr *MockReaderWrapperMockRecorder) Read(p interface{}) *gomock.Call { type MockObjectIteratorWrapper struct { ctrl *gomock.Controller recorder *MockObjectIteratorWrapperMockRecorder + isgomock struct{} } // MockObjectIteratorWrapperMockRecorder is the mock recorder for MockObjectIteratorWrapper. 
diff --git a/common/archiver/gcloud/connector/client_mock.go b/common/archiver/gcloud/connector/client_mock.go index 758ccc0b87b..1e89738dcc5 100644 --- a/common/archiver/gcloud/connector/client_mock.go +++ b/common/archiver/gcloud/connector/client_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: client.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package connector -source client.go -destination client_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: client.go // Package connector is a generated GoMock package. 
package connector @@ -32,14 +13,15 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" archiver "go.temporal.io/server/common/archiver" + gomock "go.uber.org/mock/gomock" ) // MockClient is a mock of Client interface. type MockClient struct { ctrl *gomock.Controller recorder *MockClientMockRecorder + isgomock struct{} } // MockClientMockRecorder is the mock recorder for MockClient. @@ -69,7 +51,7 @@ func (m *MockClient) Exist(ctx context.Context, URI archiver.URI, fileName strin } // Exist indicates an expected call of Exist. -func (mr *MockClientMockRecorder) Exist(ctx, URI, fileName interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Exist(ctx, URI, fileName any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exist", reflect.TypeOf((*MockClient)(nil).Exist), ctx, URI, fileName) } @@ -84,7 +66,7 @@ func (m *MockClient) Get(ctx context.Context, URI archiver.URI, file string) ([] } // Get indicates an expected call of Get. -func (mr *MockClientMockRecorder) Get(ctx, URI, file interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Get(ctx, URI, file any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockClient)(nil).Get), ctx, URI, file) } @@ -99,7 +81,7 @@ func (m *MockClient) Query(ctx context.Context, URI archiver.URI, fileNamePrefix } // Query indicates an expected call of Query. -func (mr *MockClientMockRecorder) Query(ctx, URI, fileNamePrefix interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Query(ctx, URI, fileNamePrefix any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockClient)(nil).Query), ctx, URI, fileNamePrefix) } @@ -116,7 +98,7 @@ func (m *MockClient) QueryWithFilters(ctx context.Context, URI archiver.URI, fil } // QueryWithFilters indicates an expected call of QueryWithFilters. 
-func (mr *MockClientMockRecorder) QueryWithFilters(ctx, URI, fileNamePrefix, pageSize, offset, filters interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) QueryWithFilters(ctx, URI, fileNamePrefix, pageSize, offset, filters any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "QueryWithFilters", reflect.TypeOf((*MockClient)(nil).QueryWithFilters), ctx, URI, fileNamePrefix, pageSize, offset, filters) } @@ -130,7 +112,7 @@ func (m *MockClient) Upload(ctx context.Context, URI archiver.URI, fileName stri } // Upload indicates an expected call of Upload. -func (mr *MockClientMockRecorder) Upload(ctx, URI, fileName, file interface{}) *gomock.Call { +func (mr *MockClientMockRecorder) Upload(ctx, URI, fileName, file any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Upload", reflect.TypeOf((*MockClient)(nil).Upload), ctx, URI, fileName, file) } diff --git a/common/archiver/gcloud/connector/client_test.go b/common/archiver/gcloud/connector/client_test.go index dead1a77ac4..10c32a9dfc4 100644 --- a/common/archiver/gcloud/connector/client_test.go +++ b/common/archiver/gcloud/connector/client_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package connector_test import ( @@ -30,18 +6,18 @@ import ( "errors" "io" "os" + "path/filepath" "strings" "testing" "cloud.google.com/go/storage" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "google.golang.org/api/iterator" - "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/gcloud/connector" "go.temporal.io/server/common/config" + "go.uber.org/mock/gomock" + "google.golang.org/api/iterator" ) func (s *clientSuite) SetupTest() { @@ -268,7 +244,6 @@ func (s *clientSuite) TestQuery() { } func (s *clientSuite) TestQueryWithFilter() { - ctx := context.Background() mockBucketHandleClient := connector.NewMockBucketHandleWrapper(s.controller) mockStorageClient := connector.NewMockGcloudStorageClient(s.controller) @@ -306,7 +281,7 @@ func (s *clientSuite) TestQueryWithFilter() { } func newWorkflowIDPrecondition(workflowID string) connector.Precondition { - return func(subject interface{}) bool { + return func(subject any) bool { if workflowID == "" { return true @@ -328,3 +303,32 @@ func newWorkflowIDPrecondition(workflowID string) connector.Precondition { return false } } + +// Ensures that no code in this package or its parent folder accidentally uses gRPC functions +// since they are stripped from the binary via `disable_grpc_modules`. This is crude but effective. 
+func (s *clientSuite) TestNoGRPCUsage() { + currentPackageFiles, err := filepath.Glob("*.go") + s.NoError(err) + parentPackageFiles, err := filepath.Glob("../*.go") + s.NoError(err) + allFiles := append(currentPackageFiles, parentPackageFiles...) + + var checkedClientFile bool + for _, file := range allFiles { + if strings.HasSuffix(file, "_test.go") { + continue + } + + content, err := os.ReadFile(file) + s.NoError(err) + + if strings.Contains(string(content), "NewGRPCClient") { + s.T().Errorf("❌ Found forbidden gRPC usage in file: %s", file) + } + + // Check for client.go in both current and parent directories + checkedClientFile = checkedClientFile || strings.HasSuffix(file, "client.go") + } + + s.True(checkedClientFile, "should have checked client.go for gRPC usage") +} diff --git a/common/archiver/gcloud/history_archiver.go b/common/archiver/gcloud/history_archiver.go index 56b00062245..0cc43166b16 100644 --- a/common/archiver/gcloud/history_archiver.go +++ b/common/archiver/gcloud/history_archiver.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package gcloud import ( @@ -33,7 +9,6 @@ import ( historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/gcloud/connector" @@ -60,8 +35,10 @@ const ( ) type historyArchiver struct { - container *archiver.HistoryBootstrapContainer - gcloudStorage connector.Client + executionManager persistence.ExecutionManager + logger log.Logger + metricsHandler metrics.Handler + gcloudStorage connector.Client // only set in test code historyIterator archiver.HistoryIterator @@ -81,21 +58,25 @@ type getHistoryToken struct { // NewHistoryArchiver creates a new gcloud storage HistoryArchiver func NewHistoryArchiver( - container *archiver.HistoryBootstrapContainer, + executionManager persistence.ExecutionManager, + logger log.Logger, + metricsHandler metrics.Handler, config *config.GstorageArchiver, ) (archiver.HistoryArchiver, error) { storage, err := connector.NewClient(context.Background(), config) if err == nil { - return newHistoryArchiver(container, nil, storage), nil + return newHistoryArchiver(executionManager, logger, metricsHandler, nil, storage), nil } return nil, err } -func newHistoryArchiver(container *archiver.HistoryBootstrapContainer, historyIterator archiver.HistoryIterator, storage connector.Client) archiver.HistoryArchiver { +func newHistoryArchiver(executionManager persistence.ExecutionManager, logger log.Logger, metricsHandler metrics.Handler, 
historyIterator archiver.HistoryIterator, storage connector.Client) archiver.HistoryArchiver { return &historyArchiver{ - container: container, - gcloudStorage: storage, - historyIterator: historyIterator, + executionManager: executionManager, + logger: logger, + metricsHandler: metricsHandler, + gcloudStorage: storage, + historyIterator: historyIterator, } } @@ -107,7 +88,7 @@ func newHistoryArchiver(container *archiver.HistoryBootstrapContainer, historyIt // between retry attempts. // This method will be invoked after a workflow passes its retention period. func (h *historyArchiver) Archive(ctx context.Context, URI archiver.URI, request *archiver.ArchiveHistoryRequest, opts ...archiver.ArchiveOption) (err error) { - handler := h.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) + handler := h.metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) featureCatalog := archiver.GetFeatureCatalog(opts...) 
startTime := time.Now().UTC() defer func() { @@ -127,7 +108,7 @@ func (h *historyArchiver) Archive(ctx context.Context, URI archiver.URI, request } }() - logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) + logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.logger, request, URI.String()) if err := h.ValidateURI(URI); err != nil { logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) @@ -143,7 +124,7 @@ func (h *historyArchiver) Archive(ctx context.Context, URI archiver.URI, request historyIterator := h.historyIterator var progress progress if historyIterator == nil { // will only be set by testing code - historyIterator, _ = loadHistoryIterator(ctx, request, h.container.ExecutionManager, featureCatalog, &progress) + historyIterator, _ = loadHistoryIterator(ctx, request, h.executionManager, featureCatalog, &progress) } encoder := codec.NewJSONPBEncoder() diff --git a/common/archiver/gcloud/history_archiver_test.go b/common/archiver/gcloud/history_archiver_test.go index 81160f07293..237118461f3 100644 --- a/common/archiver/gcloud/history_archiver_test.go +++ b/common/archiver/gcloud/history_archiver_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package gcloud import ( @@ -30,21 +6,21 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - "google.golang.org/protobuf/types/known/timestamppb" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/gcloud/connector" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/util" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -67,10 +43,8 @@ var ( func (h *historyArchiverSuite) SetupTest() { h.Assertions = require.New(h.T()) h.controller = gomock.NewController(h.T()) - h.container = &archiver.HistoryBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } + h.logger = log.NewNoopLogger() + h.metricsHandler = metrics.NoopMetricsHandler h.testArchivalURI, _ = archiver.NewURI("gs://my-bucket-cad/temporal_archival/development") } @@ -88,8 +62,10 @@ type historyArchiverSuite struct { controller *gomock.Controller - container *archiver.HistoryBootstrapContainer - testArchivalURI archiver.URI + logger log.Logger + metricsHandler metrics.Handler + executionManager persistence.ExecutionManager + testArchivalURI archiver.URI } func getCanceledContext() context.Context { @@ -147,7 +123,7 @@ func (h *historyArchiverSuite) TestArchive_Fail_InvalidURI() { historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: 
testNamespaceID, Namespace: testNamespace, @@ -170,7 +146,7 @@ func (h *historyArchiverSuite) TestArchive_Fail_InvalidRequest() { historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, @@ -196,7 +172,7 @@ func (h *historyArchiverSuite) TestArchive_Fail_ErrorOnReadHistory() { historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("some random error")), ) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, @@ -219,10 +195,17 @@ func (h *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { historyIterator := archiver.NewMockHistoryIterator(h.controller) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), + historyIterator.EXPECT().Next(gomock.Any()).Return( + nil, + &serviceerror.ResourceExhausted{ + Cause: enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, + Scope: enumspb.RESOURCE_EXHAUSTED_SCOPE_NAMESPACE, + Message: "", + }, + ), ) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, @@ -264,7 +247,7 @@ func (h *historyArchiverSuite) TestArchive_Fail_HistoryMutated() { 
historyIterator.EXPECT().Next(gomock.Any()).Return(historyBlob, nil), ) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, @@ -290,7 +273,7 @@ func (h *historyArchiverSuite) TestArchive_Fail_NonRetryableErrorOption() { historyIterator.EXPECT().Next(gomock.Any()).Return(nil, errors.New("upload non-retryable error")), ) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, @@ -336,7 +319,7 @@ func (h *historyArchiverSuite) TestArchive_Skip() { historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewNotFound("workflow not found")), ) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, Namespace: testNamespace, @@ -396,7 +379,7 @@ func (h *historyArchiverSuite) TestArchive_Success() { historyIterator.EXPECT().HasNext().Return(false), ) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.ArchiveHistoryRequest{ NamespaceID: testNamespaceID, @@ -417,7 +400,7 @@ func (h *historyArchiverSuite) TestGet_Fail_InvalidURI() { mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) 
historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, @@ -437,7 +420,7 @@ func (h *historyArchiverSuite) TestGet_Fail_InvalidToken() { mockStorageClient := connector.NewMockGcloudStorageClient(h.controller) storageWrapper, _ := connector.NewClientWithParams(mockStorageClient) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, @@ -460,7 +443,7 @@ func (h *historyArchiverSuite) TestGet_Success_PickHighestVersion() { storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, gomock.Any()).Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_0.history").Return([]byte(exampleNewHistoryRecord), nil) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, @@ -480,7 +463,7 @@ func (h *historyArchiverSuite) TestGet_Success_PickHighestVersion_OldJSON() { storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, 
gomock.Any()).Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_0.history").Return([]byte(exampleOldHistoryRecord), nil) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, @@ -500,7 +483,7 @@ func (h *historyArchiverSuite) TestGet_Success_UseProvidedVersion() { storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{"905702227796330300141628222723188294514017512010591354159_-24_0.history", "905702227796330300141628222723188294514017512010591354159_-25_0.history"}, nil) storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-25_0.history").Return([]byte(exampleNewHistoryRecord), nil) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, @@ -523,7 +506,7 @@ func (h *historyArchiverSuite) TestGet_Success_PageSize() { storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_1.history").Return([]byte(exampleNewHistoryRecord), nil) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := 
newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, @@ -546,7 +529,7 @@ func (h *historyArchiverSuite) TestGet_Success_FromToken() { storageWrapper.EXPECT().Get(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470_-24_4.history").Return([]byte(exampleNewHistoryRecord), nil) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) token := &getHistoryToken{ CloseFailoverVersion: -24, @@ -593,7 +576,7 @@ func (h *historyArchiverSuite) TestGet_NoHistory() { storageWrapper.EXPECT().Query(ctx, h.testArchivalURI, "141323698701063509081739672280485489488911532452831150339470").Return([]string{}, nil) historyIterator := archiver.NewMockHistoryIterator(h.controller) - historyArchiver := newHistoryArchiver(h.container, historyIterator, storageWrapper) + historyArchiver := newHistoryArchiver(h.executionManager, h.logger, h.metricsHandler, historyIterator, storageWrapper) request := &archiver.GetHistoryRequest{ NamespaceID: testNamespaceID, WorkflowID: testWorkflowID, diff --git a/common/archiver/gcloud/query_parser.go b/common/archiver/gcloud/query_parser.go index d22c84cbe61..d6625dd317e 100644 --- a/common/archiver/gcloud/query_parser.go +++ b/common/archiver/gcloud/query_parser.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser +//go:generate mockgen -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser package gcloud @@ -32,7 +8,7 @@ import ( "time" "github.com/temporalio/sqlparser" - + "go.temporal.io/server/common/sqlquery" "go.temporal.io/server/common/util" ) @@ -73,19 +49,13 @@ const ( PrecisionSecond = "Second" ) -const ( - queryTemplate = "select * from dummy where %s" - - defaultDateTimeFormat = time.RFC3339 -) - // NewQueryParser creates a new query parser for filestore func NewQueryParser() QueryParser { return &queryParser{} } func (p *queryParser) Parse(query string) (*parsedQuery, error) { - stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + stmt, err := sqlparser.Parse(fmt.Sprintf(sqlquery.QueryTemplate, query)) if err != nil { return nil, err } @@ -149,7 +119,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, switch colNameStr { case WorkflowID: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -162,7 +132,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.workflowID = util.Ptr(val) case RunID: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -175,7 +145,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.runID = util.Ptr(val) case CloseTime: - closeTime, err := convertToTime(valStr) + closeTime, err := sqlquery.ConvertToTime(valStr) if err != nil { return err } @@ -185,7 +155,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, parsedQuery.closeTime = closeTime case StartTime: - startTime, err := convertToTime(valStr) + 
startTime, err := sqlquery.ConvertToTime(valStr) if err != nil { return err } @@ -194,7 +164,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.startTime = startTime case WorkflowType: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -207,7 +177,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.workflowType = util.Ptr(val) case SearchPrecision: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -232,22 +202,3 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, return nil } - -func convertToTime(timeStr string) (time.Time, error) { - timestampStr, err := extractStringValue(timeStr) - if err != nil { - return time.Time{}, err - } - parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) - if err != nil { - return time.Time{}, err - } - return parsedTime, nil -} - -func extractStringValue(s string) (string, error) { - if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { - return s[1 : len(s)-1], nil - } - return "", fmt.Errorf("value %s is not a string value", s) -} diff --git a/common/archiver/gcloud/query_parser_mock.go b/common/archiver/gcloud/query_parser_mock.go index ad548d32407..70b766bc6f0 100644 --- a/common/archiver/gcloud/query_parser_mock.go +++ b/common/archiver/gcloud/query_parser_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: query_parser.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package gcloud -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: query_parser.go // Package gcloud is a generated GoMock package. package gcloud @@ -31,13 +12,14 @@ package gcloud import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockQueryParser is a mock of QueryParser interface. type MockQueryParser struct { ctrl *gomock.Controller recorder *MockQueryParserMockRecorder + isgomock struct{} } // MockQueryParserMockRecorder is the mock recorder for MockQueryParser. @@ -67,7 +49,7 @@ func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { } // Parse indicates an expected call of Parse. 
-func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { +func (mr *MockQueryParserMockRecorder) Parse(query any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) } diff --git a/common/archiver/gcloud/util.go b/common/archiver/gcloud/util.go index 4a6ea2cf48e..a7f94f1ba5c 100644 --- a/common/archiver/gcloud/util.go +++ b/common/archiver/gcloud/util.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package gcloud import ( @@ -35,13 +11,12 @@ import ( "github.com/dgryski/go-farm" commonpb "go.temporal.io/api/common/v1" workflowpb "go.temporal.io/api/workflow/v1" - "google.golang.org/protobuf/proto" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/gcloud/connector" "go.temporal.io/server/common/codec" "go.temporal.io/server/common/searchattribute" + "google.golang.org/protobuf/proto" ) func encode(message proto.Message) ([]byte, error) { @@ -111,7 +86,7 @@ func extractCloseFailoverVersion(filename string) (int64, int, error) { return failoverVersion, highestPart, err } -func serializeToken(token interface{}) ([]byte, error) { +func serializeToken(token any) ([]byte, error) { if token == nil { return nil, nil } @@ -153,18 +128,19 @@ func convertToExecutionInfo(record *archiverspb.VisibilityRecord, saTypeMap sear Type: &commonpb.WorkflowType{ Name: record.WorkflowTypeName, }, - StartTime: record.StartTime, - ExecutionTime: record.ExecutionTime, - CloseTime: record.CloseTime, - Status: record.Status, - HistoryLength: record.HistoryLength, - Memo: record.Memo, - SearchAttributes: searchAttributes, + StartTime: record.StartTime, + ExecutionTime: record.ExecutionTime, + CloseTime: record.CloseTime, + ExecutionDuration: record.ExecutionDuration, + Status: record.Status, + HistoryLength: record.HistoryLength, + Memo: record.Memo, + SearchAttributes: searchAttributes, }, nil } func newRunIDPrecondition(runID string) connector.Precondition { - return func(subject interface{}) bool { + return func(subject any) bool { if runID == "" { return true @@ -176,7 +152,7 @@ func newRunIDPrecondition(runID string) connector.Precondition { } if strings.Contains(fileName, runID) { - fileNameParts := strings.Split(fileName, "_") + fileNameParts := strings.SplitN(fileName, "_", 5) if len(fileNameParts) != 5 { return true } @@ -188,7 +164,7 @@ func newRunIDPrecondition(runID string) 
connector.Precondition { } func newWorkflowIDPrecondition(workflowID string) connector.Precondition { - return func(subject interface{}) bool { + return func(subject any) bool { if workflowID == "" { return true @@ -200,7 +176,7 @@ func newWorkflowIDPrecondition(workflowID string) connector.Precondition { } if strings.Contains(fileName, workflowID) { - fileNameParts := strings.Split(fileName, "_") + fileNameParts := strings.SplitN(fileName, "_", 5) if len(fileNameParts) != 5 { return true } @@ -212,7 +188,7 @@ func newWorkflowIDPrecondition(workflowID string) connector.Precondition { } func newWorkflowTypeNamePrecondition(workflowTypeName string) connector.Precondition { - return func(subject interface{}) bool { + return func(subject any) bool { if workflowTypeName == "" { return true @@ -224,7 +200,7 @@ func newWorkflowTypeNamePrecondition(workflowTypeName string) connector.Precondi } if strings.Contains(fileName, workflowTypeName) { - fileNameParts := strings.Split(fileName, "_") + fileNameParts := strings.SplitN(fileName, "_", 5) if len(fileNameParts) != 5 { return true } diff --git a/common/archiver/gcloud/util_test.go b/common/archiver/gcloud/util_test.go index aa0806fe36c..26f335d182e 100644 --- a/common/archiver/gcloud/util_test.go +++ b/common/archiver/gcloud/util_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package gcloud import ( @@ -31,10 +7,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" historypb "go.temporal.io/api/history/v1" - "google.golang.org/protobuf/types/known/timestamppb" - "go.temporal.io/server/common" "go.temporal.io/server/common/codec" + "google.golang.org/protobuf/types/known/timestamppb" ) func (s *utilSuite) SetupTest() { diff --git a/common/archiver/gcloud/visibility_archiver.go b/common/archiver/gcloud/visibility_archiver.go index 0314a30909a..6db106419e5 100644 --- a/common/archiver/gcloud/visibility_archiver.go +++ b/common/archiver/gcloud/visibility_archiver.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package gcloud import ( @@ -33,11 +9,11 @@ import ( "time" "go.temporal.io/api/serviceerror" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/gcloud/connector" "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/searchattribute" @@ -56,9 +32,10 @@ var ( type ( visibilityArchiver struct { - container *archiver.VisibilityBootstrapContainer - gcloudStorage connector.Client - queryParser QueryParser + logger log.Logger + metricsHandler metrics.Handler + gcloudStorage connector.Client + queryParser QueryParser } queryVisibilityToken struct { @@ -73,18 +50,19 @@ type ( } ) -func newVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, storage connector.Client) *visibilityArchiver { +func newVisibilityArchiver(logger log.Logger, metricsHandler metrics.Handler, storage connector.Client) *visibilityArchiver { return &visibilityArchiver{ - container: container, - gcloudStorage: storage, - queryParser: NewQueryParser(), + logger: logger, + metricsHandler: metricsHandler, + gcloudStorage: storage, + queryParser: NewQueryParser(), } } // NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on filestore -func NewVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, config *config.GstorageArchiver) (archiver.VisibilityArchiver, error) { - storage, err := connector.NewClient(context.Background(), config) - return newVisibilityArchiver(container, storage), err +func NewVisibilityArchiver(logger log.Logger, metricsHandler metrics.Handler, cfg *config.GstorageArchiver) (archiver.VisibilityArchiver, error) { + storage, err := connector.NewClient(context.Background(), cfg) + return newVisibilityArchiver(logger, metricsHandler, storage), err } // Archive is used to archive one workflow visibility record. 
@@ -93,7 +71,7 @@ func NewVisibilityArchiver(container *archiver.VisibilityBootstrapContainer, con // Please make sure your implementation is lossless. If any in-memory batching mechanism is used, then those batched records will be lost during server restarts. // This method will be invoked when workflow closes. Note that because of conflict resolution, it is possible for a workflow to through the closing process multiple times, which means that this method can be invoked more than once after a workflow closes. func (v *visibilityArchiver) Archive(ctx context.Context, URI archiver.URI, request *archiverspb.VisibilityRecord, opts ...archiver.ArchiveOption) (err error) { - handler := v.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) + handler := v.metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) featureCatalog := archiver.GetFeatureCatalog(opts...) 
startTime := time.Now().UTC() defer func() { @@ -110,7 +88,7 @@ func (v *visibilityArchiver) Archive(ctx context.Context, URI archiver.URI, requ } }() - logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) + logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.logger, request, URI.String()) if err := v.ValidateURI(URI); err != nil { if isRetryableError(err) { @@ -236,7 +214,7 @@ func (v *visibilityArchiver) queryAll( pageSize: request.PageSize, nextPageToken: request.NextPageToken, parsedQuery: &parsedQuery{}, - }, saTypeMap, request.NamespaceID) + }, saTypeMap, constructVisibilityFilenamePrefix(request.NamespaceID, indexKeyCloseTimeout)) } func (v *visibilityArchiver) queryPrefix(ctx context.Context, uri archiver.URI, request *queryVisibilityRequest, saTypeMap searchattribute.NameTypeMap, prefix string) (*archiver.QueryVisibilityResponse, error) { diff --git a/common/archiver/gcloud/visibility_archiver_test.go b/common/archiver/gcloud/visibility_archiver_test.go index 387ab79dbdd..3cdbeab59a3 100644 --- a/common/archiver/gcloud/visibility_archiver_test.go +++ b/common/archiver/gcloud/visibility_archiver_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package gcloud import ( @@ -30,13 +6,11 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" - "go.temporal.io/api/workflow/v1" - + workflowpb "go.temporal.io/api/workflow/v1" archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/gcloud/connector" @@ -46,6 +20,7 @@ import ( "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/common/testing/protorequire" "go.temporal.io/server/common/util" + "go.uber.org/mock/gomock" ) const ( @@ -59,10 +34,8 @@ const ( func (s *visibilityArchiverSuite) SetupTest() { s.Assertions = require.New(s.T()) s.controller = gomock.NewController(s.T()) - s.container = &archiver.VisibilityBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } + s.logger = log.NewNoopLogger() + s.metricsHandler = metrics.NoopMetricsHandler s.expectedVisibilityRecords = []*archiverspb.VisibilityRecord{ { NamespaceId: testNamespaceID, @@ -91,7 +64,8 @@ type visibilityArchiverSuite struct { protorequire.ProtoAssertions suite.Suite controller *gomock.Controller - container *archiver.VisibilityBootstrapContainer + logger log.Logger + metricsHandler metrics.Handler expectedVisibilityRecords []*archiverspb.VisibilityRecord } @@ -143,7 +117,7 @@ func (s *visibilityArchiverSuite) 
TestArchive_Fail_InvalidVisibilityURI() { s.NoError(err) storageWrapper := connector.NewMockClient(s.controller) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) s.NoError(err) request := &archiverspb.VisibilityRecord{ NamespaceId: testNamespaceID, @@ -162,7 +136,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidVisibilityURI() { s.NoError(err) storageWrapper := connector.NewMockClient(s.controller) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) s.NoError(err) request := &archiver.QueryVisibilityRequest{ NamespaceID: testNamespaceID, @@ -170,7 +144,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidVisibilityURI() { Query: "WorkflowType='type::example' AND CloseTime='2020-02-05T11:00:00Z' AND SearchPrecision='Day'", } - _, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + _, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap()) s.Error(err) } @@ -182,7 +156,7 @@ func (s *visibilityArchiverSuite) TestVisibilityArchive() { storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) storageWrapper.EXPECT().Upload(gomock.Any(), URI, gomock.Any(), gomock.Any()).Return(nil).Times(2) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) s.NoError(err) request := &archiverspb.VisibilityRecord{ @@ -208,7 +182,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { s.NoError(err) storageWrapper := connector.NewMockClient(s.controller) storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := 
newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) s.NoError(err) mockParser := NewMockQueryParser(s.controller) @@ -218,7 +192,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { NamespaceID: "some random namespaceID", PageSize: 10, Query: "some invalid query", - }, searchattribute.TestNameTypeMap) + }, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -228,7 +202,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { s.NoError(err) storageWrapper := connector.NewMockClient(s.controller) storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) s.NoError(err) mockParser := NewMockQueryParser(s.controller) @@ -247,7 +221,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidToken() { PageSize: 1, NextPageToken: []byte{1, 2, 3}, } - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -261,7 +235,7 @@ func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { storageWrapper.EXPECT().QueryWithFilters(gomock.Any(), URI, gomock.Any(), 10, 0, gomock.Any()).Return([]string{"closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility"}, true, 1, nil) storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) 
s.NoError(err) mockParser := NewMockQueryParser(s.controller) @@ -281,12 +255,12 @@ func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { Query: "parsed by mockParser", } - response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Len(response.Executions, 1) - ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.ProtoEqual(ei, response.Executions[0]) } @@ -304,7 +278,7 @@ func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:15Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) storageWrapper.EXPECT().Get(gomock.Any(), URI, "test-namespace-id/closeTimeout_2020-02-05T09:56:16Z_test-workflow-id_MobileOnlyWorkflow::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord), nil) - visibilityArchiver := newVisibilityArchiver(s.container, storageWrapper) + visibilityArchiver := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) s.NoError(err) mockParser := NewMockQueryParser(s.controller) @@ -324,25 +298,25 @@ func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { Query: "parsed by mockParser", } - response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.NotNil(response.NextPageToken) s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], 
searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.ProtoEqual(ei, response.Executions[0]) - ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.ProtoEqual(ei, response.Executions[1]) request.NextPageToken = response.NextPageToken - response, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap) + response, err = visibilityArchiver.Query(ctx, URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Len(response.Executions, 1) - ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.expectedVisibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.ProtoEqual(ei, response.Executions[0]) } @@ -352,14 +326,14 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { s.NoError(err) storageWrapper := connector.NewMockClient(s.controller) storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - arc := newVisibilityArchiver(s.container, storageWrapper) + arc := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) req := &archiver.QueryVisibilityRequest{ NamespaceID: "", PageSize: 1, NextPageToken: nil, Query: "", } - _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap()) var svcErr *serviceerror.InvalidArgument @@ -371,7 +345,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { s.NoError(err) storageWrapper := connector.NewMockClient(s.controller) storageWrapper.EXPECT().Exist(gomock.Any(), URI, gomock.Any()).Return(false, nil) - arc 
:= newVisibilityArchiver(s.container, storageWrapper) + arc := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) req := &archiver.QueryVisibilityRequest{ NamespaceID: testNamespaceID, @@ -379,7 +353,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { NextPageToken: nil, Query: "", } - _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + _, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap()) var svcErr *serviceerror.InvalidArgument @@ -394,7 +368,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { storageWrapper.EXPECT().QueryWithFilters( gomock.Any(), URI, - gomock.Any(), + constructVisibilityFilenamePrefix(testNamespaceID, indexKeyCloseTimeout), 1, 0, gomock.Any(), @@ -407,7 +381,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { storageWrapper.EXPECT().QueryWithFilters( gomock.Any(), URI, - gomock.Any(), + constructVisibilityFilenamePrefix(testNamespaceID, indexKeyCloseTimeout), 1, 1, gomock.Any(), @@ -427,7 +401,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { "test-namespace-id/closeTimeout_2020-02-05T09:56:14Z_test-workflow-id2_MobileOnlyWorkflow"+ "::processMobileOnly_test-run-id.visibility").Return([]byte(exampleVisibilityRecord2), nil) - arc := newVisibilityArchiver(s.container, storageWrapper) + arc := newVisibilityArchiver(s.logger, s.metricsHandler, storageWrapper) response := &archiver.QueryVisibilityResponse{ Executions: nil, @@ -435,17 +409,17 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { } limit := 10 - executions := make(map[string]*workflow.WorkflowExecutionInfo, limit) + executions := make(map[string]*workflowpb.WorkflowExecutionInfo, limit) numPages := 2 - for i := 0; i < numPages; i++ { + for i := range numPages { req := &archiver.QueryVisibilityRequest{ NamespaceID: testNamespaceID, PageSize: 1, NextPageToken: response.NextPageToken, Query: 
"", } - response, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap) + response, err = arc.Query(context.Background(), URI, req, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Len(response.Executions, 1) diff --git a/common/archiver/history_iterator.go b/common/archiver/history_iterator.go index 898819d5148..97ea9aee597 100644 --- a/common/archiver/history_iterator.go +++ b/common/archiver/history_iterator.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination history_iterator_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination history_iterator_mock.go package archiver @@ -33,12 +9,11 @@ import ( historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/persistence" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) const ( @@ -243,14 +218,14 @@ func (i *historyIterator) reset(stateToken []byte) error { type ( // SizeEstimator is used to estimate the size of any object SizeEstimator interface { - EstimateSize(v interface{}) (int, error) + EstimateSize(v any) (int, error) } jsonSizeEstimator struct { } ) -func (e *jsonSizeEstimator) EstimateSize(v interface{}) (int, error) { +func (e *jsonSizeEstimator) EstimateSize(v any) (int, error) { // protojson must be used for proto structs. if protoMessage, ok := v.(proto.Message); ok { bs, err := protojson.Marshal(protoMessage) diff --git a/common/archiver/history_iterator_mock.go b/common/archiver/history_iterator_mock.go index 74c7d68e354..3217fc2104e 100644 --- a/common/archiver/history_iterator_mock.go +++ b/common/archiver/history_iterator_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: history_iterator.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package archiver -source history_iterator.go -destination history_iterator_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: history_iterator.go // Package archiver is a generated GoMock package. package archiver @@ -32,14 +13,15 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" archiver "go.temporal.io/server/api/archiver/v1" + gomock "go.uber.org/mock/gomock" ) // MockHistoryIterator is a mock of HistoryIterator interface. type MockHistoryIterator struct { ctrl *gomock.Controller recorder *MockHistoryIteratorMockRecorder + isgomock struct{} } // MockHistoryIteratorMockRecorder is the mock recorder for MockHistoryIterator. @@ -98,7 +80,7 @@ func (m *MockHistoryIterator) Next(arg0 context.Context) (*archiver.HistoryBlob, } // Next indicates an expected call of Next. 
-func (mr *MockHistoryIteratorMockRecorder) Next(arg0 interface{}) *gomock.Call { +func (mr *MockHistoryIteratorMockRecorder) Next(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Next", reflect.TypeOf((*MockHistoryIterator)(nil).Next), arg0) } @@ -107,6 +89,7 @@ func (mr *MockHistoryIteratorMockRecorder) Next(arg0 interface{}) *gomock.Call { type MockSizeEstimator struct { ctrl *gomock.Controller recorder *MockSizeEstimatorMockRecorder + isgomock struct{} } // MockSizeEstimatorMockRecorder is the mock recorder for MockSizeEstimator. @@ -127,7 +110,7 @@ func (m *MockSizeEstimator) EXPECT() *MockSizeEstimatorMockRecorder { } // EstimateSize mocks base method. -func (m *MockSizeEstimator) EstimateSize(v interface{}) (int, error) { +func (m *MockSizeEstimator) EstimateSize(v any) (int, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EstimateSize", v) ret0, _ := ret[0].(int) @@ -136,7 +119,7 @@ func (m *MockSizeEstimator) EstimateSize(v interface{}) (int, error) { } // EstimateSize indicates an expected call of EstimateSize. -func (mr *MockSizeEstimatorMockRecorder) EstimateSize(v interface{}) *gomock.Call { +func (mr *MockSizeEstimatorMockRecorder) EstimateSize(v any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EstimateSize", reflect.TypeOf((*MockSizeEstimator)(nil).EstimateSize), v) } diff --git a/common/archiver/history_iterator_test.go b/common/archiver/history_iterator_test.go index c26476790fc..be1e86f1ab6 100644 --- a/common/archiver/history_iterator_test.go +++ b/common/archiver/history_iterator_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package archiver import ( @@ -30,19 +6,18 @@ import ( "testing" "time" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" - "google.golang.org/protobuf/types/known/durationpb" - "google.golang.org/protobuf/types/known/timestamppb" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/persistence" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -81,7 +56,7 @@ type ( testSizeEstimator struct{} ) -func (e *testSizeEstimator) EstimateSize(v interface{}) (int, error) { +func (e *testSizeEstimator) EstimateSize(v any) (int, error) { historyBatch, ok := v.(*historypb.History) if !ok { return -1, errors.New("test size estimator only estimate the size of history batches") @@ -445,11 +420,11 @@ func (s *HistoryIteratorSuite) TestNext_Fail_ReturnErrOnSecondCallToNext() { func (s *HistoryIteratorSuite) TestNext_Success_TenCallsToNext() { var batchInfo []int - for i := 0; i < 100; i++ { + for range 100 { batchInfo = append(batchInfo, []int{1, 2, 3, 4, 4, 3, 2, 1}...) 
} var pages []page - for i := 0; i < 100; i++ { + for i := range 100 { p := page{ firstbatchIdx: i * 8, numBatches: 8, @@ -465,7 +440,7 @@ func (s *HistoryIteratorSuite) TestNext_Success_TenCallsToNext() { FinishedIteration: false, NextEventID: common.FirstEventID, } - for i := 0; i < 10; i++ { + for i := range 10 { s.assertStateMatches(expectedIteratorState, itr) s.True(itr.HasNext()) blob, err := itr.Next(context.Background()) @@ -666,7 +641,7 @@ func (s *HistoryIteratorSuite) constructHistoryBatches(batchInfo []int, page pag eventsID := firstEventID for batchIdx, numEvents := range batchInfo[page.firstbatchIdx : page.firstbatchIdx+page.numBatches] { var events []*historypb.HistoryEvent - for i := 0; i < numEvents; i++ { + for range numEvents { event := &historypb.HistoryEvent{ EventId: eventsID, Version: page.firstEventFailoverVersion, diff --git a/common/archiver/interface.go b/common/archiver/interface.go index a63df798e13..ada923ef222 100644 --- a/common/archiver/interface.go +++ b/common/archiver/interface.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination interface_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination interface_mock.go package archiver @@ -31,14 +7,8 @@ import ( historypb "go.temporal.io/api/history/v1" workflowpb "go.temporal.io/api/workflow/v1" - - "go.temporal.io/server/common/searchattribute" - archiverspb "go.temporal.io/server/api/archiver/v1" - "go.temporal.io/server/common/cluster" - "go.temporal.io/server/common/log" - "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/persistence" + "go.temporal.io/server/common/searchattribute" ) type ( @@ -70,14 +40,6 @@ type ( NextPageToken []byte } - // HistoryBootstrapContainer contains components needed by all history Archiver implementations - HistoryBootstrapContainer struct { - ExecutionManager persistence.ExecutionManager - Logger log.Logger - MetricsHandler metrics.Handler - ClusterMetadata cluster.Metadata - } - // HistoryArchiver is used to archive history and read archived history HistoryArchiver interface { // Archive is used to archive a Workflow's history. When the context expires the method should stop trying to archive. 
@@ -96,13 +58,6 @@ type ( ValidateURI(uri URI) error } - // VisibilityBootstrapContainer contains components needed by all visibility Archiver implementations - VisibilityBootstrapContainer struct { - Logger log.Logger - MetricsHandler metrics.Handler - ClusterMetadata cluster.Metadata - } - // QueryVisibilityRequest is the request to query archived visibility records QueryVisibilityRequest struct { NamespaceID string diff --git a/common/archiver/interface_mock.go b/common/archiver/interface_mock.go index 52c6972c78e..a70e885a0d9 100644 --- a/common/archiver/interface_mock.go +++ b/common/archiver/interface_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: interface.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package archiver -source interface.go -destination interface_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: interface.go // Package archiver is a generated GoMock package. package archiver @@ -32,15 +13,16 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" - v1 "go.temporal.io/server/api/archiver/v1" + archiver "go.temporal.io/server/api/archiver/v1" searchattribute "go.temporal.io/server/common/searchattribute" + gomock "go.uber.org/mock/gomock" ) // MockHistoryArchiver is a mock of HistoryArchiver interface. type MockHistoryArchiver struct { ctrl *gomock.Controller recorder *MockHistoryArchiverMockRecorder + isgomock struct{} } // MockHistoryArchiverMockRecorder is the mock recorder for MockHistoryArchiver. @@ -63,7 +45,7 @@ func (m *MockHistoryArchiver) EXPECT() *MockHistoryArchiverMockRecorder { // Archive mocks base method. func (m *MockHistoryArchiver) Archive(ctx context.Context, uri URI, request *ArchiveHistoryRequest, opts ...ArchiveOption) error { m.ctrl.T.Helper() - varargs := []interface{}{ctx, uri, request} + varargs := []any{ctx, uri, request} for _, a := range opts { varargs = append(varargs, a) } @@ -73,9 +55,9 @@ func (m *MockHistoryArchiver) Archive(ctx context.Context, uri URI, request *Arc } // Archive indicates an expected call of Archive. -func (mr *MockHistoryArchiverMockRecorder) Archive(ctx, uri, request interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockHistoryArchiverMockRecorder) Archive(ctx, uri, request any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, uri, request}, opts...) + varargs := append([]any{ctx, uri, request}, opts...) 
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Archive", reflect.TypeOf((*MockHistoryArchiver)(nil).Archive), varargs...) } @@ -89,7 +71,7 @@ func (m *MockHistoryArchiver) Get(ctx context.Context, url URI, request *GetHist } // Get indicates an expected call of Get. -func (mr *MockHistoryArchiverMockRecorder) Get(ctx, url, request interface{}) *gomock.Call { +func (mr *MockHistoryArchiverMockRecorder) Get(ctx, url, request any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockHistoryArchiver)(nil).Get), ctx, url, request) } @@ -103,7 +85,7 @@ func (m *MockHistoryArchiver) ValidateURI(uri URI) error { } // ValidateURI indicates an expected call of ValidateURI. -func (mr *MockHistoryArchiverMockRecorder) ValidateURI(uri interface{}) *gomock.Call { +func (mr *MockHistoryArchiverMockRecorder) ValidateURI(uri any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateURI", reflect.TypeOf((*MockHistoryArchiver)(nil).ValidateURI), uri) } @@ -112,6 +94,7 @@ func (mr *MockHistoryArchiverMockRecorder) ValidateURI(uri interface{}) *gomock. type MockVisibilityArchiver struct { ctrl *gomock.Controller recorder *MockVisibilityArchiverMockRecorder + isgomock struct{} } // MockVisibilityArchiverMockRecorder is the mock recorder for MockVisibilityArchiver. @@ -132,9 +115,9 @@ func (m *MockVisibilityArchiver) EXPECT() *MockVisibilityArchiverMockRecorder { } // Archive mocks base method. 
-func (m *MockVisibilityArchiver) Archive(ctx context.Context, uri URI, request *v1.VisibilityRecord, opts ...ArchiveOption) error { +func (m *MockVisibilityArchiver) Archive(ctx context.Context, uri URI, request *archiver.VisibilityRecord, opts ...ArchiveOption) error { m.ctrl.T.Helper() - varargs := []interface{}{ctx, uri, request} + varargs := []any{ctx, uri, request} for _, a := range opts { varargs = append(varargs, a) } @@ -144,9 +127,9 @@ func (m *MockVisibilityArchiver) Archive(ctx context.Context, uri URI, request * } // Archive indicates an expected call of Archive. -func (mr *MockVisibilityArchiverMockRecorder) Archive(ctx, uri, request interface{}, opts ...interface{}) *gomock.Call { +func (mr *MockVisibilityArchiverMockRecorder) Archive(ctx, uri, request any, opts ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{ctx, uri, request}, opts...) + varargs := append([]any{ctx, uri, request}, opts...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Archive", reflect.TypeOf((*MockVisibilityArchiver)(nil).Archive), varargs...) } @@ -160,7 +143,7 @@ func (m *MockVisibilityArchiver) Query(ctx context.Context, uri URI, request *Qu } // Query indicates an expected call of Query. -func (mr *MockVisibilityArchiverMockRecorder) Query(ctx, uri, request, saTypeMap interface{}) *gomock.Call { +func (mr *MockVisibilityArchiverMockRecorder) Query(ctx, uri, request, saTypeMap any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Query", reflect.TypeOf((*MockVisibilityArchiver)(nil).Query), ctx, uri, request, saTypeMap) } @@ -174,7 +157,7 @@ func (m *MockVisibilityArchiver) ValidateURI(uri URI) error { } // ValidateURI indicates an expected call of ValidateURI. 
-func (mr *MockVisibilityArchiverMockRecorder) ValidateURI(uri interface{}) *gomock.Call { +func (mr *MockVisibilityArchiverMockRecorder) ValidateURI(uri any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateURI", reflect.TypeOf((*MockVisibilityArchiver)(nil).ValidateURI), uri) } diff --git a/common/archiver/metadata_mock.go b/common/archiver/metadata_mock.go index f1f5dcdb6ca..74c55ab3db0 100644 --- a/common/archiver/metadata_mock.go +++ b/common/archiver/metadata_mock.go @@ -1,31 +1,7 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package archiver import ( - "github.com/golang/mock/gomock" + "go.uber.org/mock/gomock" ) // MetadataMock is an implementation of ArchivalMetadata that can be used for testing. 
diff --git a/common/archiver/metadata_mock_test.go b/common/archiver/metadata_mock_test.go index e2958a80c68..1043c1350d0 100644 --- a/common/archiver/metadata_mock_test.go +++ b/common/archiver/metadata_mock_test.go @@ -1,34 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package archiver import ( "testing" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" ) func TestMetadataMock(t *testing.T) { diff --git a/common/archiver/options.go b/common/archiver/options.go index f423f448122..f7146236552 100644 --- a/common/archiver/options.go +++ b/common/archiver/options.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package archiver import ( @@ -48,8 +24,8 @@ type ( // ProgressManager is used to record and load archive progress ProgressManager interface { - RecordProgress(ctx context.Context, progress interface{}) error - LoadProgress(ctx context.Context, valuePtr interface{}) error + RecordProgress(ctx context.Context, progress any) error + LoadProgress(ctx context.Context, valuePtr any) error HasProgress(ctx context.Context) bool } ) @@ -74,12 +50,12 @@ func GetHeartbeatArchiveOption() ArchiveOption { type heartbeatProgressManager struct{} -func (h *heartbeatProgressManager) RecordProgress(ctx context.Context, progress interface{}) error { +func (h *heartbeatProgressManager) RecordProgress(ctx context.Context, progress any) error { activity.RecordHeartbeat(ctx, progress) return nil } -func (h *heartbeatProgressManager) LoadProgress(ctx context.Context, valuePtr interface{}) error { +func (h *heartbeatProgressManager) LoadProgress(ctx context.Context, valuePtr any) error { if !h.HasProgress(ctx) { return errors.New("no progress information in the context") } diff --git a/common/archiver/provider/provider.go b/common/archiver/provider/provider.go index fb3c2449623..8b53125a0cf 100644 --- a/common/archiver/provider/provider.go +++ b/common/archiver/provider/provider.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source $GOFILE -destination provider_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination provider_mock.go package provider @@ -30,135 +6,168 @@ import ( "errors" "sync" - "go.temporal.io/server/common/archiver/gcloud" - "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/filestore" + "go.temporal.io/server/common/archiver/gcloud" "go.temporal.io/server/common/archiver/s3store" "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" ) var ( // ErrUnknownScheme is the error for unknown archiver scheme ErrUnknownScheme = errors.New("unknown archiver scheme") - // ErrNotSupported is the error for not supported archiver implementation - ErrNotSupported = errors.New("archiver provider not supported") - // ErrBootstrapContainerNotFound is the error for unable to find the bootstrap container given serviceName - ErrBootstrapContainerNotFound = errors.New("unable to find bootstrap container for the given service name") // ErrArchiverConfigNotFound is the error for unable to find the config for an archiver given scheme ErrArchiverConfigNotFound = errors.New("unable to find archiver config for the given scheme") - // ErrBootstrapContainerAlreadyRegistered is the error for registering multiple containers for the same serviceName - ErrBootstrapContainerAlreadyRegistered = errors.New("bootstrap container has already been registered") ) type ( - // ArchiverProvider returns history or visibility archiver based on the scheme and serviceName. - // The archiver for each combination of scheme and serviceName will be created only once and cached. + // ArchiverProvider returns history or visibility archiver based on the scheme. + // The archiver for each scheme will be created only once and cached. 
ArchiverProvider interface { - RegisterBootstrapContainer( - serviceName string, - historyContainer *archiver.HistoryBootstrapContainer, - visibilityContainter *archiver.VisibilityBootstrapContainer, - ) error - GetHistoryArchiver(scheme, serviceName string) (archiver.HistoryArchiver, error) - GetVisibilityArchiver(scheme, serviceName string) (archiver.VisibilityArchiver, error) + GetHistoryArchiver(scheme string) (archiver.HistoryArchiver, error) + GetVisibilityArchiver(scheme string) (archiver.VisibilityArchiver, error) + } + + // NewCustomHistoryArchiverParams provides dependencies for constructing a history archiver. + NewCustomHistoryArchiverParams struct { + Scheme string + ExecutionManager persistence.ExecutionManager + Logger log.Logger + MetricsHandler metrics.Handler + Configs map[string]any + } + + // NewCustomVisibilityArchiverParams provides dependencies for constructing a visibility archiver. + NewCustomVisibilityArchiverParams struct { + Scheme string + Logger log.Logger + MetricsHandler metrics.Handler + Configs map[string]any + } + + // CustomHistoryArchiverFactory constructs a history archiver for the given scheme. + // Return ErrUnknownScheme to fall back to the default implementation. + // If a non-nil archiver is returned, it takes precedence over built-in archiver implementations. + CustomHistoryArchiverFactory interface { + NewCustomHistoryArchiver(NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) + } + + // CustomVisibilityArchiverFactory constructs a visibility archiver for the given scheme. + // Return ErrUnknownScheme to fall back to the default implementation. + // If a non-nil archiver is returned, it takes precedence over built-in archiver implementations. 
+ CustomVisibilityArchiverFactory interface { + NewCustomVisibilityArchiver(NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) } + CustomHistoryArchiverFactoryFunc func(NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) + + CustomVisibilityArchiverFactoryFunc func(NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) + archiverProvider struct { sync.RWMutex historyArchiverConfigs *config.HistoryArchiverProvider visibilityArchiverConfigs *config.VisibilityArchiverProvider - // Key for the container is just serviceName - historyContainers map[string]*archiver.HistoryBootstrapContainer - visibilityContainers map[string]*archiver.VisibilityBootstrapContainer + customHistoryArchiverFactory CustomHistoryArchiverFactory + customVisibilityArchiverFactory CustomVisibilityArchiverFactory - // Key for the archiver is scheme + serviceName + executionManager persistence.ExecutionManager + logger log.Logger + metricsHandler metrics.Handler + + // Key for the archiver is scheme historyArchivers map[string]archiver.HistoryArchiver visibilityArchivers map[string]archiver.VisibilityArchiver } ) +func (f CustomHistoryArchiverFactoryFunc) NewCustomHistoryArchiver( + params NewCustomHistoryArchiverParams, +) (archiver.HistoryArchiver, error) { + return f(params) +} + +func (f CustomVisibilityArchiverFactoryFunc) NewCustomVisibilityArchiver( + params NewCustomVisibilityArchiverParams, +) (archiver.VisibilityArchiver, error) { + return f(params) +} + // NewArchiverProvider returns a new Archiver provider func NewArchiverProvider( historyArchiverConfigs *config.HistoryArchiverProvider, visibilityArchiverConfigs *config.VisibilityArchiverProvider, + customHistoryArchiverFactory CustomHistoryArchiverFactory, + customVisibilityArchiverFactory CustomVisibilityArchiverFactory, + executionManager persistence.ExecutionManager, + logger log.Logger, + metricsHandler metrics.Handler, ) ArchiverProvider { return &archiverProvider{ - 
historyArchiverConfigs: historyArchiverConfigs, - visibilityArchiverConfigs: visibilityArchiverConfigs, - historyContainers: make(map[string]*archiver.HistoryBootstrapContainer), - visibilityContainers: make(map[string]*archiver.VisibilityBootstrapContainer), - historyArchivers: make(map[string]archiver.HistoryArchiver), - visibilityArchivers: make(map[string]archiver.VisibilityArchiver), - } -} - -// RegisterBootstrapContainer stores the given bootstrap container given the serviceName -// The container should be registered when a service starts up and before GetArchiver() is ever called. -// Later calls to GetArchiver() will used the registered container to initialize new archivers. -// If the container for a service has already registered, and this method is invoked for that service again -// with an non-nil container, an error will be returned. -func (p *archiverProvider) RegisterBootstrapContainer( - serviceName string, - historyContainer *archiver.HistoryBootstrapContainer, - visibilityContainter *archiver.VisibilityBootstrapContainer, -) error { - p.Lock() - defer p.Unlock() - - if _, ok := p.historyContainers[serviceName]; ok && historyContainer != nil { - return ErrBootstrapContainerAlreadyRegistered - } - if _, ok := p.visibilityContainers[serviceName]; ok && visibilityContainter != nil { - return ErrBootstrapContainerAlreadyRegistered + historyArchiverConfigs: historyArchiverConfigs, + visibilityArchiverConfigs: visibilityArchiverConfigs, + executionManager: executionManager, + logger: logger, + metricsHandler: metricsHandler, + customHistoryArchiverFactory: customHistoryArchiverFactory, + customVisibilityArchiverFactory: customVisibilityArchiverFactory, + historyArchivers: make(map[string]archiver.HistoryArchiver), + visibilityArchivers: make(map[string]archiver.VisibilityArchiver), } - - if historyContainer != nil { - p.historyContainers[serviceName] = historyContainer - } - if visibilityContainter != nil { - p.visibilityContainers[serviceName] = 
visibilityContainter - } - return nil } -func (p *archiverProvider) GetHistoryArchiver(scheme, serviceName string) (historyArchiver archiver.HistoryArchiver, err error) { - archiverKey := p.getArchiverKey(scheme, serviceName) +func (p *archiverProvider) GetHistoryArchiver(scheme string) (historyArchiver archiver.HistoryArchiver, err error) { p.RLock() - if historyArchiver, ok := p.historyArchivers[archiverKey]; ok { + if historyArchiver, ok := p.historyArchivers[scheme]; ok { p.RUnlock() return historyArchiver, nil } p.RUnlock() - container, ok := p.historyContainers[serviceName] - if !ok { - return nil, ErrBootstrapContainerNotFound - } - - switch scheme { - case filestore.URIScheme: - if p.historyArchiverConfigs.Filestore == nil { - return nil, ErrArchiverConfigNotFound + if p.customHistoryArchiverFactory != nil { + var customConfigs map[string]any + if p.historyArchiverConfigs != nil { + customConfigs = p.historyArchiverConfigs.CustomStores[scheme] } - historyArchiver, err = filestore.NewHistoryArchiver(container, p.historyArchiverConfigs.Filestore) - - case gcloud.URIScheme: - if p.historyArchiverConfigs.Gstorage == nil { - return nil, ErrArchiverConfigNotFound + historyArchiver, err = p.customHistoryArchiverFactory.NewCustomHistoryArchiver(NewCustomHistoryArchiverParams{ + Scheme: scheme, + ExecutionManager: p.executionManager, + Logger: p.logger, + MetricsHandler: p.metricsHandler, + Configs: customConfigs, + }) + if err != nil && !errors.Is(err, ErrUnknownScheme) { + return nil, err } + } - historyArchiver, err = gcloud.NewHistoryArchiver(container, p.historyArchiverConfigs.Gstorage) - - case s3store.URIScheme: - if p.historyArchiverConfigs.S3store == nil { - return nil, ErrArchiverConfigNotFound + if historyArchiver == nil { + switch scheme { + case filestore.URIScheme: + if p.historyArchiverConfigs.Filestore == nil { + return nil, ErrArchiverConfigNotFound + } + historyArchiver, err = filestore.NewHistoryArchiver(p.executionManager, p.logger, 
p.metricsHandler, p.historyArchiverConfigs.Filestore) + + case gcloud.URIScheme: + if p.historyArchiverConfigs.Gstorage == nil { + return nil, ErrArchiverConfigNotFound + } + + historyArchiver, err = gcloud.NewHistoryArchiver(p.executionManager, p.logger, p.metricsHandler, p.historyArchiverConfigs.Gstorage) + + case s3store.URIScheme: + if p.historyArchiverConfigs.S3store == nil { + return nil, ErrArchiverConfigNotFound + } + historyArchiver, err = s3store.NewHistoryArchiver(p.executionManager, p.logger, p.metricsHandler, p.historyArchiverConfigs.S3store) + default: + return nil, ErrUnknownScheme } - historyArchiver, err = s3store.NewHistoryArchiver(container, p.historyArchiverConfigs.S3store) - default: - return nil, ErrUnknownScheme } if err != nil { @@ -167,49 +176,61 @@ func (p *archiverProvider) GetHistoryArchiver(scheme, serviceName string) (histo p.Lock() defer p.Unlock() - if existingHistoryArchiver, ok := p.historyArchivers[archiverKey]; ok { + if existingHistoryArchiver, ok := p.historyArchivers[scheme]; ok { return existingHistoryArchiver, nil } - p.historyArchivers[archiverKey] = historyArchiver + p.historyArchivers[scheme] = historyArchiver return historyArchiver, nil } -func (p *archiverProvider) GetVisibilityArchiver(scheme, serviceName string) (archiver.VisibilityArchiver, error) { - archiverKey := p.getArchiverKey(scheme, serviceName) +func (p *archiverProvider) GetVisibilityArchiver(scheme string) (archiver.VisibilityArchiver, error) { p.RLock() - if visibilityArchiver, ok := p.visibilityArchivers[archiverKey]; ok { + if visibilityArchiver, ok := p.visibilityArchivers[scheme]; ok { p.RUnlock() return visibilityArchiver, nil } p.RUnlock() - container, ok := p.visibilityContainers[serviceName] - if !ok { - return nil, ErrBootstrapContainerNotFound - } - var visibilityArchiver archiver.VisibilityArchiver var err error - switch scheme { - case filestore.URIScheme: - if p.visibilityArchiverConfigs.Filestore == nil { - return nil, 
ErrArchiverConfigNotFound + if p.customVisibilityArchiverFactory != nil { + var customConfigs map[string]any + if p.visibilityArchiverConfigs != nil { + customConfigs = p.visibilityArchiverConfigs.CustomStores[scheme] } - visibilityArchiver, err = filestore.NewVisibilityArchiver(container, p.visibilityArchiverConfigs.Filestore) - case s3store.URIScheme: - if p.visibilityArchiverConfigs.S3store == nil { - return nil, ErrArchiverConfigNotFound + visibilityArchiver, err = p.customVisibilityArchiverFactory.NewCustomVisibilityArchiver(NewCustomVisibilityArchiverParams{ + Scheme: scheme, + Logger: p.logger, + MetricsHandler: p.metricsHandler, + Configs: customConfigs, + }) + if err != nil && !errors.Is(err, ErrUnknownScheme) { + return nil, err } - visibilityArchiver, err = s3store.NewVisibilityArchiver(container, p.visibilityArchiverConfigs.S3store) - case gcloud.URIScheme: - if p.visibilityArchiverConfigs.Gstorage == nil { - return nil, ErrArchiverConfigNotFound - } - visibilityArchiver, err = gcloud.NewVisibilityArchiver(container, p.visibilityArchiverConfigs.Gstorage) + } - default: - return nil, ErrUnknownScheme + if visibilityArchiver == nil { + switch scheme { + case filestore.URIScheme: + if p.visibilityArchiverConfigs.Filestore == nil { + return nil, ErrArchiverConfigNotFound + } + visibilityArchiver, err = filestore.NewVisibilityArchiver(p.logger, p.metricsHandler, p.visibilityArchiverConfigs.Filestore) + case s3store.URIScheme: + if p.visibilityArchiverConfigs.S3store == nil { + return nil, ErrArchiverConfigNotFound + } + visibilityArchiver, err = s3store.NewVisibilityArchiver(p.logger, p.metricsHandler, p.visibilityArchiverConfigs.S3store) + case gcloud.URIScheme: + if p.visibilityArchiverConfigs.Gstorage == nil { + return nil, ErrArchiverConfigNotFound + } + visibilityArchiver, err = gcloud.NewVisibilityArchiver(p.logger, p.metricsHandler, p.visibilityArchiverConfigs.Gstorage) + + default: + return nil, ErrUnknownScheme + } } if err != nil { return nil, err 
@@ -217,14 +238,10 @@ func (p *archiverProvider) GetVisibilityArchiver(scheme, serviceName string) (ar p.Lock() defer p.Unlock() - if existingVisibilityArchiver, ok := p.visibilityArchivers[archiverKey]; ok { + if existingVisibilityArchiver, ok := p.visibilityArchivers[scheme]; ok { return existingVisibilityArchiver, nil } - p.visibilityArchivers[archiverKey] = visibilityArchiver + p.visibilityArchivers[scheme] = visibilityArchiver return visibilityArchiver, nil } - -func (p *archiverProvider) getArchiverKey(scheme, serviceName string) string { - return scheme + ":" + serviceName -} diff --git a/common/archiver/provider/provider_mock.go b/common/archiver/provider/provider_mock.go index f789e406387..573b90f5a2b 100644 --- a/common/archiver/provider/provider_mock.go +++ b/common/archiver/provider/provider_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: provider.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package provider -source provider.go -destination provider_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: provider.go // Package provider is a generated GoMock package. package provider @@ -31,14 +12,15 @@ package provider import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" archiver "go.temporal.io/server/common/archiver" + gomock "go.uber.org/mock/gomock" ) // MockArchiverProvider is a mock of ArchiverProvider interface. type MockArchiverProvider struct { ctrl *gomock.Controller recorder *MockArchiverProviderMockRecorder + isgomock struct{} } // MockArchiverProviderMockRecorder is the mock recorder for MockArchiverProvider. @@ -59,45 +41,109 @@ func (m *MockArchiverProvider) EXPECT() *MockArchiverProviderMockRecorder { } // GetHistoryArchiver mocks base method. -func (m *MockArchiverProvider) GetHistoryArchiver(scheme, serviceName string) (archiver.HistoryArchiver, error) { +func (m *MockArchiverProvider) GetHistoryArchiver(scheme string) (archiver.HistoryArchiver, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetHistoryArchiver", scheme, serviceName) + ret := m.ctrl.Call(m, "GetHistoryArchiver", scheme) ret0, _ := ret[0].(archiver.HistoryArchiver) ret1, _ := ret[1].(error) return ret0, ret1 } // GetHistoryArchiver indicates an expected call of GetHistoryArchiver. 
-func (mr *MockArchiverProviderMockRecorder) GetHistoryArchiver(scheme, serviceName interface{}) *gomock.Call { +func (mr *MockArchiverProviderMockRecorder) GetHistoryArchiver(scheme any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryArchiver", reflect.TypeOf((*MockArchiverProvider)(nil).GetHistoryArchiver), scheme, serviceName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHistoryArchiver", reflect.TypeOf((*MockArchiverProvider)(nil).GetHistoryArchiver), scheme) } // GetVisibilityArchiver mocks base method. -func (m *MockArchiverProvider) GetVisibilityArchiver(scheme, serviceName string) (archiver.VisibilityArchiver, error) { +func (m *MockArchiverProvider) GetVisibilityArchiver(scheme string) (archiver.VisibilityArchiver, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetVisibilityArchiver", scheme, serviceName) + ret := m.ctrl.Call(m, "GetVisibilityArchiver", scheme) ret0, _ := ret[0].(archiver.VisibilityArchiver) ret1, _ := ret[1].(error) return ret0, ret1 } // GetVisibilityArchiver indicates an expected call of GetVisibilityArchiver. -func (mr *MockArchiverProviderMockRecorder) GetVisibilityArchiver(scheme, serviceName interface{}) *gomock.Call { +func (mr *MockArchiverProviderMockRecorder) GetVisibilityArchiver(scheme any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVisibilityArchiver", reflect.TypeOf((*MockArchiverProvider)(nil).GetVisibilityArchiver), scheme, serviceName) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVisibilityArchiver", reflect.TypeOf((*MockArchiverProvider)(nil).GetVisibilityArchiver), scheme) +} + +// MockCustomHistoryArchiverFactory is a mock of CustomHistoryArchiverFactory interface. 
+type MockCustomHistoryArchiverFactory struct { + ctrl *gomock.Controller + recorder *MockCustomHistoryArchiverFactoryMockRecorder + isgomock struct{} +} + +// MockCustomHistoryArchiverFactoryMockRecorder is the mock recorder for MockCustomHistoryArchiverFactory. +type MockCustomHistoryArchiverFactoryMockRecorder struct { + mock *MockCustomHistoryArchiverFactory +} + +// NewMockCustomHistoryArchiverFactory creates a new mock instance. +func NewMockCustomHistoryArchiverFactory(ctrl *gomock.Controller) *MockCustomHistoryArchiverFactory { + mock := &MockCustomHistoryArchiverFactory{ctrl: ctrl} + mock.recorder = &MockCustomHistoryArchiverFactoryMockRecorder{mock} + return mock } -// RegisterBootstrapContainer mocks base method. -func (m *MockArchiverProvider) RegisterBootstrapContainer(serviceName string, historyContainer *archiver.HistoryBootstrapContainer, visibilityContainter *archiver.VisibilityBootstrapContainer) error { +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCustomHistoryArchiverFactory) EXPECT() *MockCustomHistoryArchiverFactoryMockRecorder { + return m.recorder +} + +// NewCustomHistoryArchiver mocks base method. +func (m *MockCustomHistoryArchiverFactory) NewCustomHistoryArchiver(arg0 NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterBootstrapContainer", serviceName, historyContainer, visibilityContainter) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "NewCustomHistoryArchiver", arg0) + ret0, _ := ret[0].(archiver.HistoryArchiver) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NewCustomHistoryArchiver indicates an expected call of NewCustomHistoryArchiver. 
+func (mr *MockCustomHistoryArchiverFactoryMockRecorder) NewCustomHistoryArchiver(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCustomHistoryArchiver", reflect.TypeOf((*MockCustomHistoryArchiverFactory)(nil).NewCustomHistoryArchiver), arg0) +} + +// MockCustomVisibilityArchiverFactory is a mock of CustomVisibilityArchiverFactory interface. +type MockCustomVisibilityArchiverFactory struct { + ctrl *gomock.Controller + recorder *MockCustomVisibilityArchiverFactoryMockRecorder + isgomock struct{} +} + +// MockCustomVisibilityArchiverFactoryMockRecorder is the mock recorder for MockCustomVisibilityArchiverFactory. +type MockCustomVisibilityArchiverFactoryMockRecorder struct { + mock *MockCustomVisibilityArchiverFactory +} + +// NewMockCustomVisibilityArchiverFactory creates a new mock instance. +func NewMockCustomVisibilityArchiverFactory(ctrl *gomock.Controller) *MockCustomVisibilityArchiverFactory { + mock := &MockCustomVisibilityArchiverFactory{ctrl: ctrl} + mock.recorder = &MockCustomVisibilityArchiverFactoryMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCustomVisibilityArchiverFactory) EXPECT() *MockCustomVisibilityArchiverFactoryMockRecorder { + return m.recorder +} + +// NewCustomVisibilityArchiver mocks base method. +func (m *MockCustomVisibilityArchiverFactory) NewCustomVisibilityArchiver(arg0 NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewCustomVisibilityArchiver", arg0) + ret0, _ := ret[0].(archiver.VisibilityArchiver) + ret1, _ := ret[1].(error) + return ret0, ret1 } -// RegisterBootstrapContainer indicates an expected call of RegisterBootstrapContainer. 
-func (mr *MockArchiverProviderMockRecorder) RegisterBootstrapContainer(serviceName, historyContainer, visibilityContainter interface{}) *gomock.Call { +// NewCustomVisibilityArchiver indicates an expected call of NewCustomVisibilityArchiver. +func (mr *MockCustomVisibilityArchiverFactoryMockRecorder) NewCustomVisibilityArchiver(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterBootstrapContainer", reflect.TypeOf((*MockArchiverProvider)(nil).RegisterBootstrapContainer), serviceName, historyContainer, visibilityContainter) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewCustomVisibilityArchiver", reflect.TypeOf((*MockCustomVisibilityArchiverFactory)(nil).NewCustomVisibilityArchiver), arg0) } diff --git a/common/archiver/provider/provider_test.go b/common/archiver/provider/provider_test.go new file mode 100644 index 00000000000..6593f72eeae --- /dev/null +++ b/common/archiver/provider/provider_test.go @@ -0,0 +1,618 @@ +package provider + +import ( + "errors" + "sync" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/server/common/archiver" + "go.temporal.io/server/common/archiver/filestore" + "go.temporal.io/server/common/archiver/gcloud" + "go.temporal.io/server/common/archiver/s3store" + "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/persistence" + "go.uber.org/mock/gomock" +) + +type ( + ProviderSuite struct { + *require.Assertions + suite.Suite + + controller *gomock.Controller + mockExecutionManager *persistence.MockExecutionManager + mockHistoryArchiver *archiver.MockHistoryArchiver + mockVisibilityArchiver *archiver.MockVisibilityArchiver + mockCustomHistoryFactory *MockCustomHistoryArchiverFactory + mockCustomVisibilityFactory *MockCustomVisibilityArchiverFactory + + logger log.Logger + metricsHandler metrics.Handler + } +) + 
+func TestProviderSuite(t *testing.T) { + suite.Run(t, new(ProviderSuite)) +} + +func (s *ProviderSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + + s.mockExecutionManager = persistence.NewMockExecutionManager(s.controller) + s.mockHistoryArchiver = archiver.NewMockHistoryArchiver(s.controller) + s.mockVisibilityArchiver = archiver.NewMockVisibilityArchiver(s.controller) + s.mockCustomHistoryFactory = NewMockCustomHistoryArchiverFactory(s.controller) + s.mockCustomVisibilityFactory = NewMockCustomVisibilityArchiverFactory(s.controller) + + s.logger = log.NewNoopLogger() + s.metricsHandler = metrics.NoopMetricsHandler +} + +func (s *ProviderSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *ProviderSuite) TestNewArchiverProvider() { + historyConfig := &config.HistoryArchiverProvider{} + visibilityConfig := &config.VisibilityArchiverProvider{} + + provider := NewArchiverProvider( + historyConfig, + visibilityConfig, + s.mockCustomHistoryFactory, + s.mockCustomVisibilityFactory, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + s.NotNil(provider) + + // Verify internal state + p := provider.(*archiverProvider) + s.Equal(historyConfig, p.historyArchiverConfigs) + s.Equal(visibilityConfig, p.visibilityArchiverConfigs) + s.Equal(s.mockCustomHistoryFactory, p.customHistoryArchiverFactory) + s.Equal(s.mockCustomVisibilityFactory, p.customVisibilityArchiverFactory) + s.Equal(s.mockExecutionManager, p.executionManager) + s.Equal(s.logger, p.logger) + s.Equal(s.metricsHandler, p.metricsHandler) + s.NotNil(p.historyArchivers) + s.NotNil(p.visibilityArchivers) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_CustomFactory_Success() { + scheme := "custom" + + provider := NewArchiverProvider( + &config.HistoryArchiverProvider{ + CustomStores: map[string]map[string]any{ + scheme: {"key": "value"}, + }, + }, + nil, + s.mockCustomHistoryFactory, + nil, + s.mockExecutionManager, + s.logger, + 
s.metricsHandler, + ) + + s.mockCustomHistoryFactory.EXPECT(). + NewCustomHistoryArchiver(gomock.Any()). + DoAndReturn(func(params NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) { + s.Equal(scheme, params.Scheme) + s.Equal(s.mockExecutionManager, params.ExecutionManager) + s.Equal(s.logger, params.Logger) + s.Equal(s.metricsHandler, params.MetricsHandler) + s.Equal(map[string]any{"key": "value"}, params.Configs) + return s.mockHistoryArchiver, nil + }) + + result, err := provider.GetHistoryArchiver(scheme) + s.NoError(err) + s.Equal(s.mockHistoryArchiver, result) + + // Test caching - should not call factory again + result2, err := provider.GetHistoryArchiver(scheme) + s.NoError(err) + s.Equal(result, result2) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_CustomFactory_ReturnsError() { + scheme := "custom" + expectedErr := errors.New("custom factory error") + + provider := NewArchiverProvider( + nil, + nil, + s.mockCustomHistoryFactory, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + s.mockCustomHistoryFactory.EXPECT(). + NewCustomHistoryArchiver(gomock.Any()). + Return(nil, expectedErr) + + result, err := provider.GetHistoryArchiver(scheme) + s.Error(err) + s.Equal(expectedErr, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_CustomFactory_FallbackToBuiltIn() { + scheme := filestore.URIScheme + + provider := NewArchiverProvider( + &config.HistoryArchiverProvider{ + Filestore: &config.FilestoreArchiver{ + FileMode: "0600", + DirMode: "0700", + }, + }, + nil, + s.mockCustomHistoryFactory, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + // Custom factory returns ErrUnknownScheme, should fallback to built-in + s.mockCustomHistoryFactory.EXPECT(). + NewCustomHistoryArchiver(gomock.Any()). 
+ Return(nil, ErrUnknownScheme) + + result, err := provider.GetHistoryArchiver(scheme) + s.NoError(err) + s.NotNil(result) + + // Verify it's cached + result2, err := provider.GetHistoryArchiver(scheme) + s.NoError(err) + s.Equal(result, result2) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_UnknownScheme() { + scheme := "unknown" + + provider := NewArchiverProvider( + nil, + nil, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetHistoryArchiver(scheme) + s.Error(err) + s.Equal(ErrUnknownScheme, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_ConfigNotFound_Filestore() { + scheme := filestore.URIScheme + + provider := NewArchiverProvider( + &config.HistoryArchiverProvider{ + Filestore: nil, // Config not set + }, + nil, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetHistoryArchiver(scheme) + s.Error(err) + s.Equal(ErrArchiverConfigNotFound, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_ConfigNotFound_GCloud() { + scheme := gcloud.URIScheme + + provider := NewArchiverProvider( + &config.HistoryArchiverProvider{ + Gstorage: nil, // Config not set + }, + nil, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetHistoryArchiver(scheme) + s.Error(err) + s.Equal(ErrArchiverConfigNotFound, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_ConfigNotFound_S3() { + scheme := s3store.URIScheme + + provider := NewArchiverProvider( + &config.HistoryArchiverProvider{ + S3store: nil, // Config not set + }, + nil, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetHistoryArchiver(scheme) + s.Error(err) + s.Equal(ErrArchiverConfigNotFound, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_ConcurrentAccess() { + scheme := "custom" + + provider := 
NewArchiverProvider( + nil, + nil, + s.mockCustomHistoryFactory, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + // The factory may be called multiple times before caching occurs, + // but all calls should return the same archiver. + s.mockCustomHistoryFactory.EXPECT(). + NewCustomHistoryArchiver(gomock.Any()). + Return(s.mockHistoryArchiver, nil). + AnyTimes() + + var wg sync.WaitGroup + numGoroutines := 10 + results := make([]archiver.HistoryArchiver, numGoroutines) + archiverErrors := make([]error, numGoroutines) + + for i := range numGoroutines { + wg.Go(func() { + results[i], archiverErrors[i] = provider.GetHistoryArchiver(scheme) + }) + } + + wg.Wait() + + // All should succeed and return a valid archiver + for i := range numGoroutines { + s.NoError(archiverErrors[i]) + s.NotNil(results[i]) + } + + // Verify caching works after concurrent initialization + cachedResult, err := provider.GetHistoryArchiver(scheme) + s.NoError(err) + s.NotNil(cachedResult) +} + +func (s *ProviderSuite) TestGetHistoryArchiver_NilConfigs() { + scheme := "custom" + + provider := NewArchiverProvider( + nil, // nil history config + nil, + s.mockCustomHistoryFactory, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + s.mockCustomHistoryFactory.EXPECT(). + NewCustomHistoryArchiver(gomock.Any()). 
+ DoAndReturn(func(params NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) { + s.Nil(params.Configs) // Should be nil when configs are not provided + return s.mockHistoryArchiver, nil + }) + + result, err := provider.GetHistoryArchiver(scheme) + s.NoError(err) + s.Equal(s.mockHistoryArchiver, result) +} + +func (s *ProviderSuite) TestCustomHistoryArchiverFactoryFunc() { + called := false + expectedArchiver := s.mockHistoryArchiver + + factoryFunc := CustomHistoryArchiverFactoryFunc(func(params NewCustomHistoryArchiverParams) (archiver.HistoryArchiver, error) { + called = true + s.Equal("test-scheme", params.Scheme) + return expectedArchiver, nil + }) + + result, err := factoryFunc.NewCustomHistoryArchiver(NewCustomHistoryArchiverParams{ + Scheme: "test-scheme", + }) + + s.NoError(err) + s.True(called) + s.Equal(expectedArchiver, result) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_CustomFactory_Success() { + scheme := "custom" + + provider := NewArchiverProvider( + nil, + &config.VisibilityArchiverProvider{ + CustomStores: map[string]map[string]any{ + scheme: {"key": "value"}, + }, + }, + nil, + s.mockCustomVisibilityFactory, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + s.mockCustomVisibilityFactory.EXPECT(). + NewCustomVisibilityArchiver(gomock.Any()). 
+ DoAndReturn(func(params NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) { + s.Equal(scheme, params.Scheme) + s.Equal(s.logger, params.Logger) + s.Equal(s.metricsHandler, params.MetricsHandler) + s.Equal(map[string]any{"key": "value"}, params.Configs) + return s.mockVisibilityArchiver, nil + }) + + result, err := provider.GetVisibilityArchiver(scheme) + s.NoError(err) + s.Equal(s.mockVisibilityArchiver, result) + + // Test caching - should not call factory again + result2, err := provider.GetVisibilityArchiver(scheme) + s.NoError(err) + s.Equal(result, result2) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_CustomFactory_ReturnsError() { + scheme := "custom" + expectedErr := errors.New("custom factory error") + + provider := NewArchiverProvider( + nil, + nil, + nil, + s.mockCustomVisibilityFactory, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + s.mockCustomVisibilityFactory.EXPECT(). + NewCustomVisibilityArchiver(gomock.Any()). + Return(nil, expectedErr) + + result, err := provider.GetVisibilityArchiver(scheme) + s.Error(err) + s.Equal(expectedErr, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_CustomFactory_FallbackToBuiltIn() { + scheme := filestore.URIScheme + + provider := NewArchiverProvider( + nil, + &config.VisibilityArchiverProvider{ + Filestore: &config.FilestoreArchiver{ + FileMode: "0600", + DirMode: "0700", + }, + }, + nil, + s.mockCustomVisibilityFactory, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + // Custom factory returns ErrUnknownScheme, should fallback to built-in + s.mockCustomVisibilityFactory.EXPECT(). + NewCustomVisibilityArchiver(gomock.Any()). 
+ Return(nil, ErrUnknownScheme) + + result, err := provider.GetVisibilityArchiver(scheme) + s.NoError(err) + s.NotNil(result) + + // Verify it's cached + result2, err := provider.GetVisibilityArchiver(scheme) + s.NoError(err) + s.Equal(result, result2) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_UnknownScheme() { + scheme := "unknown" + + provider := NewArchiverProvider( + nil, + nil, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetVisibilityArchiver(scheme) + s.Error(err) + s.Equal(ErrUnknownScheme, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_ConfigNotFound_Filestore() { + scheme := filestore.URIScheme + + provider := NewArchiverProvider( + nil, + &config.VisibilityArchiverProvider{ + Filestore: nil, // Config not set + }, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetVisibilityArchiver(scheme) + s.Error(err) + s.Equal(ErrArchiverConfigNotFound, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_ConfigNotFound_GCloud() { + scheme := gcloud.URIScheme + + provider := NewArchiverProvider( + nil, + &config.VisibilityArchiverProvider{ + Gstorage: nil, // Config not set + }, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetVisibilityArchiver(scheme) + s.Error(err) + s.Equal(ErrArchiverConfigNotFound, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_ConfigNotFound_S3() { + scheme := s3store.URIScheme + + provider := NewArchiverProvider( + nil, + &config.VisibilityArchiverProvider{ + S3store: nil, // Config not set + }, + nil, + nil, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + result, err := provider.GetVisibilityArchiver(scheme) + s.Error(err) + s.Equal(ErrArchiverConfigNotFound, err) + s.Nil(result) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_ConcurrentAccess() { + 
scheme := "custom" + + provider := NewArchiverProvider( + nil, + nil, + nil, + s.mockCustomVisibilityFactory, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + // The factory may be called multiple times before caching occurs, + // but all calls should return the same archiver. + s.mockCustomVisibilityFactory.EXPECT(). + NewCustomVisibilityArchiver(gomock.Any()). + Return(s.mockVisibilityArchiver, nil). + AnyTimes() + + var wg sync.WaitGroup + numGoroutines := 10 + results := make([]archiver.VisibilityArchiver, numGoroutines) + archiverErrors := make([]error, numGoroutines) + + for i := range numGoroutines { + wg.Go(func() { + results[i], archiverErrors[i] = provider.GetVisibilityArchiver(scheme) + }) + } + + wg.Wait() + + // All should succeed and return a valid archiver + for i := range numGoroutines { + s.NoError(archiverErrors[i]) + s.NotNil(results[i]) + } + + // Verify caching works after concurrent initialization + cachedResult, err := provider.GetVisibilityArchiver(scheme) + s.NoError(err) + s.NotNil(cachedResult) +} + +func (s *ProviderSuite) TestGetVisibilityArchiver_NilConfigs() { + scheme := "custom" + + provider := NewArchiverProvider( + nil, + nil, // nil visibility config + nil, + s.mockCustomVisibilityFactory, + s.mockExecutionManager, + s.logger, + s.metricsHandler, + ) + + s.mockCustomVisibilityFactory.EXPECT(). + NewCustomVisibilityArchiver(gomock.Any()). 
+ DoAndReturn(func(params NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) { + s.Nil(params.Configs) // Should be nil when configs are not provided + return s.mockVisibilityArchiver, nil + }) + + result, err := provider.GetVisibilityArchiver(scheme) + s.NoError(err) + s.Equal(s.mockVisibilityArchiver, result) +} + +func (s *ProviderSuite) TestCustomVisibilityArchiverFactoryFunc() { + called := false + expectedArchiver := s.mockVisibilityArchiver + + factoryFunc := CustomVisibilityArchiverFactoryFunc(func(params NewCustomVisibilityArchiverParams) (archiver.VisibilityArchiver, error) { + called = true + s.Equal("test-scheme", params.Scheme) + return expectedArchiver, nil + }) + + result, err := factoryFunc.NewCustomVisibilityArchiver(NewCustomVisibilityArchiverParams{ + Scheme: "test-scheme", + }) + + s.NoError(err) + s.True(called) + s.Equal(expectedArchiver, result) +} diff --git a/common/archiver/s3store/history_archiver.go b/common/archiver/s3store/history_archiver.go index 25b75e33ebb..de834d6f747 100644 --- a/common/archiver/s3store/history_archiver.go +++ b/common/archiver/s3store/history_archiver.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // S3 History Archiver will archive workflow histories to amazon s3 package s3store @@ -34,14 +10,12 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "go.temporal.io/api/serviceerror" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" @@ -66,12 +40,17 @@ var ( errNoBucketSpecified = errors.New("no bucket specified") errBucketNotExists = errors.New("requested bucket does not exist") errEmptyAwsRegion = errors.New("empty aws region") + + // the retryer is used to check if the error is retryable + awsRetryer aws.Retryer = retry.NewStandard() ) type ( historyArchiver struct { - container *archiver.HistoryBootstrapContainer - s3cli s3iface.S3API + executionManager persistence.ExecutionManager + logger log.Logger + metricsHandler metrics.Handler + s3cli S3API // only set in test code historyIterator archiver.HistoryIterator } @@ -91,44 +70,51 @@ type ( // NewHistoryArchiver creates a new archiver.HistoryArchiver based on s3 func NewHistoryArchiver( - container 
*archiver.HistoryBootstrapContainer, - config *config.S3Archiver, + executionManager persistence.ExecutionManager, + logger log.Logger, + metricsHandler metrics.Handler, + s3config *config.S3Archiver, ) (archiver.HistoryArchiver, error) { - return newHistoryArchiver(container, config, nil) + return newHistoryArchiver(executionManager, logger, metricsHandler, s3config, nil) } func newHistoryArchiver( - container *archiver.HistoryBootstrapContainer, - config *config.S3Archiver, + executionManager persistence.ExecutionManager, + logger log.Logger, + metricsHandler metrics.Handler, + s3config *config.S3Archiver, historyIterator archiver.HistoryIterator, ) (*historyArchiver, error) { - if len(config.Region) == 0 { + if len(s3config.Region) == 0 { return nil, errEmptyAwsRegion } - s3Config := &aws.Config{ - Endpoint: config.Endpoint, - Region: aws.String(config.Region), - S3ForcePathStyle: aws.Bool(config.S3ForcePathStyle), - LogLevel: (*aws.LogLevelType)(&config.LogLevel), - } - sess, err := session.NewSession(s3Config) + cfg, err := awsconfig.LoadDefaultConfig(context.Background(), + awsconfig.WithRegion(s3config.Region), + awsconfig.WithClientLogMode(aws.ClientLogMode(s3config.LogLevel)), + ) if err != nil { return nil, err } return &historyArchiver{ - container: container, - s3cli: s3.New(sess), + executionManager: executionManager, + logger: logger, + metricsHandler: metricsHandler, + s3cli: s3.NewFromConfig(cfg, func(o *s3.Options) { + o.BaseEndpoint = s3config.Endpoint + o.UsePathStyle = s3config.S3ForcePathStyle + }), historyIterator: historyIterator, }, nil } + func (h *historyArchiver) Archive( ctx context.Context, URI archiver.URI, request *archiver.ArchiveHistoryRequest, opts ...archiver.ArchiveOption, ) (err error) { - handler := h.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), metrics.NamespaceTag(request.Namespace)) + handler := h.metricsHandler.WithTags(metrics.OperationTag(metrics.HistoryArchiverScope), 
metrics.NamespaceTag(request.Namespace)) featureCatalog := archiver.GetFeatureCatalog(opts...) startTime := time.Now().UTC() defer func() { @@ -145,7 +131,7 @@ func (h *historyArchiver) Archive( } }() - logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.container.Logger, request, URI.String()) + logger := archiver.TagLoggerWithArchiveHistoryRequestAndURI(h.logger, request, URI.String()) if err := SoftValidateURI(URI); err != nil { logger.Error(archiver.ArchiveNonRetryableErrorMsg, tag.ArchivalArchiveFailReason(archiver.ErrReasonInvalidURI), tag.Error(err)) @@ -160,7 +146,7 @@ func (h *historyArchiver) Archive( var progress uploadProgress historyIterator := h.historyIterator if historyIterator == nil { // will only be set by testing code - historyIterator = loadHistoryIterator(ctx, request, h.container.ExecutionManager, featureCatalog, &progress) + historyIterator = loadHistoryIterator(ctx, request, h.executionManager, featureCatalog, &progress) } for historyIterator.HasNext() { historyBlob, err := historyIterator.Next(ctx) @@ -366,13 +352,13 @@ func (h *historyArchiver) getHighestVersion(ctx context.Context, URI archiver.UR ctx, cancel := ensureContextTimeout(ctx) defer cancel() var prefix = constructHistoryKeyPrefix(URI.Path(), request.NamespaceID, request.WorkflowID, request.RunID) + "/" - results, err := h.s3cli.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ + results, err := h.s3cli.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(URI.Hostname()), Prefix: aws.String(prefix), Delimiter: aws.String("/"), }) if err != nil { - if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket { + if _, ok := errors.AsType[*types.NoSuchBucket](err); ok { return nil, serviceerror.NewInvalidArgument(errBucketNotExists.Error()) } return nil, err @@ -396,26 +382,5 @@ func (h *historyArchiver) getHighestVersion(ctx context.Context, URI archiver.UR } func isRetryableError(err error) bool { - if err == nil { - return false - } - 
if aerr, ok := err.(awserr.Error); ok { - return isStatusCodeRetryable(aerr) || request.IsErrorRetryable(aerr) || request.IsErrorThrottle(aerr) - } - return false -} - -func isStatusCodeRetryable(err error) bool { - if aerr, ok := err.(awserr.Error); ok { - if rerr, ok := err.(awserr.RequestFailure); ok { - if rerr.StatusCode() == 429 { - return true - } - if rerr.StatusCode() >= 500 && rerr.StatusCode() != 501 { - return true - } - } - return isStatusCodeRetryable(aerr.OrigErr()) - } - return false + return awsRetryer.IsErrorRetryable(err) } diff --git a/common/archiver/s3store/history_archiver_test.go b/common/archiver/s3store/history_archiver_test.go index 26956973809..1c1d4854886 100644 --- a/common/archiver/s3store/history_archiver_test.go +++ b/common/archiver/s3store/history_archiver_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package s3store import ( @@ -36,18 +12,15 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/golang/mock/gomock" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" - "google.golang.org/protobuf/types/known/timestamppb" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/archiver" @@ -55,7 +28,9 @@ import ( "go.temporal.io/server/common/codec" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" - "go.temporal.io/server/common/util" + "go.temporal.io/server/common/persistence" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -76,7 +51,9 @@ type historyArchiverSuite struct { *require.Assertions suite.Suite s3cli *mocks.MockS3API - container *archiver.HistoryBootstrapContainer + executionManager persistence.ExecutionManager + logger log.Logger + metricsHandler metrics.Handler testArchivalURI archiver.URI historyBatchesV1 []*archiverspb.HistoryBlob historyBatchesV100 []*archiverspb.HistoryBlob @@ -98,10 +75,8 @@ func (s *historyArchiverSuite) TearDownSuite() { func (s *historyArchiverSuite) SetupTest() { s.Assertions = require.New(s.T()) - s.container = &archiver.HistoryBootstrapContainer{ - Logger: log.NewNoopLogger(), - 
MetricsHandler: metrics.NoopMetricsHandler, - } + s.logger = log.NewNoopLogger() + s.metricsHandler = metrics.NoopMetricsHandler s.controller = gomock.NewController(s.T()) s.s3cli = mocks.NewMockS3API(s.controller) @@ -112,7 +87,7 @@ func (s *historyArchiverSuite) SetupTest() { func setupFsEmulation(s3cli *mocks.MockS3API) { fs := make(map[string][]byte) - putObjectFn := func(_ aws.Context, input *s3.PutObjectInput, _ ...request.Option) (*s3.PutObjectOutput, error) { + putObjectFn := func(_ context.Context, input *s3.PutObjectInput, _ ...func(*s3.Options)) (*s3.PutObjectOutput, error) { buf := new(bytes.Buffer) if _, err := buf.ReadFrom(input.Body); err != nil { return nil, err @@ -121,9 +96,9 @@ func setupFsEmulation(s3cli *mocks.MockS3API) { return &s3.PutObjectOutput{}, nil } - s3cli.EXPECT().ListObjectsV2WithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(_ context.Context, input *s3.ListObjectsV2Input, opts ...request.Option) (*s3.ListObjectsV2Output, error) { - objects := make([]*s3.Object, 0) + s3cli.EXPECT().ListObjectsV2(gomock.Any(), gomock.Any()).DoAndReturn( + func(_ context.Context, input *s3.ListObjectsV2Input, opts ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { + objects := make([]types.Object, 0) commonPrefixMap := map[string]bool{} for k := range fs { if strings.HasPrefix(k, *input.Bucket+*input.Prefix) { @@ -131,7 +106,7 @@ func setupFsEmulation(s3cli *mocks.MockS3API) { keyWithoutPrefix := key[len(*input.Prefix):] index := strings.Index(keyWithoutPrefix, "/") if index == -1 || input.Delimiter == nil { - objects = append(objects, &s3.Object{ + objects = append(objects, types.Object{ Key: aws.String(key), }) } else { @@ -139,9 +114,9 @@ func setupFsEmulation(s3cli *mocks.MockS3API) { } } } - commonPrefixes := make([]*s3.CommonPrefix, 0) + commonPrefixes := make([]types.CommonPrefix, 0) for k := range commonPrefixMap { - commonPrefixes = append(commonPrefixes, &s3.CommonPrefix{ + commonPrefixes = append(commonPrefixes, 
types.CommonPrefix{ Prefix: aws.String(k), }) } @@ -170,7 +145,7 @@ func setupFsEmulation(s3cli *mocks.MockS3API) { var nextContinuationToken *string if len(objects) > start+maxKeys { isTruncated = true - nextContinuationToken = util.Ptr(fmt.Sprintf("%d", start+maxKeys)) + nextContinuationToken = new(fmt.Sprintf("%d", start+maxKeys)) objects = objects[start : start+maxKeys] } else { objects = objects[start:] @@ -186,7 +161,7 @@ func setupFsEmulation(s3cli *mocks.MockS3API) { if len(commonPrefixes) > start+maxKeys { isTruncated = true - nextContinuationToken = util.Ptr(fmt.Sprintf("%d", start+maxKeys)) + nextContinuationToken = new(fmt.Sprintf("%d", start+maxKeys)) commonPrefixes = commonPrefixes[start : start+maxKeys] } else if len(commonPrefixes) > 0 { commonPrefixes = commonPrefixes[start:] @@ -199,23 +174,23 @@ func setupFsEmulation(s3cli *mocks.MockS3API) { NextContinuationToken: nextContinuationToken, }, nil }).AnyTimes() - s3cli.EXPECT().PutObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn(putObjectFn).AnyTimes() + s3cli.EXPECT().PutObject(gomock.Any(), gomock.Any()).DoAndReturn(putObjectFn).AnyTimes() - s3cli.EXPECT().HeadObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.HeadObjectInput, options ...request.Option) (*s3.HeadObjectOutput, error) { + s3cli.EXPECT().HeadObject(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, input *s3.HeadObjectInput, options ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { _, ok := fs[*input.Bucket+*input.Key] if !ok { - return nil, awserr.New("NotFound", "", nil) + return nil, &smithy.GenericAPIError{Code: "NotFound", Message: ""} } return &s3.HeadObjectOutput{}, nil }).AnyTimes() - s3cli.EXPECT().GetObjectWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.GetObjectInput, options ...request.Option) (*s3.GetObjectOutput, error) { + s3cli.EXPECT().GetObject(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx 
context.Context, input *s3.GetObjectInput, options ...func(*s3.Options)) (*s3.GetObjectOutput, error) { _, ok := fs[*input.Bucket+*input.Key] if !ok { - return nil, awserr.New(s3.ErrCodeNoSuchKey, "", nil) + return nil, &types.NoSuchKey{} } return &s3.GetObjectOutput{ @@ -247,10 +222,10 @@ func (s *historyArchiverSuite) TestValidateURI() { }, } - s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { + s.s3cli.EXPECT().HeadBucket(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, input *s3.HeadBucketInput, options ...func(*s3.Options)) (*s3.HeadBucketOutput, error) { if *input.Bucket != s.testArchivalURI.Hostname() { - return nil, awserr.New("NotFound", "", nil) + return nil, &smithy.GenericAPIError{Code: "NotFound", Message: ""} } return &s3.HeadBucketOutput{}, nil @@ -321,7 +296,14 @@ func (s *historyArchiverSuite) TestArchive_Fail_TimeoutWhenReadingHistory() { historyIterator := archiver.NewMockHistoryIterator(s.controller) gomock.InOrder( historyIterator.EXPECT().HasNext().Return(true), - historyIterator.EXPECT().Next(gomock.Any()).Return(nil, serviceerror.NewResourceExhausted(enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, "")), + historyIterator.EXPECT().Next(gomock.Any()).Return( + nil, + &serviceerror.ResourceExhausted{ + Cause: enumspb.RESOURCE_EXHAUSTED_CAUSE_RPS_LIMIT, + Scope: enumspb.RESOURCE_EXHAUSTED_SCOPE_NAMESPACE, + Message: "", + }, + ), ) historyArchiver := s.newTestHistoryArchiver(historyIterator) @@ -688,12 +670,12 @@ func (s *historyArchiverSuite) TestArchiveAndGet() { } func (s *historyArchiverSuite) newTestHistoryArchiver(historyIterator archiver.HistoryIterator) *historyArchiver { - // config := &config.S3Archiver{} - // archiver, err := newHistoryArchiver(s.container, config, historyIterator) archiver := &historyArchiver{ - container: s.container, - s3cli: s.s3cli, - historyIterator: 
historyIterator, + executionManager: s.executionManager, + logger: s.logger, + metricsHandler: s.metricsHandler, + s3cli: s.s3cli, + historyIterator: historyIterator, } return archiver } @@ -770,7 +752,7 @@ func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*a data, err := encoder.Encode(batch) s.Require().NoError(err) key := constructHistoryKey("", testNamespaceID, testWorkflowID, testRunID, version, i) - _, err = s.s3cli.PutObjectWithContext(context.Background(), &s3.PutObjectInput{ + _, err = s.s3cli.PutObject(context.Background(), &s3.PutObjectInput{ Bucket: aws.String(testBucket), Key: aws.String(key), Body: bytes.NewReader(data), @@ -780,7 +762,7 @@ func (s *historyArchiverSuite) writeHistoryBatchesForGetTest(historyBatches []*a } func (s *historyArchiverSuite) assertKeyExists(key string) { - _, err := s.s3cli.GetObjectWithContext(context.Background(), &s3.GetObjectInput{ + _, err := s.s3cli.GetObject(context.Background(), &s3.GetObjectInput{ Bucket: aws.String(testBucket), Key: aws.String(key), }) diff --git a/common/archiver/s3store/mocks/generate.go b/common/archiver/s3store/mocks/generate.go index f0f014b8d48..fb74067f86e 100644 --- a/common/archiver/s3store/mocks/generate.go +++ b/common/archiver/s3store/mocks/generate.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate ./generate.sh +//go:generate mockgen -package "$GOPACKAGE" -destination s3api.go -source ../s3iface.go S3API package mocks diff --git a/common/archiver/s3store/mocks/generate.sh b/common/archiver/s3store/mocks/generate.sh deleted file mode 100755 index bafe10c2218..00000000000 --- a/common/archiver/s3store/mocks/generate.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -set -e - -aws_sdk=$(go list -f '{{.Dir}}' github.com/aws/aws-sdk-go) -if [ -z "$aws_sdk" ]; then - echo "Can't locate aws-sdk-go source code" - exit 1 -fi - -mockgen -copyright_file ../../../../LICENSE -package "$GOPACKAGE" -source "${aws_sdk}/service/s3/s3iface/interface.go" | grep -v "^// Source: .*" > s3api.go diff --git a/common/archiver/s3store/mocks/s3api.go b/common/archiver/s3store/mocks/s3api.go index 3bc9a7605fa..9870fc4c65d 100644 --- a/common/archiver/s3store/mocks/s3api.go +++ b/common/archiver/s3store/mocks/s3api.go @@ -1,45 +1,27 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: ../s3iface.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
+// mockgen -package mocks -destination s3api.go -source ../s3iface.go S3API // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. // Package mocks is a generated GoMock package. package mocks import ( + context "context" reflect "reflect" - aws "github.com/aws/aws-sdk-go/aws" - request "github.com/aws/aws-sdk-go/aws/request" - s3 "github.com/aws/aws-sdk-go/service/s3" - gomock "github.com/golang/mock/gomock" + s3 "github.com/aws/aws-sdk-go-v2/service/s3" + gomock "go.uber.org/mock/gomock" ) // MockS3API is a mock of S3API interface. type MockS3API struct { ctrl *gomock.Controller recorder *MockS3APIMockRecorder + isgomock struct{} } // MockS3APIMockRecorder is the mock recorder for MockS3API. @@ -59,5149 +41,102 @@ func (m *MockS3API) EXPECT() *MockS3APIMockRecorder { return m.recorder } -// AbortMultipartUpload mocks base method. -func (m *MockS3API) AbortMultipartUpload(arg0 *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AbortMultipartUpload", arg0) - ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AbortMultipartUpload indicates an expected call of AbortMultipartUpload. 
-func (mr *MockS3APIMockRecorder) AbortMultipartUpload(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUpload", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUpload), arg0) -} - -// AbortMultipartUploadRequest mocks base method. -func (m *MockS3API) AbortMultipartUploadRequest(arg0 *s3.AbortMultipartUploadInput) (*request.Request, *s3.AbortMultipartUploadOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AbortMultipartUploadRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.AbortMultipartUploadOutput) - return ret0, ret1 -} - -// AbortMultipartUploadRequest indicates an expected call of AbortMultipartUploadRequest. -func (mr *MockS3APIMockRecorder) AbortMultipartUploadRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadRequest), arg0) -} - -// AbortMultipartUploadWithContext mocks base method. -func (m *MockS3API) AbortMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.AbortMultipartUploadInput, arg2 ...request.Option) (*s3.AbortMultipartUploadOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "AbortMultipartUploadWithContext", varargs...) - ret0, _ := ret[0].(*s3.AbortMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// AbortMultipartUploadWithContext indicates an expected call of AbortMultipartUploadWithContext. -func (mr *MockS3APIMockRecorder) AbortMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AbortMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).AbortMultipartUploadWithContext), varargs...) -} - -// CompleteMultipartUpload mocks base method. -func (m *MockS3API) CompleteMultipartUpload(arg0 *s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteMultipartUpload", arg0) - ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CompleteMultipartUpload indicates an expected call of CompleteMultipartUpload. -func (mr *MockS3APIMockRecorder) CompleteMultipartUpload(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUpload), arg0) -} - -// CompleteMultipartUploadRequest mocks base method. -func (m *MockS3API) CompleteMultipartUploadRequest(arg0 *s3.CompleteMultipartUploadInput) (*request.Request, *s3.CompleteMultipartUploadOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CompleteMultipartUploadRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CompleteMultipartUploadOutput) - return ret0, ret1 -} - -// CompleteMultipartUploadRequest indicates an expected call of CompleteMultipartUploadRequest. -func (mr *MockS3APIMockRecorder) CompleteMultipartUploadRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadRequest), arg0) -} - -// CompleteMultipartUploadWithContext mocks base method. 
-func (m *MockS3API) CompleteMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.CompleteMultipartUploadInput, arg2 ...request.Option) (*s3.CompleteMultipartUploadOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CompleteMultipartUploadWithContext", varargs...) - ret0, _ := ret[0].(*s3.CompleteMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CompleteMultipartUploadWithContext indicates an expected call of CompleteMultipartUploadWithContext. -func (mr *MockS3APIMockRecorder) CompleteMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CompleteMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CompleteMultipartUploadWithContext), varargs...) -} - -// CopyObject mocks base method. -func (m *MockS3API) CopyObject(arg0 *s3.CopyObjectInput) (*s3.CopyObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyObject", arg0) - ret0, _ := ret[0].(*s3.CopyObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CopyObject indicates an expected call of CopyObject. -func (mr *MockS3APIMockRecorder) CopyObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObject", reflect.TypeOf((*MockS3API)(nil).CopyObject), arg0) -} - -// CopyObjectRequest mocks base method. -func (m *MockS3API) CopyObjectRequest(arg0 *s3.CopyObjectInput) (*request.Request, *s3.CopyObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CopyObjectOutput) - return ret0, ret1 -} - -// CopyObjectRequest indicates an expected call of CopyObjectRequest. 
-func (mr *MockS3APIMockRecorder) CopyObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectRequest", reflect.TypeOf((*MockS3API)(nil).CopyObjectRequest), arg0) -} - -// CopyObjectWithContext mocks base method. -func (m *MockS3API) CopyObjectWithContext(arg0 aws.Context, arg1 *s3.CopyObjectInput, arg2 ...request.Option) (*s3.CopyObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CopyObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.CopyObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CopyObjectWithContext indicates an expected call of CopyObjectWithContext. -func (mr *MockS3APIMockRecorder) CopyObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyObjectWithContext", reflect.TypeOf((*MockS3API)(nil).CopyObjectWithContext), varargs...) -} - -// CreateBucket mocks base method. -func (m *MockS3API) CreateBucket(arg0 *s3.CreateBucketInput) (*s3.CreateBucketOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucket", arg0) - ret0, _ := ret[0].(*s3.CreateBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateBucket indicates an expected call of CreateBucket. -func (mr *MockS3APIMockRecorder) CreateBucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucket", reflect.TypeOf((*MockS3API)(nil).CreateBucket), arg0) -} - -// CreateBucketRequest mocks base method. 
-func (m *MockS3API) CreateBucketRequest(arg0 *s3.CreateBucketInput) (*request.Request, *s3.CreateBucketOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateBucketRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CreateBucketOutput) - return ret0, ret1 -} - -// CreateBucketRequest indicates an expected call of CreateBucketRequest. -func (mr *MockS3APIMockRecorder) CreateBucketRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketRequest", reflect.TypeOf((*MockS3API)(nil).CreateBucketRequest), arg0) -} - -// CreateBucketWithContext mocks base method. -func (m *MockS3API) CreateBucketWithContext(arg0 aws.Context, arg1 *s3.CreateBucketInput, arg2 ...request.Option) (*s3.CreateBucketOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CreateBucketWithContext", varargs...) - ret0, _ := ret[0].(*s3.CreateBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateBucketWithContext indicates an expected call of CreateBucketWithContext. -func (mr *MockS3APIMockRecorder) CreateBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBucketWithContext", reflect.TypeOf((*MockS3API)(nil).CreateBucketWithContext), varargs...) -} - -// CreateMultipartUpload mocks base method. -func (m *MockS3API) CreateMultipartUpload(arg0 *s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateMultipartUpload", arg0) - ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateMultipartUpload indicates an expected call of CreateMultipartUpload. 
-func (mr *MockS3APIMockRecorder) CreateMultipartUpload(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUpload", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUpload), arg0) -} - -// CreateMultipartUploadRequest mocks base method. -func (m *MockS3API) CreateMultipartUploadRequest(arg0 *s3.CreateMultipartUploadInput) (*request.Request, *s3.CreateMultipartUploadOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateMultipartUploadRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.CreateMultipartUploadOutput) - return ret0, ret1 -} - -// CreateMultipartUploadRequest indicates an expected call of CreateMultipartUploadRequest. -func (mr *MockS3APIMockRecorder) CreateMultipartUploadRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadRequest", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadRequest), arg0) -} - -// CreateMultipartUploadWithContext mocks base method. -func (m *MockS3API) CreateMultipartUploadWithContext(arg0 aws.Context, arg1 *s3.CreateMultipartUploadInput, arg2 ...request.Option) (*s3.CreateMultipartUploadOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "CreateMultipartUploadWithContext", varargs...) - ret0, _ := ret[0].(*s3.CreateMultipartUploadOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CreateMultipartUploadWithContext indicates an expected call of CreateMultipartUploadWithContext. -func (mr *MockS3APIMockRecorder) CreateMultipartUploadWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateMultipartUploadWithContext", reflect.TypeOf((*MockS3API)(nil).CreateMultipartUploadWithContext), varargs...) -} - -// DeleteBucket mocks base method. -func (m *MockS3API) DeleteBucket(arg0 *s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucket", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucket indicates an expected call of DeleteBucket. -func (mr *MockS3APIMockRecorder) DeleteBucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucket", reflect.TypeOf((*MockS3API)(nil).DeleteBucket), arg0) -} - -// DeleteBucketAnalyticsConfiguration mocks base method. -func (m *MockS3API) DeleteBucketAnalyticsConfiguration(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketAnalyticsConfiguration indicates an expected call of DeleteBucketAnalyticsConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfiguration), arg0) -} - -// DeleteBucketAnalyticsConfigurationRequest mocks base method. 
-func (m *MockS3API) DeleteBucketAnalyticsConfigurationRequest(arg0 *s3.DeleteBucketAnalyticsConfigurationInput) (*request.Request, *s3.DeleteBucketAnalyticsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketAnalyticsConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketAnalyticsConfigurationRequest indicates an expected call of DeleteBucketAnalyticsConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationRequest), arg0) -} - -// DeleteBucketAnalyticsConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketAnalyticsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketAnalyticsConfigurationWithContext indicates an expected call of DeleteBucketAnalyticsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketAnalyticsConfigurationWithContext), varargs...) -} - -// DeleteBucketCors mocks base method. -func (m *MockS3API) DeleteBucketCors(arg0 *s3.DeleteBucketCorsInput) (*s3.DeleteBucketCorsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketCors", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketCors indicates an expected call of DeleteBucketCors. -func (mr *MockS3APIMockRecorder) DeleteBucketCors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCors", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCors), arg0) -} - -// DeleteBucketCorsRequest mocks base method. -func (m *MockS3API) DeleteBucketCorsRequest(arg0 *s3.DeleteBucketCorsInput) (*request.Request, *s3.DeleteBucketCorsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketCorsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketCorsOutput) - return ret0, ret1 -} - -// DeleteBucketCorsRequest indicates an expected call of DeleteBucketCorsRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketCorsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsRequest), arg0) -} - -// DeleteBucketCorsWithContext mocks base method. -func (m *MockS3API) DeleteBucketCorsWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketCorsInput, arg2 ...request.Option) (*s3.DeleteBucketCorsOutput, error) { +// GetObject mocks base method. 
+func (m *MockS3API) GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) { m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { + varargs := []any{ctx, params} + for _, a := range optFns { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DeleteBucketCorsWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketCorsWithContext indicates an expected call of DeleteBucketCorsWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketCorsWithContext), varargs...) -} - -// DeleteBucketEncryption mocks base method. -func (m *MockS3API) DeleteBucketEncryption(arg0 *s3.DeleteBucketEncryptionInput) (*s3.DeleteBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketEncryption", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) + ret := m.ctrl.Call(m, "GetObject", varargs...) + ret0, _ := ret[0].(*s3.GetObjectOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteBucketEncryption indicates an expected call of DeleteBucketEncryption. -func (mr *MockS3APIMockRecorder) DeleteBucketEncryption(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryption", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryption), arg0) -} - -// DeleteBucketEncryptionRequest mocks base method. 
-func (m *MockS3API) DeleteBucketEncryptionRequest(arg0 *s3.DeleteBucketEncryptionInput) (*request.Request, *s3.DeleteBucketEncryptionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketEncryptionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketEncryptionOutput) - return ret0, ret1 -} - -// DeleteBucketEncryptionRequest indicates an expected call of DeleteBucketEncryptionRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionRequest(arg0 interface{}) *gomock.Call { +// GetObject indicates an expected call of GetObject. +func (mr *MockS3APIMockRecorder) GetObject(ctx, params any, optFns ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionRequest), arg0) + varargs := append([]any{ctx, params}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockS3API)(nil).GetObject), varargs...) } -// DeleteBucketEncryptionWithContext mocks base method. -func (m *MockS3API) DeleteBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketEncryptionInput, arg2 ...request.Option) (*s3.DeleteBucketEncryptionOutput, error) { +// HeadBucket mocks base method. +func (m *MockS3API) HeadBucket(ctx context.Context, params *s3.HeadBucketInput, optFns ...func(*s3.Options)) (*s3.HeadBucketOutput, error) { m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { + varargs := []any{ctx, params} + for _, a := range optFns { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DeleteBucketEncryptionWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketEncryptionWithContext indicates an expected call of DeleteBucketEncryptionWithContext. 
-func (mr *MockS3APIMockRecorder) DeleteBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketEncryptionWithContext), varargs...) -} - -// DeleteBucketIntelligentTieringConfiguration mocks base method. -func (m *MockS3API) DeleteBucketIntelligentTieringConfiguration(arg0 *s3.DeleteBucketIntelligentTieringConfigurationInput) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) + ret := m.ctrl.Call(m, "HeadBucket", varargs...) + ret0, _ := ret[0].(*s3.HeadBucketOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteBucketIntelligentTieringConfiguration indicates an expected call of DeleteBucketIntelligentTieringConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfiguration), arg0) -} - -// DeleteBucketIntelligentTieringConfigurationRequest mocks base method. 
-func (m *MockS3API) DeleteBucketIntelligentTieringConfigurationRequest(arg0 *s3.DeleteBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.DeleteBucketIntelligentTieringConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketIntelligentTieringConfigurationRequest indicates an expected call of DeleteBucketIntelligentTieringConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { +// HeadBucket indicates an expected call of HeadBucket. +func (mr *MockS3APIMockRecorder) HeadBucket(ctx, params any, optFns ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfigurationRequest), arg0) + varargs := append([]any{ctx, params}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucket", reflect.TypeOf((*MockS3API)(nil).HeadBucket), varargs...) } -// DeleteBucketIntelligentTieringConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketIntelligentTieringConfigurationOutput, error) { +// HeadObject mocks base method. 
+func (m *MockS3API) HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { + varargs := []any{ctx, params} + for _, a := range optFns { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DeleteBucketIntelligentTieringConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketIntelligentTieringConfigurationWithContext indicates an expected call of DeleteBucketIntelligentTieringConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketIntelligentTieringConfigurationWithContext), varargs...) -} - -// DeleteBucketInventoryConfiguration mocks base method. -func (m *MockS3API) DeleteBucketInventoryConfiguration(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*s3.DeleteBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketInventoryConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) + ret := m.ctrl.Call(m, "HeadObject", varargs...) + ret0, _ := ret[0].(*s3.HeadObjectOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteBucketInventoryConfiguration indicates an expected call of DeleteBucketInventoryConfiguration. 
-func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfiguration), arg0) -} - -// DeleteBucketInventoryConfigurationRequest mocks base method. -func (m *MockS3API) DeleteBucketInventoryConfigurationRequest(arg0 *s3.DeleteBucketInventoryConfigurationInput) (*request.Request, *s3.DeleteBucketInventoryConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketInventoryConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketInventoryConfigurationRequest indicates an expected call of DeleteBucketInventoryConfigurationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { +// HeadObject indicates an expected call of HeadObject. +func (mr *MockS3APIMockRecorder) HeadObject(ctx, params any, optFns ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationRequest), arg0) + varargs := append([]any{ctx, params}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockS3API)(nil).HeadObject), varargs...) } -// DeleteBucketInventoryConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketInventoryConfigurationOutput, error) { +// ListObjectsV2 mocks base method. 
+func (m *MockS3API) ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) { m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { + varargs := []any{ctx, params} + for _, a := range optFns { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DeleteBucketInventoryConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketInventoryConfigurationWithContext indicates an expected call of DeleteBucketInventoryConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketInventoryConfigurationWithContext), varargs...) -} - -// DeleteBucketLifecycle mocks base method. -func (m *MockS3API) DeleteBucketLifecycle(arg0 *s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketLifecycle", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) + ret := m.ctrl.Call(m, "ListObjectsV2", varargs...) + ret0, _ := ret[0].(*s3.ListObjectsV2Output) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteBucketLifecycle indicates an expected call of DeleteBucketLifecycle. -func (mr *MockS3APIMockRecorder) DeleteBucketLifecycle(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycle), arg0) -} - -// DeleteBucketLifecycleRequest mocks base method. 
-func (m *MockS3API) DeleteBucketLifecycleRequest(arg0 *s3.DeleteBucketLifecycleInput) (*request.Request, *s3.DeleteBucketLifecycleOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketLifecycleRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketLifecycleOutput) - return ret0, ret1 -} - -// DeleteBucketLifecycleRequest indicates an expected call of DeleteBucketLifecycleRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleRequest(arg0 interface{}) *gomock.Call { +// ListObjectsV2 indicates an expected call of ListObjectsV2. +func (mr *MockS3APIMockRecorder) ListObjectsV2(ctx, params any, optFns ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleRequest), arg0) + varargs := append([]any{ctx, params}, optFns...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2), varargs...) } -// DeleteBucketLifecycleWithContext mocks base method. -func (m *MockS3API) DeleteBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketLifecycleInput, arg2 ...request.Option) (*s3.DeleteBucketLifecycleOutput, error) { +// PutObject mocks base method. +func (m *MockS3API) PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) { m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { + varargs := []any{ctx, params} + for _, a := range optFns { varargs = append(varargs, a) } - ret := m.ctrl.Call(m, "DeleteBucketLifecycleWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketLifecycleOutput) + ret := m.ctrl.Call(m, "PutObject", varargs...) 
+ ret0, _ := ret[0].(*s3.PutObjectOutput) ret1, _ := ret[1].(error) return ret0, ret1 } -// DeleteBucketLifecycleWithContext indicates an expected call of DeleteBucketLifecycleWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketLifecycleWithContext), varargs...) -} - -// DeleteBucketMetricsConfiguration mocks base method. -func (m *MockS3API) DeleteBucketMetricsConfiguration(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*s3.DeleteBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketMetricsConfiguration", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketMetricsConfiguration indicates an expected call of DeleteBucketMetricsConfiguration. -func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfiguration), arg0) -} - -// DeleteBucketMetricsConfigurationRequest mocks base method. -func (m *MockS3API) DeleteBucketMetricsConfigurationRequest(arg0 *s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketMetricsConfigurationOutput) - return ret0, ret1 -} - -// DeleteBucketMetricsConfigurationRequest indicates an expected call of DeleteBucketMetricsConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationRequest), arg0) -} - -// DeleteBucketMetricsConfigurationWithContext mocks base method. -func (m *MockS3API) DeleteBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketMetricsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketMetricsConfigurationWithContext indicates an expected call of DeleteBucketMetricsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketMetricsConfigurationWithContext), varargs...) -} - -// DeleteBucketOwnershipControls mocks base method. -func (m *MockS3API) DeleteBucketOwnershipControls(arg0 *s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketOwnershipControls", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketOwnershipControls indicates an expected call of DeleteBucketOwnershipControls. 
-func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControls(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControls), arg0) -} - -// DeleteBucketOwnershipControlsRequest mocks base method. -func (m *MockS3API) DeleteBucketOwnershipControlsRequest(arg0 *s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketOwnershipControlsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketOwnershipControlsOutput) - return ret0, ret1 -} - -// DeleteBucketOwnershipControlsRequest indicates an expected call of DeleteBucketOwnershipControlsRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControlsRequest), arg0) -} - -// DeleteBucketOwnershipControlsWithContext mocks base method. -func (m *MockS3API) DeleteBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketOwnershipControlsWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketOwnershipControlsWithContext indicates an expected call of DeleteBucketOwnershipControlsWithContext. 
-func (mr *MockS3APIMockRecorder) DeleteBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketOwnershipControlsWithContext), varargs...) -} - -// DeleteBucketPolicy mocks base method. -func (m *MockS3API) DeleteBucketPolicy(arg0 *s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketPolicy", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketPolicy indicates an expected call of DeleteBucketPolicy. -func (mr *MockS3APIMockRecorder) DeleteBucketPolicy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicy", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicy), arg0) -} - -// DeleteBucketPolicyRequest mocks base method. -func (m *MockS3API) DeleteBucketPolicyRequest(arg0 *s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketPolicyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketPolicyOutput) - return ret0, ret1 -} - -// DeleteBucketPolicyRequest indicates an expected call of DeleteBucketPolicyRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketPolicyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyRequest), arg0) -} - -// DeleteBucketPolicyWithContext mocks base method. 
-func (m *MockS3API) DeleteBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketPolicyInput, arg2 ...request.Option) (*s3.DeleteBucketPolicyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketPolicyWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketPolicyWithContext indicates an expected call of DeleteBucketPolicyWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketPolicyWithContext), varargs...) -} - -// DeleteBucketReplication mocks base method. -func (m *MockS3API) DeleteBucketReplication(arg0 *s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketReplication", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketReplication indicates an expected call of DeleteBucketReplication. -func (mr *MockS3APIMockRecorder) DeleteBucketReplication(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplication", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplication), arg0) -} - -// DeleteBucketReplicationRequest mocks base method. 
-func (m *MockS3API) DeleteBucketReplicationRequest(arg0 *s3.DeleteBucketReplicationInput) (*request.Request, *s3.DeleteBucketReplicationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketReplicationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketReplicationOutput) - return ret0, ret1 -} - -// DeleteBucketReplicationRequest indicates an expected call of DeleteBucketReplicationRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketReplicationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationRequest), arg0) -} - -// DeleteBucketReplicationWithContext mocks base method. -func (m *MockS3API) DeleteBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketReplicationInput, arg2 ...request.Option) (*s3.DeleteBucketReplicationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketReplicationWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketReplicationWithContext indicates an expected call of DeleteBucketReplicationWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketReplicationWithContext), varargs...) -} - -// DeleteBucketRequest mocks base method. 
-func (m *MockS3API) DeleteBucketRequest(arg0 *s3.DeleteBucketInput) (*request.Request, *s3.DeleteBucketOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketOutput) - return ret0, ret1 -} - -// DeleteBucketRequest indicates an expected call of DeleteBucketRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketRequest), arg0) -} - -// DeleteBucketTagging mocks base method. -func (m *MockS3API) DeleteBucketTagging(arg0 *s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketTagging", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketTagging indicates an expected call of DeleteBucketTagging. -func (mr *MockS3APIMockRecorder) DeleteBucketTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTagging", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTagging), arg0) -} - -// DeleteBucketTaggingRequest mocks base method. -func (m *MockS3API) DeleteBucketTaggingRequest(arg0 *s3.DeleteBucketTaggingInput) (*request.Request, *s3.DeleteBucketTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketTaggingOutput) - return ret0, ret1 -} - -// DeleteBucketTaggingRequest indicates an expected call of DeleteBucketTaggingRequest. 
-func (mr *MockS3APIMockRecorder) DeleteBucketTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingRequest), arg0) -} - -// DeleteBucketTaggingWithContext mocks base method. -func (m *MockS3API) DeleteBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketTaggingInput, arg2 ...request.Option) (*s3.DeleteBucketTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketTaggingWithContext indicates an expected call of DeleteBucketTaggingWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketTaggingWithContext), varargs...) -} - -// DeleteBucketWebsite mocks base method. -func (m *MockS3API) DeleteBucketWebsite(arg0 *s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketWebsite", arg0) - ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketWebsite indicates an expected call of DeleteBucketWebsite. -func (mr *MockS3APIMockRecorder) DeleteBucketWebsite(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsite", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsite), arg0) -} - -// DeleteBucketWebsiteRequest mocks base method. 
-func (m *MockS3API) DeleteBucketWebsiteRequest(arg0 *s3.DeleteBucketWebsiteInput) (*request.Request, *s3.DeleteBucketWebsiteOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteBucketWebsiteRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteBucketWebsiteOutput) - return ret0, ret1 -} - -// DeleteBucketWebsiteRequest indicates an expected call of DeleteBucketWebsiteRequest. -func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteRequest), arg0) -} - -// DeleteBucketWebsiteWithContext mocks base method. -func (m *MockS3API) DeleteBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketWebsiteInput, arg2 ...request.Option) (*s3.DeleteBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketWebsiteWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketWebsiteWithContext indicates an expected call of DeleteBucketWebsiteWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWebsiteWithContext), varargs...) -} - -// DeleteBucketWithContext mocks base method. 
-func (m *MockS3API) DeleteBucketWithContext(arg0 aws.Context, arg1 *s3.DeleteBucketInput, arg2 ...request.Option) (*s3.DeleteBucketOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteBucketWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteBucketWithContext indicates an expected call of DeleteBucketWithContext. -func (mr *MockS3APIMockRecorder) DeleteBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteBucketWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteBucketWithContext), varargs...) -} - -// DeleteObject mocks base method. -func (m *MockS3API) DeleteObject(arg0 *s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObject", arg0) - ret0, _ := ret[0].(*s3.DeleteObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObject indicates an expected call of DeleteObject. -func (mr *MockS3APIMockRecorder) DeleteObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObject", reflect.TypeOf((*MockS3API)(nil).DeleteObject), arg0) -} - -// DeleteObjectRequest mocks base method. -func (m *MockS3API) DeleteObjectRequest(arg0 *s3.DeleteObjectInput) (*request.Request, *s3.DeleteObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteObjectOutput) - return ret0, ret1 -} - -// DeleteObjectRequest indicates an expected call of DeleteObjectRequest. 
-func (mr *MockS3APIMockRecorder) DeleteObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectRequest), arg0) -} - -// DeleteObjectTagging mocks base method. -func (m *MockS3API) DeleteObjectTagging(arg0 *s3.DeleteObjectTaggingInput) (*s3.DeleteObjectTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectTagging", arg0) - ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectTagging indicates an expected call of DeleteObjectTagging. -func (mr *MockS3APIMockRecorder) DeleteObjectTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTagging", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTagging), arg0) -} - -// DeleteObjectTaggingRequest mocks base method. -func (m *MockS3API) DeleteObjectTaggingRequest(arg0 *s3.DeleteObjectTaggingInput) (*request.Request, *s3.DeleteObjectTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteObjectTaggingOutput) - return ret0, ret1 -} - -// DeleteObjectTaggingRequest indicates an expected call of DeleteObjectTaggingRequest. -func (mr *MockS3APIMockRecorder) DeleteObjectTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingRequest), arg0) -} - -// DeleteObjectTaggingWithContext mocks base method. 
-func (m *MockS3API) DeleteObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectTaggingInput, arg2 ...request.Option) (*s3.DeleteObjectTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteObjectTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectTaggingWithContext indicates an expected call of DeleteObjectTaggingWithContext. -func (mr *MockS3APIMockRecorder) DeleteObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectTaggingWithContext), varargs...) -} - -// DeleteObjectWithContext mocks base method. -func (m *MockS3API) DeleteObjectWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectInput, arg2 ...request.Option) (*s3.DeleteObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectWithContext indicates an expected call of DeleteObjectWithContext. -func (mr *MockS3APIMockRecorder) DeleteObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectWithContext), varargs...) -} - -// DeleteObjects mocks base method. 
-func (m *MockS3API) DeleteObjects(arg0 *s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjects", arg0) - ret0, _ := ret[0].(*s3.DeleteObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjects indicates an expected call of DeleteObjects. -func (mr *MockS3APIMockRecorder) DeleteObjects(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjects", reflect.TypeOf((*MockS3API)(nil).DeleteObjects), arg0) -} - -// DeleteObjectsRequest mocks base method. -func (m *MockS3API) DeleteObjectsRequest(arg0 *s3.DeleteObjectsInput) (*request.Request, *s3.DeleteObjectsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteObjectsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeleteObjectsOutput) - return ret0, ret1 -} - -// DeleteObjectsRequest indicates an expected call of DeleteObjectsRequest. -func (mr *MockS3APIMockRecorder) DeleteObjectsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsRequest", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsRequest), arg0) -} - -// DeleteObjectsWithContext mocks base method. -func (m *MockS3API) DeleteObjectsWithContext(arg0 aws.Context, arg1 *s3.DeleteObjectsInput, arg2 ...request.Option) (*s3.DeleteObjectsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeleteObjectsWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeleteObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeleteObjectsWithContext indicates an expected call of DeleteObjectsWithContext. 
-func (mr *MockS3APIMockRecorder) DeleteObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).DeleteObjectsWithContext), varargs...) -} - -// DeletePublicAccessBlock mocks base method. -func (m *MockS3API) DeletePublicAccessBlock(arg0 *s3.DeletePublicAccessBlockInput) (*s3.DeletePublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeletePublicAccessBlock", arg0) - ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeletePublicAccessBlock indicates an expected call of DeletePublicAccessBlock. -func (mr *MockS3APIMockRecorder) DeletePublicAccessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlock), arg0) -} - -// DeletePublicAccessBlockRequest mocks base method. -func (m *MockS3API) DeletePublicAccessBlockRequest(arg0 *s3.DeletePublicAccessBlockInput) (*request.Request, *s3.DeletePublicAccessBlockOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeletePublicAccessBlockRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.DeletePublicAccessBlockOutput) - return ret0, ret1 -} - -// DeletePublicAccessBlockRequest indicates an expected call of DeletePublicAccessBlockRequest. -func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockRequest), arg0) -} - -// DeletePublicAccessBlockWithContext mocks base method. 
-func (m *MockS3API) DeletePublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.DeletePublicAccessBlockInput, arg2 ...request.Option) (*s3.DeletePublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "DeletePublicAccessBlockWithContext", varargs...) - ret0, _ := ret[0].(*s3.DeletePublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// DeletePublicAccessBlockWithContext indicates an expected call of DeletePublicAccessBlockWithContext. -func (mr *MockS3APIMockRecorder) DeletePublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).DeletePublicAccessBlockWithContext), varargs...) -} - -// GetBucketAccelerateConfiguration mocks base method. -func (m *MockS3API) GetBucketAccelerateConfiguration(arg0 *s3.GetBucketAccelerateConfigurationInput) (*s3.GetBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAccelerateConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAccelerateConfiguration indicates an expected call of GetBucketAccelerateConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfiguration), arg0) -} - -// GetBucketAccelerateConfigurationRequest mocks base method. 
-func (m *MockS3API) GetBucketAccelerateConfigurationRequest(arg0 *s3.GetBucketAccelerateConfigurationInput) (*request.Request, *s3.GetBucketAccelerateConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketAccelerateConfigurationOutput) - return ret0, ret1 -} - -// GetBucketAccelerateConfigurationRequest indicates an expected call of GetBucketAccelerateConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationRequest), arg0) -} - -// GetBucketAccelerateConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketAccelerateConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketAccelerateConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAccelerateConfigurationWithContext indicates an expected call of GetBucketAccelerateConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAccelerateConfigurationWithContext), varargs...) 
-} - -// GetBucketAcl mocks base method. -func (m *MockS3API) GetBucketAcl(arg0 *s3.GetBucketAclInput) (*s3.GetBucketAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAcl", arg0) - ret0, _ := ret[0].(*s3.GetBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAcl indicates an expected call of GetBucketAcl. -func (mr *MockS3APIMockRecorder) GetBucketAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAcl", reflect.TypeOf((*MockS3API)(nil).GetBucketAcl), arg0) -} - -// GetBucketAclRequest mocks base method. -func (m *MockS3API) GetBucketAclRequest(arg0 *s3.GetBucketAclInput) (*request.Request, *s3.GetBucketAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketAclOutput) - return ret0, ret1 -} - -// GetBucketAclRequest indicates an expected call of GetBucketAclRequest. -func (mr *MockS3APIMockRecorder) GetBucketAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAclRequest), arg0) -} - -// GetBucketAclWithContext mocks base method. -func (m *MockS3API) GetBucketAclWithContext(arg0 aws.Context, arg1 *s3.GetBucketAclInput, arg2 ...request.Option) (*s3.GetBucketAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAclWithContext indicates an expected call of GetBucketAclWithContext. 
-func (mr *MockS3APIMockRecorder) GetBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAclWithContext), varargs...) -} - -// GetBucketAnalyticsConfiguration mocks base method. -func (m *MockS3API) GetBucketAnalyticsConfiguration(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*s3.GetBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAnalyticsConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAnalyticsConfiguration indicates an expected call of GetBucketAnalyticsConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfiguration), arg0) -} - -// GetBucketAnalyticsConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketAnalyticsConfigurationRequest(arg0 *s3.GetBucketAnalyticsConfigurationInput) (*request.Request, *s3.GetBucketAnalyticsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketAnalyticsConfigurationOutput) - return ret0, ret1 -} - -// GetBucketAnalyticsConfigurationRequest indicates an expected call of GetBucketAnalyticsConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationRequest), arg0) -} - -// GetBucketAnalyticsConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketAnalyticsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketAnalyticsConfigurationWithContext indicates an expected call of GetBucketAnalyticsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketAnalyticsConfigurationWithContext), varargs...) -} - -// GetBucketCors mocks base method. -func (m *MockS3API) GetBucketCors(arg0 *s3.GetBucketCorsInput) (*s3.GetBucketCorsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketCors", arg0) - ret0, _ := ret[0].(*s3.GetBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketCors indicates an expected call of GetBucketCors. 
-func (mr *MockS3APIMockRecorder) GetBucketCors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCors", reflect.TypeOf((*MockS3API)(nil).GetBucketCors), arg0) -} - -// GetBucketCorsRequest mocks base method. -func (m *MockS3API) GetBucketCorsRequest(arg0 *s3.GetBucketCorsInput) (*request.Request, *s3.GetBucketCorsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketCorsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketCorsOutput) - return ret0, ret1 -} - -// GetBucketCorsRequest indicates an expected call of GetBucketCorsRequest. -func (mr *MockS3APIMockRecorder) GetBucketCorsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsRequest), arg0) -} - -// GetBucketCorsWithContext mocks base method. -func (m *MockS3API) GetBucketCorsWithContext(arg0 aws.Context, arg1 *s3.GetBucketCorsInput, arg2 ...request.Option) (*s3.GetBucketCorsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketCorsWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketCorsWithContext indicates an expected call of GetBucketCorsWithContext. -func (mr *MockS3APIMockRecorder) GetBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketCorsWithContext), varargs...) -} - -// GetBucketEncryption mocks base method. 
-func (m *MockS3API) GetBucketEncryption(arg0 *s3.GetBucketEncryptionInput) (*s3.GetBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketEncryption", arg0) - ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketEncryption indicates an expected call of GetBucketEncryption. -func (mr *MockS3APIMockRecorder) GetBucketEncryption(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryption", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryption), arg0) -} - -// GetBucketEncryptionRequest mocks base method. -func (m *MockS3API) GetBucketEncryptionRequest(arg0 *s3.GetBucketEncryptionInput) (*request.Request, *s3.GetBucketEncryptionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketEncryptionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketEncryptionOutput) - return ret0, ret1 -} - -// GetBucketEncryptionRequest indicates an expected call of GetBucketEncryptionRequest. -func (mr *MockS3APIMockRecorder) GetBucketEncryptionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionRequest), arg0) -} - -// GetBucketEncryptionWithContext mocks base method. -func (m *MockS3API) GetBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.GetBucketEncryptionInput, arg2 ...request.Option) (*s3.GetBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketEncryptionWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketEncryptionWithContext indicates an expected call of GetBucketEncryptionWithContext. 
-func (mr *MockS3APIMockRecorder) GetBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketEncryptionWithContext), varargs...) -} - -// GetBucketIntelligentTieringConfiguration mocks base method. -func (m *MockS3API) GetBucketIntelligentTieringConfiguration(arg0 *s3.GetBucketIntelligentTieringConfigurationInput) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketIntelligentTieringConfiguration indicates an expected call of GetBucketIntelligentTieringConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfiguration), arg0) -} - -// GetBucketIntelligentTieringConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketIntelligentTieringConfigurationRequest(arg0 *s3.GetBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.GetBucketIntelligentTieringConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketIntelligentTieringConfigurationOutput) - return ret0, ret1 -} - -// GetBucketIntelligentTieringConfigurationRequest indicates an expected call of GetBucketIntelligentTieringConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfigurationRequest), arg0) -} - -// GetBucketIntelligentTieringConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.GetBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketIntelligentTieringConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketIntelligentTieringConfigurationWithContext indicates an expected call of GetBucketIntelligentTieringConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketIntelligentTieringConfigurationWithContext), varargs...) -} - -// GetBucketInventoryConfiguration mocks base method. 
-func (m *MockS3API) GetBucketInventoryConfiguration(arg0 *s3.GetBucketInventoryConfigurationInput) (*s3.GetBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketInventoryConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketInventoryConfiguration indicates an expected call of GetBucketInventoryConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfiguration), arg0) -} - -// GetBucketInventoryConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketInventoryConfigurationRequest(arg0 *s3.GetBucketInventoryConfigurationInput) (*request.Request, *s3.GetBucketInventoryConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketInventoryConfigurationOutput) - return ret0, ret1 -} - -// GetBucketInventoryConfigurationRequest indicates an expected call of GetBucketInventoryConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationRequest), arg0) -} - -// GetBucketInventoryConfigurationWithContext mocks base method. 
-func (m *MockS3API) GetBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.GetBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketInventoryConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketInventoryConfigurationWithContext indicates an expected call of GetBucketInventoryConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketInventoryConfigurationWithContext), varargs...) -} - -// GetBucketLifecycle mocks base method. -func (m *MockS3API) GetBucketLifecycle(arg0 *s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycle", arg0) - ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycle indicates an expected call of GetBucketLifecycle. -func (mr *MockS3APIMockRecorder) GetBucketLifecycle(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycle), arg0) -} - -// GetBucketLifecycleConfiguration mocks base method. 
-func (m *MockS3API) GetBucketLifecycleConfiguration(arg0 *s3.GetBucketLifecycleConfigurationInput) (*s3.GetBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycleConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycleConfiguration indicates an expected call of GetBucketLifecycleConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfiguration), arg0) -} - -// GetBucketLifecycleConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketLifecycleConfigurationRequest(arg0 *s3.GetBucketLifecycleConfigurationInput) (*request.Request, *s3.GetBucketLifecycleConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLifecycleConfigurationOutput) - return ret0, ret1 -} - -// GetBucketLifecycleConfigurationRequest indicates an expected call of GetBucketLifecycleConfigurationRequest. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationRequest), arg0) -} - -// GetBucketLifecycleConfigurationWithContext mocks base method. 
-func (m *MockS3API) GetBucketLifecycleConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.GetBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLifecycleConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycleConfigurationWithContext indicates an expected call of GetBucketLifecycleConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleConfigurationWithContext), varargs...) -} - -// GetBucketLifecycleRequest mocks base method. -func (m *MockS3API) GetBucketLifecycleRequest(arg0 *s3.GetBucketLifecycleInput) (*request.Request, *s3.GetBucketLifecycleOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLifecycleRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLifecycleOutput) - return ret0, ret1 -} - -// GetBucketLifecycleRequest indicates an expected call of GetBucketLifecycleRequest. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleRequest), arg0) -} - -// GetBucketLifecycleWithContext mocks base method. 
-func (m *MockS3API) GetBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.GetBucketLifecycleInput, arg2 ...request.Option) (*s3.GetBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLifecycleWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLifecycleWithContext indicates an expected call of GetBucketLifecycleWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLifecycleWithContext), varargs...) -} - -// GetBucketLocation mocks base method. -func (m *MockS3API) GetBucketLocation(arg0 *s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLocation", arg0) - ret0, _ := ret[0].(*s3.GetBucketLocationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLocation indicates an expected call of GetBucketLocation. -func (mr *MockS3APIMockRecorder) GetBucketLocation(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocation", reflect.TypeOf((*MockS3API)(nil).GetBucketLocation), arg0) -} - -// GetBucketLocationRequest mocks base method. 
-func (m *MockS3API) GetBucketLocationRequest(arg0 *s3.GetBucketLocationInput) (*request.Request, *s3.GetBucketLocationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLocationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLocationOutput) - return ret0, ret1 -} - -// GetBucketLocationRequest indicates an expected call of GetBucketLocationRequest. -func (mr *MockS3APIMockRecorder) GetBucketLocationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationRequest), arg0) -} - -// GetBucketLocationWithContext mocks base method. -func (m *MockS3API) GetBucketLocationWithContext(arg0 aws.Context, arg1 *s3.GetBucketLocationInput, arg2 ...request.Option) (*s3.GetBucketLocationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLocationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLocationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLocationWithContext indicates an expected call of GetBucketLocationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketLocationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLocationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLocationWithContext), varargs...) -} - -// GetBucketLogging mocks base method. 
-func (m *MockS3API) GetBucketLogging(arg0 *s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLogging", arg0) - ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLogging indicates an expected call of GetBucketLogging. -func (mr *MockS3APIMockRecorder) GetBucketLogging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLogging", reflect.TypeOf((*MockS3API)(nil).GetBucketLogging), arg0) -} - -// GetBucketLoggingRequest mocks base method. -func (m *MockS3API) GetBucketLoggingRequest(arg0 *s3.GetBucketLoggingInput) (*request.Request, *s3.GetBucketLoggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketLoggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketLoggingOutput) - return ret0, ret1 -} - -// GetBucketLoggingRequest indicates an expected call of GetBucketLoggingRequest. -func (mr *MockS3APIMockRecorder) GetBucketLoggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingRequest), arg0) -} - -// GetBucketLoggingWithContext mocks base method. -func (m *MockS3API) GetBucketLoggingWithContext(arg0 aws.Context, arg1 *s3.GetBucketLoggingInput, arg2 ...request.Option) (*s3.GetBucketLoggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketLoggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketLoggingWithContext indicates an expected call of GetBucketLoggingWithContext. 
-func (mr *MockS3APIMockRecorder) GetBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketLoggingWithContext), varargs...) -} - -// GetBucketMetricsConfiguration mocks base method. -func (m *MockS3API) GetBucketMetricsConfiguration(arg0 *s3.GetBucketMetricsConfigurationInput) (*s3.GetBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketMetricsConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketMetricsConfiguration indicates an expected call of GetBucketMetricsConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfiguration), arg0) -} - -// GetBucketMetricsConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketMetricsConfigurationRequest(arg0 *s3.GetBucketMetricsConfigurationInput) (*request.Request, *s3.GetBucketMetricsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketMetricsConfigurationOutput) - return ret0, ret1 -} - -// GetBucketMetricsConfigurationRequest indicates an expected call of GetBucketMetricsConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationRequest), arg0) -} - -// GetBucketMetricsConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.GetBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketMetricsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketMetricsConfigurationWithContext indicates an expected call of GetBucketMetricsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketMetricsConfigurationWithContext), varargs...) -} - -// GetBucketNotification mocks base method. -func (m *MockS3API) GetBucketNotification(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotification", arg0) - ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotification indicates an expected call of GetBucketNotification. 
-func (mr *MockS3APIMockRecorder) GetBucketNotification(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotification", reflect.TypeOf((*MockS3API)(nil).GetBucketNotification), arg0) -} - -// GetBucketNotificationConfiguration mocks base method. -func (m *MockS3API) GetBucketNotificationConfiguration(arg0 *s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotificationConfiguration", arg0) - ret0, _ := ret[0].(*s3.NotificationConfiguration) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotificationConfiguration indicates an expected call of GetBucketNotificationConfiguration. -func (mr *MockS3APIMockRecorder) GetBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfiguration), arg0) -} - -// GetBucketNotificationConfigurationRequest mocks base method. -func (m *MockS3API) GetBucketNotificationConfigurationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.NotificationConfiguration) - return ret0, ret1 -} - -// GetBucketNotificationConfigurationRequest indicates an expected call of GetBucketNotificationConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationRequest), arg0) -} - -// GetBucketNotificationConfigurationWithContext mocks base method. -func (m *MockS3API) GetBucketNotificationConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfiguration, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketNotificationConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.NotificationConfiguration) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotificationConfigurationWithContext indicates an expected call of GetBucketNotificationConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationConfigurationWithContext), varargs...) -} - -// GetBucketNotificationRequest mocks base method. -func (m *MockS3API) GetBucketNotificationRequest(arg0 *s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfigurationDeprecated) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketNotificationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.NotificationConfigurationDeprecated) - return ret0, ret1 -} - -// GetBucketNotificationRequest indicates an expected call of GetBucketNotificationRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketNotificationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationRequest), arg0) -} - -// GetBucketNotificationWithContext mocks base method. -func (m *MockS3API) GetBucketNotificationWithContext(arg0 aws.Context, arg1 *s3.GetBucketNotificationConfigurationRequest, arg2 ...request.Option) (*s3.NotificationConfigurationDeprecated, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketNotificationWithContext", varargs...) - ret0, _ := ret[0].(*s3.NotificationConfigurationDeprecated) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketNotificationWithContext indicates an expected call of GetBucketNotificationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketNotificationWithContext), varargs...) -} - -// GetBucketOwnershipControls mocks base method. -func (m *MockS3API) GetBucketOwnershipControls(arg0 *s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketOwnershipControls", arg0) - ret0, _ := ret[0].(*s3.GetBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketOwnershipControls indicates an expected call of GetBucketOwnershipControls. 
-func (mr *MockS3APIMockRecorder) GetBucketOwnershipControls(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControls), arg0) -} - -// GetBucketOwnershipControlsRequest mocks base method. -func (m *MockS3API) GetBucketOwnershipControlsRequest(arg0 *s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketOwnershipControlsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketOwnershipControlsOutput) - return ret0, ret1 -} - -// GetBucketOwnershipControlsRequest indicates an expected call of GetBucketOwnershipControlsRequest. -func (mr *MockS3APIMockRecorder) GetBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControlsRequest), arg0) -} - -// GetBucketOwnershipControlsWithContext mocks base method. -func (m *MockS3API) GetBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.GetBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketOwnershipControlsWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketOwnershipControlsWithContext indicates an expected call of GetBucketOwnershipControlsWithContext. -func (mr *MockS3APIMockRecorder) GetBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketOwnershipControlsWithContext), varargs...) -} - -// GetBucketPolicy mocks base method. -func (m *MockS3API) GetBucketPolicy(arg0 *s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicy", arg0) - ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicy indicates an expected call of GetBucketPolicy. -func (mr *MockS3APIMockRecorder) GetBucketPolicy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicy", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicy), arg0) -} - -// GetBucketPolicyRequest mocks base method. -func (m *MockS3API) GetBucketPolicyRequest(arg0 *s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketPolicyOutput) - return ret0, ret1 -} - -// GetBucketPolicyRequest indicates an expected call of GetBucketPolicyRequest. -func (mr *MockS3APIMockRecorder) GetBucketPolicyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyRequest), arg0) -} - -// GetBucketPolicyStatus mocks base method. -func (m *MockS3API) GetBucketPolicyStatus(arg0 *s3.GetBucketPolicyStatusInput) (*s3.GetBucketPolicyStatusOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicyStatus", arg0) - ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicyStatus indicates an expected call of GetBucketPolicyStatus. 
-func (mr *MockS3APIMockRecorder) GetBucketPolicyStatus(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatus", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatus), arg0) -} - -// GetBucketPolicyStatusRequest mocks base method. -func (m *MockS3API) GetBucketPolicyStatusRequest(arg0 *s3.GetBucketPolicyStatusInput) (*request.Request, *s3.GetBucketPolicyStatusOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketPolicyStatusRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketPolicyStatusOutput) - return ret0, ret1 -} - -// GetBucketPolicyStatusRequest indicates an expected call of GetBucketPolicyStatusRequest. -func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusRequest), arg0) -} - -// GetBucketPolicyStatusWithContext mocks base method. -func (m *MockS3API) GetBucketPolicyStatusWithContext(arg0 aws.Context, arg1 *s3.GetBucketPolicyStatusInput, arg2 ...request.Option) (*s3.GetBucketPolicyStatusOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketPolicyStatusWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketPolicyStatusOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicyStatusWithContext indicates an expected call of GetBucketPolicyStatusWithContext. -func (mr *MockS3APIMockRecorder) GetBucketPolicyStatusWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyStatusWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyStatusWithContext), varargs...) -} - -// GetBucketPolicyWithContext mocks base method. -func (m *MockS3API) GetBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.GetBucketPolicyInput, arg2 ...request.Option) (*s3.GetBucketPolicyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketPolicyWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketPolicyWithContext indicates an expected call of GetBucketPolicyWithContext. -func (mr *MockS3APIMockRecorder) GetBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketPolicyWithContext), varargs...) -} - -// GetBucketReplication mocks base method. -func (m *MockS3API) GetBucketReplication(arg0 *s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketReplication", arg0) - ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketReplication indicates an expected call of GetBucketReplication. -func (mr *MockS3APIMockRecorder) GetBucketReplication(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplication", reflect.TypeOf((*MockS3API)(nil).GetBucketReplication), arg0) -} - -// GetBucketReplicationRequest mocks base method. 
-func (m *MockS3API) GetBucketReplicationRequest(arg0 *s3.GetBucketReplicationInput) (*request.Request, *s3.GetBucketReplicationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketReplicationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketReplicationOutput) - return ret0, ret1 -} - -// GetBucketReplicationRequest indicates an expected call of GetBucketReplicationRequest. -func (mr *MockS3APIMockRecorder) GetBucketReplicationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationRequest), arg0) -} - -// GetBucketReplicationWithContext mocks base method. -func (m *MockS3API) GetBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.GetBucketReplicationInput, arg2 ...request.Option) (*s3.GetBucketReplicationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketReplicationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketReplicationWithContext indicates an expected call of GetBucketReplicationWithContext. -func (mr *MockS3APIMockRecorder) GetBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketReplicationWithContext), varargs...) -} - -// GetBucketRequestPayment mocks base method. 
-func (m *MockS3API) GetBucketRequestPayment(arg0 *s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketRequestPayment", arg0) - ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketRequestPayment indicates an expected call of GetBucketRequestPayment. -func (mr *MockS3APIMockRecorder) GetBucketRequestPayment(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPayment), arg0) -} - -// GetBucketRequestPaymentRequest mocks base method. -func (m *MockS3API) GetBucketRequestPaymentRequest(arg0 *s3.GetBucketRequestPaymentInput) (*request.Request, *s3.GetBucketRequestPaymentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketRequestPaymentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketRequestPaymentOutput) - return ret0, ret1 -} - -// GetBucketRequestPaymentRequest indicates an expected call of GetBucketRequestPaymentRequest. -func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentRequest), arg0) -} - -// GetBucketRequestPaymentWithContext mocks base method. -func (m *MockS3API) GetBucketRequestPaymentWithContext(arg0 aws.Context, arg1 *s3.GetBucketRequestPaymentInput, arg2 ...request.Option) (*s3.GetBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketRequestPaymentWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.GetBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketRequestPaymentWithContext indicates an expected call of GetBucketRequestPaymentWithContext. -func (mr *MockS3APIMockRecorder) GetBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketRequestPaymentWithContext), varargs...) -} - -// GetBucketTagging mocks base method. -func (m *MockS3API) GetBucketTagging(arg0 *s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketTagging", arg0) - ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketTagging indicates an expected call of GetBucketTagging. -func (mr *MockS3APIMockRecorder) GetBucketTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTagging", reflect.TypeOf((*MockS3API)(nil).GetBucketTagging), arg0) -} - -// GetBucketTaggingRequest mocks base method. -func (m *MockS3API) GetBucketTaggingRequest(arg0 *s3.GetBucketTaggingInput) (*request.Request, *s3.GetBucketTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketTaggingOutput) - return ret0, ret1 -} - -// GetBucketTaggingRequest indicates an expected call of GetBucketTaggingRequest. 
-func (mr *MockS3APIMockRecorder) GetBucketTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingRequest), arg0) -} - -// GetBucketTaggingWithContext mocks base method. -func (m *MockS3API) GetBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.GetBucketTaggingInput, arg2 ...request.Option) (*s3.GetBucketTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketTaggingWithContext indicates an expected call of GetBucketTaggingWithContext. -func (mr *MockS3APIMockRecorder) GetBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketTaggingWithContext), varargs...) -} - -// GetBucketVersioning mocks base method. -func (m *MockS3API) GetBucketVersioning(arg0 *s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketVersioning", arg0) - ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketVersioning indicates an expected call of GetBucketVersioning. -func (mr *MockS3APIMockRecorder) GetBucketVersioning(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioning", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioning), arg0) -} - -// GetBucketVersioningRequest mocks base method. 
-func (m *MockS3API) GetBucketVersioningRequest(arg0 *s3.GetBucketVersioningInput) (*request.Request, *s3.GetBucketVersioningOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketVersioningRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketVersioningOutput) - return ret0, ret1 -} - -// GetBucketVersioningRequest indicates an expected call of GetBucketVersioningRequest. -func (mr *MockS3APIMockRecorder) GetBucketVersioningRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningRequest), arg0) -} - -// GetBucketVersioningWithContext mocks base method. -func (m *MockS3API) GetBucketVersioningWithContext(arg0 aws.Context, arg1 *s3.GetBucketVersioningInput, arg2 ...request.Option) (*s3.GetBucketVersioningOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketVersioningWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketVersioningWithContext indicates an expected call of GetBucketVersioningWithContext. -func (mr *MockS3APIMockRecorder) GetBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketVersioningWithContext), varargs...) -} - -// GetBucketWebsite mocks base method. 
-func (m *MockS3API) GetBucketWebsite(arg0 *s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketWebsite", arg0) - ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketWebsite indicates an expected call of GetBucketWebsite. -func (mr *MockS3APIMockRecorder) GetBucketWebsite(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsite", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsite), arg0) -} - -// GetBucketWebsiteRequest mocks base method. -func (m *MockS3API) GetBucketWebsiteRequest(arg0 *s3.GetBucketWebsiteInput) (*request.Request, *s3.GetBucketWebsiteOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBucketWebsiteRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetBucketWebsiteOutput) - return ret0, ret1 -} - -// GetBucketWebsiteRequest indicates an expected call of GetBucketWebsiteRequest. -func (mr *MockS3APIMockRecorder) GetBucketWebsiteRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteRequest), arg0) -} - -// GetBucketWebsiteWithContext mocks base method. -func (m *MockS3API) GetBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.GetBucketWebsiteInput, arg2 ...request.Option) (*s3.GetBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetBucketWebsiteWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBucketWebsiteWithContext indicates an expected call of GetBucketWebsiteWithContext. 
-func (mr *MockS3APIMockRecorder) GetBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).GetBucketWebsiteWithContext), varargs...) -} - -// GetObject mocks base method. -func (m *MockS3API) GetObject(arg0 *s3.GetObjectInput) (*s3.GetObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObject", arg0) - ret0, _ := ret[0].(*s3.GetObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObject indicates an expected call of GetObject. -func (mr *MockS3APIMockRecorder) GetObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObject", reflect.TypeOf((*MockS3API)(nil).GetObject), arg0) -} - -// GetObjectAcl mocks base method. -func (m *MockS3API) GetObjectAcl(arg0 *s3.GetObjectAclInput) (*s3.GetObjectAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAcl", arg0) - ret0, _ := ret[0].(*s3.GetObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAcl indicates an expected call of GetObjectAcl. -func (mr *MockS3APIMockRecorder) GetObjectAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAcl", reflect.TypeOf((*MockS3API)(nil).GetObjectAcl), arg0) -} - -// GetObjectAclRequest mocks base method. -func (m *MockS3API) GetObjectAclRequest(arg0 *s3.GetObjectAclInput) (*request.Request, *s3.GetObjectAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectAclOutput) - return ret0, ret1 -} - -// GetObjectAclRequest indicates an expected call of GetObjectAclRequest. 
-func (mr *MockS3APIMockRecorder) GetObjectAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAclRequest), arg0) -} - -// GetObjectAclWithContext mocks base method. -func (m *MockS3API) GetObjectAclWithContext(arg0 aws.Context, arg1 *s3.GetObjectAclInput, arg2 ...request.Option) (*s3.GetObjectAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAclWithContext indicates an expected call of GetObjectAclWithContext. -func (mr *MockS3APIMockRecorder) GetObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAclWithContext), varargs...) -} - -// GetObjectAttributes mocks base method. -func (m *MockS3API) GetObjectAttributes(arg0 *s3.GetObjectAttributesInput) (*s3.GetObjectAttributesOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAttributes", arg0) - ret0, _ := ret[0].(*s3.GetObjectAttributesOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAttributes indicates an expected call of GetObjectAttributes. -func (mr *MockS3APIMockRecorder) GetObjectAttributes(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributes", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributes), arg0) -} - -// GetObjectAttributesRequest mocks base method. 
-func (m *MockS3API) GetObjectAttributesRequest(arg0 *s3.GetObjectAttributesInput) (*request.Request, *s3.GetObjectAttributesOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectAttributesRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectAttributesOutput) - return ret0, ret1 -} - -// GetObjectAttributesRequest indicates an expected call of GetObjectAttributesRequest. -func (mr *MockS3APIMockRecorder) GetObjectAttributesRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributesRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributesRequest), arg0) -} - -// GetObjectAttributesWithContext mocks base method. -func (m *MockS3API) GetObjectAttributesWithContext(arg0 aws.Context, arg1 *s3.GetObjectAttributesInput, arg2 ...request.Option) (*s3.GetObjectAttributesOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectAttributesWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectAttributesOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectAttributesWithContext indicates an expected call of GetObjectAttributesWithContext. -func (mr *MockS3APIMockRecorder) GetObjectAttributesWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectAttributesWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectAttributesWithContext), varargs...) -} - -// GetObjectLegalHold mocks base method. 
-func (m *MockS3API) GetObjectLegalHold(arg0 *s3.GetObjectLegalHoldInput) (*s3.GetObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLegalHold", arg0) - ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLegalHold indicates an expected call of GetObjectLegalHold. -func (mr *MockS3APIMockRecorder) GetObjectLegalHold(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHold), arg0) -} - -// GetObjectLegalHoldRequest mocks base method. -func (m *MockS3API) GetObjectLegalHoldRequest(arg0 *s3.GetObjectLegalHoldInput) (*request.Request, *s3.GetObjectLegalHoldOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLegalHoldRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectLegalHoldOutput) - return ret0, ret1 -} - -// GetObjectLegalHoldRequest indicates an expected call of GetObjectLegalHoldRequest. -func (mr *MockS3APIMockRecorder) GetObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldRequest), arg0) -} - -// GetObjectLegalHoldWithContext mocks base method. -func (m *MockS3API) GetObjectLegalHoldWithContext(arg0 aws.Context, arg1 *s3.GetObjectLegalHoldInput, arg2 ...request.Option) (*s3.GetObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectLegalHoldWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLegalHoldWithContext indicates an expected call of GetObjectLegalHoldWithContext. 
-func (mr *MockS3APIMockRecorder) GetObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLegalHoldWithContext), varargs...) -} - -// GetObjectLockConfiguration mocks base method. -func (m *MockS3API) GetObjectLockConfiguration(arg0 *s3.GetObjectLockConfigurationInput) (*s3.GetObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLockConfiguration", arg0) - ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLockConfiguration indicates an expected call of GetObjectLockConfiguration. -func (mr *MockS3APIMockRecorder) GetObjectLockConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfiguration), arg0) -} - -// GetObjectLockConfigurationRequest mocks base method. -func (m *MockS3API) GetObjectLockConfigurationRequest(arg0 *s3.GetObjectLockConfigurationInput) (*request.Request, *s3.GetObjectLockConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectLockConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectLockConfigurationOutput) - return ret0, ret1 -} - -// GetObjectLockConfigurationRequest indicates an expected call of GetObjectLockConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationRequest), arg0) -} - -// GetObjectLockConfigurationWithContext mocks base method. -func (m *MockS3API) GetObjectLockConfigurationWithContext(arg0 aws.Context, arg1 *s3.GetObjectLockConfigurationInput, arg2 ...request.Option) (*s3.GetObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectLockConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectLockConfigurationWithContext indicates an expected call of GetObjectLockConfigurationWithContext. -func (mr *MockS3APIMockRecorder) GetObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectLockConfigurationWithContext), varargs...) -} - -// GetObjectRequest mocks base method. -func (m *MockS3API) GetObjectRequest(arg0 *s3.GetObjectInput) (*request.Request, *s3.GetObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectOutput) - return ret0, ret1 -} - -// GetObjectRequest indicates an expected call of GetObjectRequest. 
-func (mr *MockS3APIMockRecorder) GetObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRequest), arg0) -} - -// GetObjectRetention mocks base method. -func (m *MockS3API) GetObjectRetention(arg0 *s3.GetObjectRetentionInput) (*s3.GetObjectRetentionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectRetention", arg0) - ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectRetention indicates an expected call of GetObjectRetention. -func (mr *MockS3APIMockRecorder) GetObjectRetention(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetention", reflect.TypeOf((*MockS3API)(nil).GetObjectRetention), arg0) -} - -// GetObjectRetentionRequest mocks base method. -func (m *MockS3API) GetObjectRetentionRequest(arg0 *s3.GetObjectRetentionInput) (*request.Request, *s3.GetObjectRetentionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectRetentionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectRetentionOutput) - return ret0, ret1 -} - -// GetObjectRetentionRequest indicates an expected call of GetObjectRetentionRequest. -func (mr *MockS3APIMockRecorder) GetObjectRetentionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionRequest), arg0) -} - -// GetObjectRetentionWithContext mocks base method. 
-func (m *MockS3API) GetObjectRetentionWithContext(arg0 aws.Context, arg1 *s3.GetObjectRetentionInput, arg2 ...request.Option) (*s3.GetObjectRetentionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectRetentionWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectRetentionWithContext indicates an expected call of GetObjectRetentionWithContext. -func (mr *MockS3APIMockRecorder) GetObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectRetentionWithContext), varargs...) -} - -// GetObjectTagging mocks base method. -func (m *MockS3API) GetObjectTagging(arg0 *s3.GetObjectTaggingInput) (*s3.GetObjectTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTagging", arg0) - ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTagging indicates an expected call of GetObjectTagging. -func (mr *MockS3APIMockRecorder) GetObjectTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTagging", reflect.TypeOf((*MockS3API)(nil).GetObjectTagging), arg0) -} - -// GetObjectTaggingRequest mocks base method. 
-func (m *MockS3API) GetObjectTaggingRequest(arg0 *s3.GetObjectTaggingInput) (*request.Request, *s3.GetObjectTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectTaggingOutput) - return ret0, ret1 -} - -// GetObjectTaggingRequest indicates an expected call of GetObjectTaggingRequest. -func (mr *MockS3APIMockRecorder) GetObjectTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingRequest), arg0) -} - -// GetObjectTaggingWithContext mocks base method. -func (m *MockS3API) GetObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.GetObjectTaggingInput, arg2 ...request.Option) (*s3.GetObjectTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTaggingWithContext indicates an expected call of GetObjectTaggingWithContext. -func (mr *MockS3APIMockRecorder) GetObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTaggingWithContext), varargs...) -} - -// GetObjectTorrent mocks base method. 
-func (m *MockS3API) GetObjectTorrent(arg0 *s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTorrent", arg0) - ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTorrent indicates an expected call of GetObjectTorrent. -func (mr *MockS3APIMockRecorder) GetObjectTorrent(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrent", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrent), arg0) -} - -// GetObjectTorrentRequest mocks base method. -func (m *MockS3API) GetObjectTorrentRequest(arg0 *s3.GetObjectTorrentInput) (*request.Request, *s3.GetObjectTorrentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetObjectTorrentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetObjectTorrentOutput) - return ret0, ret1 -} - -// GetObjectTorrentRequest indicates an expected call of GetObjectTorrentRequest. -func (mr *MockS3APIMockRecorder) GetObjectTorrentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentRequest", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentRequest), arg0) -} - -// GetObjectTorrentWithContext mocks base method. -func (m *MockS3API) GetObjectTorrentWithContext(arg0 aws.Context, arg1 *s3.GetObjectTorrentInput, arg2 ...request.Option) (*s3.GetObjectTorrentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectTorrentWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectTorrentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectTorrentWithContext indicates an expected call of GetObjectTorrentWithContext. 
-func (mr *MockS3APIMockRecorder) GetObjectTorrentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectTorrentWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectTorrentWithContext), varargs...) -} - -// GetObjectWithContext mocks base method. -func (m *MockS3API) GetObjectWithContext(arg0 aws.Context, arg1 *s3.GetObjectInput, arg2 ...request.Option) (*s3.GetObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetObjectWithContext indicates an expected call of GetObjectWithContext. -func (mr *MockS3APIMockRecorder) GetObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetObjectWithContext", reflect.TypeOf((*MockS3API)(nil).GetObjectWithContext), varargs...) -} - -// GetPublicAccessBlock mocks base method. -func (m *MockS3API) GetPublicAccessBlock(arg0 *s3.GetPublicAccessBlockInput) (*s3.GetPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPublicAccessBlock", arg0) - ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPublicAccessBlock indicates an expected call of GetPublicAccessBlock. -func (mr *MockS3APIMockRecorder) GetPublicAccessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlock), arg0) -} - -// GetPublicAccessBlockRequest mocks base method. 
-func (m *MockS3API) GetPublicAccessBlockRequest(arg0 *s3.GetPublicAccessBlockInput) (*request.Request, *s3.GetPublicAccessBlockOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPublicAccessBlockRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.GetPublicAccessBlockOutput) - return ret0, ret1 -} - -// GetPublicAccessBlockRequest indicates an expected call of GetPublicAccessBlockRequest. -func (mr *MockS3APIMockRecorder) GetPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockRequest), arg0) -} - -// GetPublicAccessBlockWithContext mocks base method. -func (m *MockS3API) GetPublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.GetPublicAccessBlockInput, arg2 ...request.Option) (*s3.GetPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "GetPublicAccessBlockWithContext", varargs...) - ret0, _ := ret[0].(*s3.GetPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPublicAccessBlockWithContext indicates an expected call of GetPublicAccessBlockWithContext. -func (mr *MockS3APIMockRecorder) GetPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).GetPublicAccessBlockWithContext), varargs...) -} - -// HeadBucket mocks base method. 
-func (m *MockS3API) HeadBucket(arg0 *s3.HeadBucketInput) (*s3.HeadBucketOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadBucket", arg0) - ret0, _ := ret[0].(*s3.HeadBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadBucket indicates an expected call of HeadBucket. -func (mr *MockS3APIMockRecorder) HeadBucket(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucket", reflect.TypeOf((*MockS3API)(nil).HeadBucket), arg0) -} - -// HeadBucketRequest mocks base method. -func (m *MockS3API) HeadBucketRequest(arg0 *s3.HeadBucketInput) (*request.Request, *s3.HeadBucketOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadBucketRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.HeadBucketOutput) - return ret0, ret1 -} - -// HeadBucketRequest indicates an expected call of HeadBucketRequest. -func (mr *MockS3APIMockRecorder) HeadBucketRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketRequest", reflect.TypeOf((*MockS3API)(nil).HeadBucketRequest), arg0) -} - -// HeadBucketWithContext mocks base method. -func (m *MockS3API) HeadBucketWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.Option) (*s3.HeadBucketOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "HeadBucketWithContext", varargs...) - ret0, _ := ret[0].(*s3.HeadBucketOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadBucketWithContext indicates an expected call of HeadBucketWithContext. -func (mr *MockS3APIMockRecorder) HeadBucketWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadBucketWithContext", reflect.TypeOf((*MockS3API)(nil).HeadBucketWithContext), varargs...) -} - -// HeadObject mocks base method. -func (m *MockS3API) HeadObject(arg0 *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadObject", arg0) - ret0, _ := ret[0].(*s3.HeadObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadObject indicates an expected call of HeadObject. -func (mr *MockS3APIMockRecorder) HeadObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObject", reflect.TypeOf((*MockS3API)(nil).HeadObject), arg0) -} - -// HeadObjectRequest mocks base method. -func (m *MockS3API) HeadObjectRequest(arg0 *s3.HeadObjectInput) (*request.Request, *s3.HeadObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HeadObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.HeadObjectOutput) - return ret0, ret1 -} - -// HeadObjectRequest indicates an expected call of HeadObjectRequest. -func (mr *MockS3APIMockRecorder) HeadObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectRequest", reflect.TypeOf((*MockS3API)(nil).HeadObjectRequest), arg0) -} - -// HeadObjectWithContext mocks base method. -func (m *MockS3API) HeadObjectWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.Option) (*s3.HeadObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "HeadObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.HeadObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HeadObjectWithContext indicates an expected call of HeadObjectWithContext. 
-func (mr *MockS3APIMockRecorder) HeadObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HeadObjectWithContext", reflect.TypeOf((*MockS3API)(nil).HeadObjectWithContext), varargs...) -} - -// ListBucketAnalyticsConfigurations mocks base method. -func (m *MockS3API) ListBucketAnalyticsConfigurations(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketAnalyticsConfigurations indicates an expected call of ListBucketAnalyticsConfigurations. -func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurations), arg0) -} - -// ListBucketAnalyticsConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketAnalyticsConfigurationsRequest(arg0 *s3.ListBucketAnalyticsConfigurationsInput) (*request.Request, *s3.ListBucketAnalyticsConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketAnalyticsConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketAnalyticsConfigurationsRequest indicates an expected call of ListBucketAnalyticsConfigurationsRequest. 
-func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsRequest), arg0) -} - -// ListBucketAnalyticsConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketAnalyticsConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketAnalyticsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketAnalyticsConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketAnalyticsConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketAnalyticsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketAnalyticsConfigurationsWithContext indicates an expected call of ListBucketAnalyticsConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketAnalyticsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketAnalyticsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketAnalyticsConfigurationsWithContext), varargs...) -} - -// ListBucketIntelligentTieringConfigurations mocks base method. 
-func (m *MockS3API) ListBucketIntelligentTieringConfigurations(arg0 *s3.ListBucketIntelligentTieringConfigurationsInput) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketIntelligentTieringConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketIntelligentTieringConfigurations indicates an expected call of ListBucketIntelligentTieringConfigurations. -func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurations), arg0) -} - -// ListBucketIntelligentTieringConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketIntelligentTieringConfigurationsRequest(arg0 *s3.ListBucketIntelligentTieringConfigurationsInput) (*request.Request, *s3.ListBucketIntelligentTieringConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketIntelligentTieringConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketIntelligentTieringConfigurationsRequest indicates an expected call of ListBucketIntelligentTieringConfigurationsRequest. -func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurationsRequest), arg0) -} - -// ListBucketIntelligentTieringConfigurationsWithContext mocks base method. 
-func (m *MockS3API) ListBucketIntelligentTieringConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketIntelligentTieringConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketIntelligentTieringConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketIntelligentTieringConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketIntelligentTieringConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketIntelligentTieringConfigurationsWithContext indicates an expected call of ListBucketIntelligentTieringConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketIntelligentTieringConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketIntelligentTieringConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketIntelligentTieringConfigurationsWithContext), varargs...) -} - -// ListBucketInventoryConfigurations mocks base method. -func (m *MockS3API) ListBucketInventoryConfigurations(arg0 *s3.ListBucketInventoryConfigurationsInput) (*s3.ListBucketInventoryConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketInventoryConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketInventoryConfigurations indicates an expected call of ListBucketInventoryConfigurations. 
-func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurations), arg0) -} - -// ListBucketInventoryConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketInventoryConfigurationsRequest(arg0 *s3.ListBucketInventoryConfigurationsInput) (*request.Request, *s3.ListBucketInventoryConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketInventoryConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketInventoryConfigurationsRequest indicates an expected call of ListBucketInventoryConfigurationsRequest. -func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsRequest), arg0) -} - -// ListBucketInventoryConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketInventoryConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketInventoryConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketInventoryConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketInventoryConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketInventoryConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketInventoryConfigurationsWithContext indicates an expected call of ListBucketInventoryConfigurationsWithContext. 
-func (mr *MockS3APIMockRecorder) ListBucketInventoryConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketInventoryConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketInventoryConfigurationsWithContext), varargs...) -} - -// ListBucketMetricsConfigurations mocks base method. -func (m *MockS3API) ListBucketMetricsConfigurations(arg0 *s3.ListBucketMetricsConfigurationsInput) (*s3.ListBucketMetricsConfigurationsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketMetricsConfigurations", arg0) - ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketMetricsConfigurations indicates an expected call of ListBucketMetricsConfigurations. -func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurations(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurations", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurations), arg0) -} - -// ListBucketMetricsConfigurationsRequest mocks base method. -func (m *MockS3API) ListBucketMetricsConfigurationsRequest(arg0 *s3.ListBucketMetricsConfigurationsInput) (*request.Request, *s3.ListBucketMetricsConfigurationsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketMetricsConfigurationsOutput) - return ret0, ret1 -} - -// ListBucketMetricsConfigurationsRequest indicates an expected call of ListBucketMetricsConfigurationsRequest. 
-func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsRequest), arg0) -} - -// ListBucketMetricsConfigurationsWithContext mocks base method. -func (m *MockS3API) ListBucketMetricsConfigurationsWithContext(arg0 aws.Context, arg1 *s3.ListBucketMetricsConfigurationsInput, arg2 ...request.Option) (*s3.ListBucketMetricsConfigurationsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketMetricsConfigurationsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketMetricsConfigurationsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketMetricsConfigurationsWithContext indicates an expected call of ListBucketMetricsConfigurationsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketMetricsConfigurationsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketMetricsConfigurationsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketMetricsConfigurationsWithContext), varargs...) -} - -// ListBuckets mocks base method. -func (m *MockS3API) ListBuckets(arg0 *s3.ListBucketsInput) (*s3.ListBucketsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBuckets", arg0) - ret0, _ := ret[0].(*s3.ListBucketsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBuckets indicates an expected call of ListBuckets. 
-func (mr *MockS3APIMockRecorder) ListBuckets(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBuckets", reflect.TypeOf((*MockS3API)(nil).ListBuckets), arg0) -} - -// ListBucketsRequest mocks base method. -func (m *MockS3API) ListBucketsRequest(arg0 *s3.ListBucketsInput) (*request.Request, *s3.ListBucketsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListBucketsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListBucketsOutput) - return ret0, ret1 -} - -// ListBucketsRequest indicates an expected call of ListBucketsRequest. -func (mr *MockS3APIMockRecorder) ListBucketsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsRequest", reflect.TypeOf((*MockS3API)(nil).ListBucketsRequest), arg0) -} - -// ListBucketsWithContext mocks base method. -func (m *MockS3API) ListBucketsWithContext(arg0 aws.Context, arg1 *s3.ListBucketsInput, arg2 ...request.Option) (*s3.ListBucketsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListBucketsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListBucketsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListBucketsWithContext indicates an expected call of ListBucketsWithContext. -func (mr *MockS3APIMockRecorder) ListBucketsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListBucketsWithContext", reflect.TypeOf((*MockS3API)(nil).ListBucketsWithContext), varargs...) -} - -// ListMultipartUploads mocks base method. 
-func (m *MockS3API) ListMultipartUploads(arg0 *s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListMultipartUploads", arg0) - ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListMultipartUploads indicates an expected call of ListMultipartUploads. -func (mr *MockS3APIMockRecorder) ListMultipartUploads(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploads", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploads), arg0) -} - -// ListMultipartUploadsPages mocks base method. -func (m *MockS3API) ListMultipartUploadsPages(arg0 *s3.ListMultipartUploadsInput, arg1 func(*s3.ListMultipartUploadsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListMultipartUploadsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListMultipartUploadsPages indicates an expected call of ListMultipartUploadsPages. -func (mr *MockS3APIMockRecorder) ListMultipartUploadsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPages", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPages), arg0, arg1) -} - -// ListMultipartUploadsPagesWithContext mocks base method. -func (m *MockS3API) ListMultipartUploadsPagesWithContext(arg0 aws.Context, arg1 *s3.ListMultipartUploadsInput, arg2 func(*s3.ListMultipartUploadsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListMultipartUploadsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListMultipartUploadsPagesWithContext indicates an expected call of ListMultipartUploadsPagesWithContext. 
-func (mr *MockS3APIMockRecorder) ListMultipartUploadsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsPagesWithContext), varargs...) -} - -// ListMultipartUploadsRequest mocks base method. -func (m *MockS3API) ListMultipartUploadsRequest(arg0 *s3.ListMultipartUploadsInput) (*request.Request, *s3.ListMultipartUploadsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListMultipartUploadsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListMultipartUploadsOutput) - return ret0, ret1 -} - -// ListMultipartUploadsRequest indicates an expected call of ListMultipartUploadsRequest. -func (mr *MockS3APIMockRecorder) ListMultipartUploadsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsRequest", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsRequest), arg0) -} - -// ListMultipartUploadsWithContext mocks base method. -func (m *MockS3API) ListMultipartUploadsWithContext(arg0 aws.Context, arg1 *s3.ListMultipartUploadsInput, arg2 ...request.Option) (*s3.ListMultipartUploadsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListMultipartUploadsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListMultipartUploadsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListMultipartUploadsWithContext indicates an expected call of ListMultipartUploadsWithContext. 
-func (mr *MockS3APIMockRecorder) ListMultipartUploadsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListMultipartUploadsWithContext", reflect.TypeOf((*MockS3API)(nil).ListMultipartUploadsWithContext), varargs...) -} - -// ListObjectVersions mocks base method. -func (m *MockS3API) ListObjectVersions(arg0 *s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectVersions", arg0) - ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectVersions indicates an expected call of ListObjectVersions. -func (mr *MockS3APIMockRecorder) ListObjectVersions(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersions", reflect.TypeOf((*MockS3API)(nil).ListObjectVersions), arg0) -} - -// ListObjectVersionsPages mocks base method. -func (m *MockS3API) ListObjectVersionsPages(arg0 *s3.ListObjectVersionsInput, arg1 func(*s3.ListObjectVersionsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectVersionsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectVersionsPages indicates an expected call of ListObjectVersionsPages. -func (mr *MockS3APIMockRecorder) ListObjectVersionsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPages), arg0, arg1) -} - -// ListObjectVersionsPagesWithContext mocks base method. 
-func (m *MockS3API) ListObjectVersionsPagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectVersionsInput, arg2 func(*s3.ListObjectVersionsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectVersionsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectVersionsPagesWithContext indicates an expected call of ListObjectVersionsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListObjectVersionsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsPagesWithContext), varargs...) -} - -// ListObjectVersionsRequest mocks base method. -func (m *MockS3API) ListObjectVersionsRequest(arg0 *s3.ListObjectVersionsInput) (*request.Request, *s3.ListObjectVersionsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectVersionsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListObjectVersionsOutput) - return ret0, ret1 -} - -// ListObjectVersionsRequest indicates an expected call of ListObjectVersionsRequest. -func (mr *MockS3APIMockRecorder) ListObjectVersionsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsRequest), arg0) -} - -// ListObjectVersionsWithContext mocks base method. 
-func (m *MockS3API) ListObjectVersionsWithContext(arg0 aws.Context, arg1 *s3.ListObjectVersionsInput, arg2 ...request.Option) (*s3.ListObjectVersionsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectVersionsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListObjectVersionsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectVersionsWithContext indicates an expected call of ListObjectVersionsWithContext. -func (mr *MockS3APIMockRecorder) ListObjectVersionsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectVersionsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectVersionsWithContext), varargs...) -} - -// ListObjects mocks base method. -func (m *MockS3API) ListObjects(arg0 *s3.ListObjectsInput) (*s3.ListObjectsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjects", arg0) - ret0, _ := ret[0].(*s3.ListObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjects indicates an expected call of ListObjects. -func (mr *MockS3APIMockRecorder) ListObjects(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjects", reflect.TypeOf((*MockS3API)(nil).ListObjects), arg0) -} - -// ListObjectsPages mocks base method. -func (m *MockS3API) ListObjectsPages(arg0 *s3.ListObjectsInput, arg1 func(*s3.ListObjectsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsPages indicates an expected call of ListObjectsPages. 
-func (mr *MockS3APIMockRecorder) ListObjectsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPages", reflect.TypeOf((*MockS3API)(nil).ListObjectsPages), arg0, arg1) -} - -// ListObjectsPagesWithContext mocks base method. -func (m *MockS3API) ListObjectsPagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectsInput, arg2 func(*s3.ListObjectsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsPagesWithContext indicates an expected call of ListObjectsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListObjectsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsPagesWithContext), varargs...) -} - -// ListObjectsRequest mocks base method. -func (m *MockS3API) ListObjectsRequest(arg0 *s3.ListObjectsInput) (*request.Request, *s3.ListObjectsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListObjectsOutput) - return ret0, ret1 -} - -// ListObjectsRequest indicates an expected call of ListObjectsRequest. -func (mr *MockS3APIMockRecorder) ListObjectsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsRequest", reflect.TypeOf((*MockS3API)(nil).ListObjectsRequest), arg0) -} - -// ListObjectsV2 mocks base method. 
-func (m *MockS3API) ListObjectsV2(arg0 *s3.ListObjectsV2Input) (*s3.ListObjectsV2Output, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsV2", arg0) - ret0, _ := ret[0].(*s3.ListObjectsV2Output) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsV2 indicates an expected call of ListObjectsV2. -func (mr *MockS3APIMockRecorder) ListObjectsV2(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2), arg0) -} - -// ListObjectsV2Pages mocks base method. -func (m *MockS3API) ListObjectsV2Pages(arg0 *s3.ListObjectsV2Input, arg1 func(*s3.ListObjectsV2Output, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsV2Pages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsV2Pages indicates an expected call of ListObjectsV2Pages. -func (mr *MockS3APIMockRecorder) ListObjectsV2Pages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Pages", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Pages), arg0, arg1) -} - -// ListObjectsV2PagesWithContext mocks base method. -func (m *MockS3API) ListObjectsV2PagesWithContext(arg0 aws.Context, arg1 *s3.ListObjectsV2Input, arg2 func(*s3.ListObjectsV2Output, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsV2PagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListObjectsV2PagesWithContext indicates an expected call of ListObjectsV2PagesWithContext. -func (mr *MockS3APIMockRecorder) ListObjectsV2PagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2PagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2PagesWithContext), varargs...) -} - -// ListObjectsV2Request mocks base method. -func (m *MockS3API) ListObjectsV2Request(arg0 *s3.ListObjectsV2Input) (*request.Request, *s3.ListObjectsV2Output) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListObjectsV2Request", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListObjectsV2Output) - return ret0, ret1 -} - -// ListObjectsV2Request indicates an expected call of ListObjectsV2Request. -func (mr *MockS3APIMockRecorder) ListObjectsV2Request(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2Request", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2Request), arg0) -} - -// ListObjectsV2WithContext mocks base method. -func (m *MockS3API) ListObjectsV2WithContext(arg0 aws.Context, arg1 *s3.ListObjectsV2Input, arg2 ...request.Option) (*s3.ListObjectsV2Output, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsV2WithContext", varargs...) - ret0, _ := ret[0].(*s3.ListObjectsV2Output) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsV2WithContext indicates an expected call of ListObjectsV2WithContext. -func (mr *MockS3APIMockRecorder) ListObjectsV2WithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsV2WithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsV2WithContext), varargs...) -} - -// ListObjectsWithContext mocks base method. 
-func (m *MockS3API) ListObjectsWithContext(arg0 aws.Context, arg1 *s3.ListObjectsInput, arg2 ...request.Option) (*s3.ListObjectsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListObjectsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListObjectsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListObjectsWithContext indicates an expected call of ListObjectsWithContext. -func (mr *MockS3APIMockRecorder) ListObjectsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListObjectsWithContext", reflect.TypeOf((*MockS3API)(nil).ListObjectsWithContext), varargs...) -} - -// ListParts mocks base method. -func (m *MockS3API) ListParts(arg0 *s3.ListPartsInput) (*s3.ListPartsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListParts", arg0) - ret0, _ := ret[0].(*s3.ListPartsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListParts indicates an expected call of ListParts. -func (mr *MockS3APIMockRecorder) ListParts(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListParts", reflect.TypeOf((*MockS3API)(nil).ListParts), arg0) -} - -// ListPartsPages mocks base method. -func (m *MockS3API) ListPartsPages(arg0 *s3.ListPartsInput, arg1 func(*s3.ListPartsOutput, bool) bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPartsPages", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListPartsPages indicates an expected call of ListPartsPages. 
-func (mr *MockS3APIMockRecorder) ListPartsPages(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPages", reflect.TypeOf((*MockS3API)(nil).ListPartsPages), arg0, arg1) -} - -// ListPartsPagesWithContext mocks base method. -func (m *MockS3API) ListPartsPagesWithContext(arg0 aws.Context, arg1 *s3.ListPartsInput, arg2 func(*s3.ListPartsOutput, bool) bool, arg3 ...request.Option) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListPartsPagesWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// ListPartsPagesWithContext indicates an expected call of ListPartsPagesWithContext. -func (mr *MockS3APIMockRecorder) ListPartsPagesWithContext(arg0, arg1, arg2 interface{}, arg3 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsPagesWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsPagesWithContext), varargs...) -} - -// ListPartsRequest mocks base method. -func (m *MockS3API) ListPartsRequest(arg0 *s3.ListPartsInput) (*request.Request, *s3.ListPartsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPartsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.ListPartsOutput) - return ret0, ret1 -} - -// ListPartsRequest indicates an expected call of ListPartsRequest. -func (mr *MockS3APIMockRecorder) ListPartsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsRequest", reflect.TypeOf((*MockS3API)(nil).ListPartsRequest), arg0) -} - -// ListPartsWithContext mocks base method. 
-func (m *MockS3API) ListPartsWithContext(arg0 aws.Context, arg1 *s3.ListPartsInput, arg2 ...request.Option) (*s3.ListPartsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "ListPartsWithContext", varargs...) - ret0, _ := ret[0].(*s3.ListPartsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ListPartsWithContext indicates an expected call of ListPartsWithContext. -func (mr *MockS3APIMockRecorder) ListPartsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPartsWithContext", reflect.TypeOf((*MockS3API)(nil).ListPartsWithContext), varargs...) -} - -// PutBucketAccelerateConfiguration mocks base method. -func (m *MockS3API) PutBucketAccelerateConfiguration(arg0 *s3.PutBucketAccelerateConfigurationInput) (*s3.PutBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAccelerateConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAccelerateConfiguration indicates an expected call of PutBucketAccelerateConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfiguration), arg0) -} - -// PutBucketAccelerateConfigurationRequest mocks base method. 
-func (m *MockS3API) PutBucketAccelerateConfigurationRequest(arg0 *s3.PutBucketAccelerateConfigurationInput) (*request.Request, *s3.PutBucketAccelerateConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketAccelerateConfigurationOutput) - return ret0, ret1 -} - -// PutBucketAccelerateConfigurationRequest indicates an expected call of PutBucketAccelerateConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationRequest), arg0) -} - -// PutBucketAccelerateConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketAccelerateConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketAccelerateConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAccelerateConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketAccelerateConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketAccelerateConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAccelerateConfigurationWithContext indicates an expected call of PutBucketAccelerateConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketAccelerateConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAccelerateConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAccelerateConfigurationWithContext), varargs...) 
-} - -// PutBucketAcl mocks base method. -func (m *MockS3API) PutBucketAcl(arg0 *s3.PutBucketAclInput) (*s3.PutBucketAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAcl", arg0) - ret0, _ := ret[0].(*s3.PutBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAcl indicates an expected call of PutBucketAcl. -func (mr *MockS3APIMockRecorder) PutBucketAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAcl", reflect.TypeOf((*MockS3API)(nil).PutBucketAcl), arg0) -} - -// PutBucketAclRequest mocks base method. -func (m *MockS3API) PutBucketAclRequest(arg0 *s3.PutBucketAclInput) (*request.Request, *s3.PutBucketAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketAclOutput) - return ret0, ret1 -} - -// PutBucketAclRequest indicates an expected call of PutBucketAclRequest. -func (mr *MockS3APIMockRecorder) PutBucketAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAclRequest), arg0) -} - -// PutBucketAclWithContext mocks base method. -func (m *MockS3API) PutBucketAclWithContext(arg0 aws.Context, arg1 *s3.PutBucketAclInput, arg2 ...request.Option) (*s3.PutBucketAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketAclWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAclWithContext indicates an expected call of PutBucketAclWithContext. 
-func (mr *MockS3APIMockRecorder) PutBucketAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAclWithContext), varargs...) -} - -// PutBucketAnalyticsConfiguration mocks base method. -func (m *MockS3API) PutBucketAnalyticsConfiguration(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*s3.PutBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAnalyticsConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAnalyticsConfiguration indicates an expected call of PutBucketAnalyticsConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfiguration), arg0) -} - -// PutBucketAnalyticsConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketAnalyticsConfigurationRequest(arg0 *s3.PutBucketAnalyticsConfigurationInput) (*request.Request, *s3.PutBucketAnalyticsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketAnalyticsConfigurationOutput) - return ret0, ret1 -} - -// PutBucketAnalyticsConfigurationRequest indicates an expected call of PutBucketAnalyticsConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationRequest), arg0) -} - -// PutBucketAnalyticsConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketAnalyticsConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketAnalyticsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketAnalyticsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketAnalyticsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketAnalyticsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketAnalyticsConfigurationWithContext indicates an expected call of PutBucketAnalyticsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketAnalyticsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketAnalyticsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketAnalyticsConfigurationWithContext), varargs...) -} - -// PutBucketCors mocks base method. -func (m *MockS3API) PutBucketCors(arg0 *s3.PutBucketCorsInput) (*s3.PutBucketCorsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketCors", arg0) - ret0, _ := ret[0].(*s3.PutBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketCors indicates an expected call of PutBucketCors. 
-func (mr *MockS3APIMockRecorder) PutBucketCors(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCors", reflect.TypeOf((*MockS3API)(nil).PutBucketCors), arg0) -} - -// PutBucketCorsRequest mocks base method. -func (m *MockS3API) PutBucketCorsRequest(arg0 *s3.PutBucketCorsInput) (*request.Request, *s3.PutBucketCorsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketCorsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketCorsOutput) - return ret0, ret1 -} - -// PutBucketCorsRequest indicates an expected call of PutBucketCorsRequest. -func (mr *MockS3APIMockRecorder) PutBucketCorsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsRequest), arg0) -} - -// PutBucketCorsWithContext mocks base method. -func (m *MockS3API) PutBucketCorsWithContext(arg0 aws.Context, arg1 *s3.PutBucketCorsInput, arg2 ...request.Option) (*s3.PutBucketCorsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketCorsWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketCorsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketCorsWithContext indicates an expected call of PutBucketCorsWithContext. -func (mr *MockS3APIMockRecorder) PutBucketCorsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketCorsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketCorsWithContext), varargs...) -} - -// PutBucketEncryption mocks base method. 
-func (m *MockS3API) PutBucketEncryption(arg0 *s3.PutBucketEncryptionInput) (*s3.PutBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketEncryption", arg0) - ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketEncryption indicates an expected call of PutBucketEncryption. -func (mr *MockS3APIMockRecorder) PutBucketEncryption(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryption", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryption), arg0) -} - -// PutBucketEncryptionRequest mocks base method. -func (m *MockS3API) PutBucketEncryptionRequest(arg0 *s3.PutBucketEncryptionInput) (*request.Request, *s3.PutBucketEncryptionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketEncryptionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketEncryptionOutput) - return ret0, ret1 -} - -// PutBucketEncryptionRequest indicates an expected call of PutBucketEncryptionRequest. -func (mr *MockS3APIMockRecorder) PutBucketEncryptionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionRequest), arg0) -} - -// PutBucketEncryptionWithContext mocks base method. -func (m *MockS3API) PutBucketEncryptionWithContext(arg0 aws.Context, arg1 *s3.PutBucketEncryptionInput, arg2 ...request.Option) (*s3.PutBucketEncryptionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketEncryptionWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketEncryptionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketEncryptionWithContext indicates an expected call of PutBucketEncryptionWithContext. 
-func (mr *MockS3APIMockRecorder) PutBucketEncryptionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketEncryptionWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketEncryptionWithContext), varargs...) -} - -// PutBucketIntelligentTieringConfiguration mocks base method. -func (m *MockS3API) PutBucketIntelligentTieringConfiguration(arg0 *s3.PutBucketIntelligentTieringConfigurationInput) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketIntelligentTieringConfiguration indicates an expected call of PutBucketIntelligentTieringConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfiguration), arg0) -} - -// PutBucketIntelligentTieringConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketIntelligentTieringConfigurationRequest(arg0 *s3.PutBucketIntelligentTieringConfigurationInput) (*request.Request, *s3.PutBucketIntelligentTieringConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketIntelligentTieringConfigurationOutput) - return ret0, ret1 -} - -// PutBucketIntelligentTieringConfigurationRequest indicates an expected call of PutBucketIntelligentTieringConfigurationRequest. 
-func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfigurationRequest), arg0) -} - -// PutBucketIntelligentTieringConfigurationWithContext mocks base method. -func (m *MockS3API) PutBucketIntelligentTieringConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketIntelligentTieringConfigurationInput, arg2 ...request.Option) (*s3.PutBucketIntelligentTieringConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketIntelligentTieringConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketIntelligentTieringConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketIntelligentTieringConfigurationWithContext indicates an expected call of PutBucketIntelligentTieringConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketIntelligentTieringConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketIntelligentTieringConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketIntelligentTieringConfigurationWithContext), varargs...) -} - -// PutBucketInventoryConfiguration mocks base method. 
-func (m *MockS3API) PutBucketInventoryConfiguration(arg0 *s3.PutBucketInventoryConfigurationInput) (*s3.PutBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketInventoryConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketInventoryConfiguration indicates an expected call of PutBucketInventoryConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketInventoryConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfiguration), arg0) -} - -// PutBucketInventoryConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketInventoryConfigurationRequest(arg0 *s3.PutBucketInventoryConfigurationInput) (*request.Request, *s3.PutBucketInventoryConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketInventoryConfigurationOutput) - return ret0, ret1 -} - -// PutBucketInventoryConfigurationRequest indicates an expected call of PutBucketInventoryConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationRequest), arg0) -} - -// PutBucketInventoryConfigurationWithContext mocks base method. 
-func (m *MockS3API) PutBucketInventoryConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketInventoryConfigurationInput, arg2 ...request.Option) (*s3.PutBucketInventoryConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketInventoryConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketInventoryConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketInventoryConfigurationWithContext indicates an expected call of PutBucketInventoryConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketInventoryConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketInventoryConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketInventoryConfigurationWithContext), varargs...) -} - -// PutBucketLifecycle mocks base method. -func (m *MockS3API) PutBucketLifecycle(arg0 *s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycle", arg0) - ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycle indicates an expected call of PutBucketLifecycle. -func (mr *MockS3APIMockRecorder) PutBucketLifecycle(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycle", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycle), arg0) -} - -// PutBucketLifecycleConfiguration mocks base method. 
-func (m *MockS3API) PutBucketLifecycleConfiguration(arg0 *s3.PutBucketLifecycleConfigurationInput) (*s3.PutBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycleConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycleConfiguration indicates an expected call of PutBucketLifecycleConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfiguration), arg0) -} - -// PutBucketLifecycleConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketLifecycleConfigurationRequest(arg0 *s3.PutBucketLifecycleConfigurationInput) (*request.Request, *s3.PutBucketLifecycleConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketLifecycleConfigurationOutput) - return ret0, ret1 -} - -// PutBucketLifecycleConfigurationRequest indicates an expected call of PutBucketLifecycleConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationRequest), arg0) -} - -// PutBucketLifecycleConfigurationWithContext mocks base method. 
-func (m *MockS3API) PutBucketLifecycleConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketLifecycleConfigurationInput, arg2 ...request.Option) (*s3.PutBucketLifecycleConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketLifecycleConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketLifecycleConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycleConfigurationWithContext indicates an expected call of PutBucketLifecycleConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleConfigurationWithContext), varargs...) -} - -// PutBucketLifecycleRequest mocks base method. -func (m *MockS3API) PutBucketLifecycleRequest(arg0 *s3.PutBucketLifecycleInput) (*request.Request, *s3.PutBucketLifecycleOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLifecycleRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketLifecycleOutput) - return ret0, ret1 -} - -// PutBucketLifecycleRequest indicates an expected call of PutBucketLifecycleRequest. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleRequest), arg0) -} - -// PutBucketLifecycleWithContext mocks base method. 
-func (m *MockS3API) PutBucketLifecycleWithContext(arg0 aws.Context, arg1 *s3.PutBucketLifecycleInput, arg2 ...request.Option) (*s3.PutBucketLifecycleOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketLifecycleWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketLifecycleOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLifecycleWithContext indicates an expected call of PutBucketLifecycleWithContext. -func (mr *MockS3APIMockRecorder) PutBucketLifecycleWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLifecycleWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLifecycleWithContext), varargs...) -} - -// PutBucketLogging mocks base method. -func (m *MockS3API) PutBucketLogging(arg0 *s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLogging", arg0) - ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLogging indicates an expected call of PutBucketLogging. -func (mr *MockS3APIMockRecorder) PutBucketLogging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLogging", reflect.TypeOf((*MockS3API)(nil).PutBucketLogging), arg0) -} - -// PutBucketLoggingRequest mocks base method. 
-func (m *MockS3API) PutBucketLoggingRequest(arg0 *s3.PutBucketLoggingInput) (*request.Request, *s3.PutBucketLoggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketLoggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketLoggingOutput) - return ret0, ret1 -} - -// PutBucketLoggingRequest indicates an expected call of PutBucketLoggingRequest. -func (mr *MockS3APIMockRecorder) PutBucketLoggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingRequest), arg0) -} - -// PutBucketLoggingWithContext mocks base method. -func (m *MockS3API) PutBucketLoggingWithContext(arg0 aws.Context, arg1 *s3.PutBucketLoggingInput, arg2 ...request.Option) (*s3.PutBucketLoggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketLoggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketLoggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketLoggingWithContext indicates an expected call of PutBucketLoggingWithContext. -func (mr *MockS3APIMockRecorder) PutBucketLoggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketLoggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketLoggingWithContext), varargs...) -} - -// PutBucketMetricsConfiguration mocks base method. 
-func (m *MockS3API) PutBucketMetricsConfiguration(arg0 *s3.PutBucketMetricsConfigurationInput) (*s3.PutBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketMetricsConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketMetricsConfiguration indicates an expected call of PutBucketMetricsConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketMetricsConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfiguration), arg0) -} - -// PutBucketMetricsConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketMetricsConfigurationRequest(arg0 *s3.PutBucketMetricsConfigurationInput) (*request.Request, *s3.PutBucketMetricsConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketMetricsConfigurationOutput) - return ret0, ret1 -} - -// PutBucketMetricsConfigurationRequest indicates an expected call of PutBucketMetricsConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationRequest), arg0) -} - -// PutBucketMetricsConfigurationWithContext mocks base method. 
-func (m *MockS3API) PutBucketMetricsConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketMetricsConfigurationInput, arg2 ...request.Option) (*s3.PutBucketMetricsConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketMetricsConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketMetricsConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketMetricsConfigurationWithContext indicates an expected call of PutBucketMetricsConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketMetricsConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketMetricsConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketMetricsConfigurationWithContext), varargs...) -} - -// PutBucketNotification mocks base method. -func (m *MockS3API) PutBucketNotification(arg0 *s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotification", arg0) - ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotification indicates an expected call of PutBucketNotification. -func (mr *MockS3APIMockRecorder) PutBucketNotification(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotification", reflect.TypeOf((*MockS3API)(nil).PutBucketNotification), arg0) -} - -// PutBucketNotificationConfiguration mocks base method. 
-func (m *MockS3API) PutBucketNotificationConfiguration(arg0 *s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotificationConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotificationConfiguration indicates an expected call of PutBucketNotificationConfiguration. -func (mr *MockS3APIMockRecorder) PutBucketNotificationConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfiguration", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfiguration), arg0) -} - -// PutBucketNotificationConfigurationRequest mocks base method. -func (m *MockS3API) PutBucketNotificationConfigurationRequest(arg0 *s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketNotificationConfigurationOutput) - return ret0, ret1 -} - -// PutBucketNotificationConfigurationRequest indicates an expected call of PutBucketNotificationConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationRequest), arg0) -} - -// PutBucketNotificationConfigurationWithContext mocks base method. 
-func (m *MockS3API) PutBucketNotificationConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutBucketNotificationConfigurationInput, arg2 ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketNotificationConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketNotificationConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotificationConfigurationWithContext indicates an expected call of PutBucketNotificationConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketNotificationConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationConfigurationWithContext), varargs...) -} - -// PutBucketNotificationRequest mocks base method. -func (m *MockS3API) PutBucketNotificationRequest(arg0 *s3.PutBucketNotificationInput) (*request.Request, *s3.PutBucketNotificationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketNotificationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketNotificationOutput) - return ret0, ret1 -} - -// PutBucketNotificationRequest indicates an expected call of PutBucketNotificationRequest. -func (mr *MockS3APIMockRecorder) PutBucketNotificationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationRequest), arg0) -} - -// PutBucketNotificationWithContext mocks base method. 
-func (m *MockS3API) PutBucketNotificationWithContext(arg0 aws.Context, arg1 *s3.PutBucketNotificationInput, arg2 ...request.Option) (*s3.PutBucketNotificationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketNotificationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketNotificationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketNotificationWithContext indicates an expected call of PutBucketNotificationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketNotificationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketNotificationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketNotificationWithContext), varargs...) -} - -// PutBucketOwnershipControls mocks base method. -func (m *MockS3API) PutBucketOwnershipControls(arg0 *s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketOwnershipControls", arg0) - ret0, _ := ret[0].(*s3.PutBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketOwnershipControls indicates an expected call of PutBucketOwnershipControls. -func (mr *MockS3APIMockRecorder) PutBucketOwnershipControls(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControls", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControls), arg0) -} - -// PutBucketOwnershipControlsRequest mocks base method. 
-func (m *MockS3API) PutBucketOwnershipControlsRequest(arg0 *s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketOwnershipControlsRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketOwnershipControlsOutput) - return ret0, ret1 -} - -// PutBucketOwnershipControlsRequest indicates an expected call of PutBucketOwnershipControlsRequest. -func (mr *MockS3APIMockRecorder) PutBucketOwnershipControlsRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControlsRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControlsRequest), arg0) -} - -// PutBucketOwnershipControlsWithContext mocks base method. -func (m *MockS3API) PutBucketOwnershipControlsWithContext(arg0 aws.Context, arg1 *s3.PutBucketOwnershipControlsInput, arg2 ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketOwnershipControlsWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketOwnershipControlsOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketOwnershipControlsWithContext indicates an expected call of PutBucketOwnershipControlsWithContext. -func (mr *MockS3APIMockRecorder) PutBucketOwnershipControlsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketOwnershipControlsWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketOwnershipControlsWithContext), varargs...) -} - -// PutBucketPolicy mocks base method. 
-func (m *MockS3API) PutBucketPolicy(arg0 *s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketPolicy", arg0) - ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketPolicy indicates an expected call of PutBucketPolicy. -func (mr *MockS3APIMockRecorder) PutBucketPolicy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicy", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicy), arg0) -} - -// PutBucketPolicyRequest mocks base method. -func (m *MockS3API) PutBucketPolicyRequest(arg0 *s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketPolicyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketPolicyOutput) - return ret0, ret1 -} - -// PutBucketPolicyRequest indicates an expected call of PutBucketPolicyRequest. -func (mr *MockS3APIMockRecorder) PutBucketPolicyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyRequest), arg0) -} - -// PutBucketPolicyWithContext mocks base method. -func (m *MockS3API) PutBucketPolicyWithContext(arg0 aws.Context, arg1 *s3.PutBucketPolicyInput, arg2 ...request.Option) (*s3.PutBucketPolicyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketPolicyWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketPolicyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketPolicyWithContext indicates an expected call of PutBucketPolicyWithContext. 
-func (mr *MockS3APIMockRecorder) PutBucketPolicyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketPolicyWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketPolicyWithContext), varargs...) -} - -// PutBucketReplication mocks base method. -func (m *MockS3API) PutBucketReplication(arg0 *s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketReplication", arg0) - ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketReplication indicates an expected call of PutBucketReplication. -func (mr *MockS3APIMockRecorder) PutBucketReplication(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplication", reflect.TypeOf((*MockS3API)(nil).PutBucketReplication), arg0) -} - -// PutBucketReplicationRequest mocks base method. -func (m *MockS3API) PutBucketReplicationRequest(arg0 *s3.PutBucketReplicationInput) (*request.Request, *s3.PutBucketReplicationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketReplicationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketReplicationOutput) - return ret0, ret1 -} - -// PutBucketReplicationRequest indicates an expected call of PutBucketReplicationRequest. -func (mr *MockS3APIMockRecorder) PutBucketReplicationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationRequest), arg0) -} - -// PutBucketReplicationWithContext mocks base method. 
-func (m *MockS3API) PutBucketReplicationWithContext(arg0 aws.Context, arg1 *s3.PutBucketReplicationInput, arg2 ...request.Option) (*s3.PutBucketReplicationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketReplicationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketReplicationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketReplicationWithContext indicates an expected call of PutBucketReplicationWithContext. -func (mr *MockS3APIMockRecorder) PutBucketReplicationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketReplicationWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketReplicationWithContext), varargs...) -} - -// PutBucketRequestPayment mocks base method. -func (m *MockS3API) PutBucketRequestPayment(arg0 *s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketRequestPayment", arg0) - ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketRequestPayment indicates an expected call of PutBucketRequestPayment. -func (mr *MockS3APIMockRecorder) PutBucketRequestPayment(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPayment", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPayment), arg0) -} - -// PutBucketRequestPaymentRequest mocks base method. 
-func (m *MockS3API) PutBucketRequestPaymentRequest(arg0 *s3.PutBucketRequestPaymentInput) (*request.Request, *s3.PutBucketRequestPaymentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketRequestPaymentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketRequestPaymentOutput) - return ret0, ret1 -} - -// PutBucketRequestPaymentRequest indicates an expected call of PutBucketRequestPaymentRequest. -func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentRequest), arg0) -} - -// PutBucketRequestPaymentWithContext mocks base method. -func (m *MockS3API) PutBucketRequestPaymentWithContext(arg0 aws.Context, arg1 *s3.PutBucketRequestPaymentInput, arg2 ...request.Option) (*s3.PutBucketRequestPaymentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketRequestPaymentWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketRequestPaymentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketRequestPaymentWithContext indicates an expected call of PutBucketRequestPaymentWithContext. -func (mr *MockS3APIMockRecorder) PutBucketRequestPaymentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketRequestPaymentWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketRequestPaymentWithContext), varargs...) -} - -// PutBucketTagging mocks base method. 
-func (m *MockS3API) PutBucketTagging(arg0 *s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketTagging", arg0) - ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketTagging indicates an expected call of PutBucketTagging. -func (mr *MockS3APIMockRecorder) PutBucketTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTagging", reflect.TypeOf((*MockS3API)(nil).PutBucketTagging), arg0) -} - -// PutBucketTaggingRequest mocks base method. -func (m *MockS3API) PutBucketTaggingRequest(arg0 *s3.PutBucketTaggingInput) (*request.Request, *s3.PutBucketTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketTaggingOutput) - return ret0, ret1 -} - -// PutBucketTaggingRequest indicates an expected call of PutBucketTaggingRequest. -func (mr *MockS3APIMockRecorder) PutBucketTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingRequest), arg0) -} - -// PutBucketTaggingWithContext mocks base method. -func (m *MockS3API) PutBucketTaggingWithContext(arg0 aws.Context, arg1 *s3.PutBucketTaggingInput, arg2 ...request.Option) (*s3.PutBucketTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketTaggingWithContext indicates an expected call of PutBucketTaggingWithContext. 
-func (mr *MockS3APIMockRecorder) PutBucketTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketTaggingWithContext), varargs...) -} - -// PutBucketVersioning mocks base method. -func (m *MockS3API) PutBucketVersioning(arg0 *s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketVersioning", arg0) - ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketVersioning indicates an expected call of PutBucketVersioning. -func (mr *MockS3APIMockRecorder) PutBucketVersioning(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioning", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioning), arg0) -} - -// PutBucketVersioningRequest mocks base method. -func (m *MockS3API) PutBucketVersioningRequest(arg0 *s3.PutBucketVersioningInput) (*request.Request, *s3.PutBucketVersioningOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketVersioningRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketVersioningOutput) - return ret0, ret1 -} - -// PutBucketVersioningRequest indicates an expected call of PutBucketVersioningRequest. -func (mr *MockS3APIMockRecorder) PutBucketVersioningRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningRequest), arg0) -} - -// PutBucketVersioningWithContext mocks base method. 
-func (m *MockS3API) PutBucketVersioningWithContext(arg0 aws.Context, arg1 *s3.PutBucketVersioningInput, arg2 ...request.Option) (*s3.PutBucketVersioningOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketVersioningWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketVersioningOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketVersioningWithContext indicates an expected call of PutBucketVersioningWithContext. -func (mr *MockS3APIMockRecorder) PutBucketVersioningWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketVersioningWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketVersioningWithContext), varargs...) -} - -// PutBucketWebsite mocks base method. -func (m *MockS3API) PutBucketWebsite(arg0 *s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketWebsite", arg0) - ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketWebsite indicates an expected call of PutBucketWebsite. -func (mr *MockS3APIMockRecorder) PutBucketWebsite(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsite", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsite), arg0) -} - -// PutBucketWebsiteRequest mocks base method. 
-func (m *MockS3API) PutBucketWebsiteRequest(arg0 *s3.PutBucketWebsiteInput) (*request.Request, *s3.PutBucketWebsiteOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutBucketWebsiteRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutBucketWebsiteOutput) - return ret0, ret1 -} - -// PutBucketWebsiteRequest indicates an expected call of PutBucketWebsiteRequest. -func (mr *MockS3APIMockRecorder) PutBucketWebsiteRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteRequest", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteRequest), arg0) -} - -// PutBucketWebsiteWithContext mocks base method. -func (m *MockS3API) PutBucketWebsiteWithContext(arg0 aws.Context, arg1 *s3.PutBucketWebsiteInput, arg2 ...request.Option) (*s3.PutBucketWebsiteOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutBucketWebsiteWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutBucketWebsiteOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutBucketWebsiteWithContext indicates an expected call of PutBucketWebsiteWithContext. -func (mr *MockS3APIMockRecorder) PutBucketWebsiteWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBucketWebsiteWithContext", reflect.TypeOf((*MockS3API)(nil).PutBucketWebsiteWithContext), varargs...) -} - -// PutObject mocks base method. -func (m *MockS3API) PutObject(arg0 *s3.PutObjectInput) (*s3.PutObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObject", arg0) - ret0, _ := ret[0].(*s3.PutObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObject indicates an expected call of PutObject. 
-func (mr *MockS3APIMockRecorder) PutObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockS3API)(nil).PutObject), arg0) -} - -// PutObjectAcl mocks base method. -func (m *MockS3API) PutObjectAcl(arg0 *s3.PutObjectAclInput) (*s3.PutObjectAclOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectAcl", arg0) - ret0, _ := ret[0].(*s3.PutObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectAcl indicates an expected call of PutObjectAcl. -func (mr *MockS3APIMockRecorder) PutObjectAcl(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAcl", reflect.TypeOf((*MockS3API)(nil).PutObjectAcl), arg0) -} - -// PutObjectAclRequest mocks base method. -func (m *MockS3API) PutObjectAclRequest(arg0 *s3.PutObjectAclInput) (*request.Request, *s3.PutObjectAclOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectAclRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectAclOutput) - return ret0, ret1 -} - -// PutObjectAclRequest indicates an expected call of PutObjectAclRequest. -func (mr *MockS3APIMockRecorder) PutObjectAclRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectAclRequest), arg0) -} - -// PutObjectAclWithContext mocks base method. -func (m *MockS3API) PutObjectAclWithContext(arg0 aws.Context, arg1 *s3.PutObjectAclInput, arg2 ...request.Option) (*s3.PutObjectAclOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectAclWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.PutObjectAclOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectAclWithContext indicates an expected call of PutObjectAclWithContext. -func (mr *MockS3APIMockRecorder) PutObjectAclWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectAclWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectAclWithContext), varargs...) -} - -// PutObjectLegalHold mocks base method. -func (m *MockS3API) PutObjectLegalHold(arg0 *s3.PutObjectLegalHoldInput) (*s3.PutObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLegalHold", arg0) - ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLegalHold indicates an expected call of PutObjectLegalHold. -func (mr *MockS3APIMockRecorder) PutObjectLegalHold(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHold", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHold), arg0) -} - -// PutObjectLegalHoldRequest mocks base method. -func (m *MockS3API) PutObjectLegalHoldRequest(arg0 *s3.PutObjectLegalHoldInput) (*request.Request, *s3.PutObjectLegalHoldOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLegalHoldRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectLegalHoldOutput) - return ret0, ret1 -} - -// PutObjectLegalHoldRequest indicates an expected call of PutObjectLegalHoldRequest. -func (mr *MockS3APIMockRecorder) PutObjectLegalHoldRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldRequest), arg0) -} - -// PutObjectLegalHoldWithContext mocks base method. 
-func (m *MockS3API) PutObjectLegalHoldWithContext(arg0 aws.Context, arg1 *s3.PutObjectLegalHoldInput, arg2 ...request.Option) (*s3.PutObjectLegalHoldOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectLegalHoldWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectLegalHoldOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLegalHoldWithContext indicates an expected call of PutObjectLegalHoldWithContext. -func (mr *MockS3APIMockRecorder) PutObjectLegalHoldWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLegalHoldWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLegalHoldWithContext), varargs...) -} - -// PutObjectLockConfiguration mocks base method. -func (m *MockS3API) PutObjectLockConfiguration(arg0 *s3.PutObjectLockConfigurationInput) (*s3.PutObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLockConfiguration", arg0) - ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLockConfiguration indicates an expected call of PutObjectLockConfiguration. -func (mr *MockS3APIMockRecorder) PutObjectLockConfiguration(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfiguration", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfiguration), arg0) -} - -// PutObjectLockConfigurationRequest mocks base method. 
-func (m *MockS3API) PutObjectLockConfigurationRequest(arg0 *s3.PutObjectLockConfigurationInput) (*request.Request, *s3.PutObjectLockConfigurationOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectLockConfigurationRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectLockConfigurationOutput) - return ret0, ret1 -} - -// PutObjectLockConfigurationRequest indicates an expected call of PutObjectLockConfigurationRequest. -func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationRequest), arg0) -} - -// PutObjectLockConfigurationWithContext mocks base method. -func (m *MockS3API) PutObjectLockConfigurationWithContext(arg0 aws.Context, arg1 *s3.PutObjectLockConfigurationInput, arg2 ...request.Option) (*s3.PutObjectLockConfigurationOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectLockConfigurationWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectLockConfigurationOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectLockConfigurationWithContext indicates an expected call of PutObjectLockConfigurationWithContext. -func (mr *MockS3APIMockRecorder) PutObjectLockConfigurationWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectLockConfigurationWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectLockConfigurationWithContext), varargs...) -} - -// PutObjectRequest mocks base method. 
-func (m *MockS3API) PutObjectRequest(arg0 *s3.PutObjectInput) (*request.Request, *s3.PutObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectOutput) - return ret0, ret1 -} - -// PutObjectRequest indicates an expected call of PutObjectRequest. -func (mr *MockS3APIMockRecorder) PutObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRequest), arg0) -} - -// PutObjectRetention mocks base method. -func (m *MockS3API) PutObjectRetention(arg0 *s3.PutObjectRetentionInput) (*s3.PutObjectRetentionOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectRetention", arg0) - ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectRetention indicates an expected call of PutObjectRetention. -func (mr *MockS3APIMockRecorder) PutObjectRetention(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetention", reflect.TypeOf((*MockS3API)(nil).PutObjectRetention), arg0) -} - -// PutObjectRetentionRequest mocks base method. -func (m *MockS3API) PutObjectRetentionRequest(arg0 *s3.PutObjectRetentionInput) (*request.Request, *s3.PutObjectRetentionOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectRetentionRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectRetentionOutput) - return ret0, ret1 -} - -// PutObjectRetentionRequest indicates an expected call of PutObjectRetentionRequest. 
-func (mr *MockS3APIMockRecorder) PutObjectRetentionRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionRequest), arg0) -} - -// PutObjectRetentionWithContext mocks base method. -func (m *MockS3API) PutObjectRetentionWithContext(arg0 aws.Context, arg1 *s3.PutObjectRetentionInput, arg2 ...request.Option) (*s3.PutObjectRetentionOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectRetentionWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectRetentionOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectRetentionWithContext indicates an expected call of PutObjectRetentionWithContext. -func (mr *MockS3APIMockRecorder) PutObjectRetentionWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectRetentionWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectRetentionWithContext), varargs...) -} - -// PutObjectTagging mocks base method. -func (m *MockS3API) PutObjectTagging(arg0 *s3.PutObjectTaggingInput) (*s3.PutObjectTaggingOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectTagging", arg0) - ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectTagging indicates an expected call of PutObjectTagging. -func (mr *MockS3APIMockRecorder) PutObjectTagging(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTagging", reflect.TypeOf((*MockS3API)(nil).PutObjectTagging), arg0) -} - -// PutObjectTaggingRequest mocks base method. 
-func (m *MockS3API) PutObjectTaggingRequest(arg0 *s3.PutObjectTaggingInput) (*request.Request, *s3.PutObjectTaggingOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutObjectTaggingRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutObjectTaggingOutput) - return ret0, ret1 -} - -// PutObjectTaggingRequest indicates an expected call of PutObjectTaggingRequest. -func (mr *MockS3APIMockRecorder) PutObjectTaggingRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingRequest", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingRequest), arg0) -} - -// PutObjectTaggingWithContext mocks base method. -func (m *MockS3API) PutObjectTaggingWithContext(arg0 aws.Context, arg1 *s3.PutObjectTaggingInput, arg2 ...request.Option) (*s3.PutObjectTaggingOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectTaggingWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutObjectTaggingOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectTaggingWithContext indicates an expected call of PutObjectTaggingWithContext. -func (mr *MockS3APIMockRecorder) PutObjectTaggingWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectTaggingWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectTaggingWithContext), varargs...) -} - -// PutObjectWithContext mocks base method. -func (m *MockS3API) PutObjectWithContext(arg0 aws.Context, arg1 *s3.PutObjectInput, arg2 ...request.Option) (*s3.PutObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutObjectWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.PutObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutObjectWithContext indicates an expected call of PutObjectWithContext. -func (mr *MockS3APIMockRecorder) PutObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObjectWithContext", reflect.TypeOf((*MockS3API)(nil).PutObjectWithContext), varargs...) -} - -// PutPublicAccessBlock mocks base method. -func (m *MockS3API) PutPublicAccessBlock(arg0 *s3.PutPublicAccessBlockInput) (*s3.PutPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutPublicAccessBlock", arg0) - ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutPublicAccessBlock indicates an expected call of PutPublicAccessBlock. -func (mr *MockS3APIMockRecorder) PutPublicAccessBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlock", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlock), arg0) -} - -// PutPublicAccessBlockRequest mocks base method. -func (m *MockS3API) PutPublicAccessBlockRequest(arg0 *s3.PutPublicAccessBlockInput) (*request.Request, *s3.PutPublicAccessBlockOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PutPublicAccessBlockRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.PutPublicAccessBlockOutput) - return ret0, ret1 -} - -// PutPublicAccessBlockRequest indicates an expected call of PutPublicAccessBlockRequest. 
-func (mr *MockS3APIMockRecorder) PutPublicAccessBlockRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockRequest", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockRequest), arg0) -} - -// PutPublicAccessBlockWithContext mocks base method. -func (m *MockS3API) PutPublicAccessBlockWithContext(arg0 aws.Context, arg1 *s3.PutPublicAccessBlockInput, arg2 ...request.Option) (*s3.PutPublicAccessBlockOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "PutPublicAccessBlockWithContext", varargs...) - ret0, _ := ret[0].(*s3.PutPublicAccessBlockOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PutPublicAccessBlockWithContext indicates an expected call of PutPublicAccessBlockWithContext. -func (mr *MockS3APIMockRecorder) PutPublicAccessBlockWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutPublicAccessBlockWithContext", reflect.TypeOf((*MockS3API)(nil).PutPublicAccessBlockWithContext), varargs...) -} - -// RestoreObject mocks base method. -func (m *MockS3API) RestoreObject(arg0 *s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreObject", arg0) - ret0, _ := ret[0].(*s3.RestoreObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RestoreObject indicates an expected call of RestoreObject. -func (mr *MockS3APIMockRecorder) RestoreObject(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObject", reflect.TypeOf((*MockS3API)(nil).RestoreObject), arg0) -} - -// RestoreObjectRequest mocks base method. 
-func (m *MockS3API) RestoreObjectRequest(arg0 *s3.RestoreObjectInput) (*request.Request, *s3.RestoreObjectOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RestoreObjectRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.RestoreObjectOutput) - return ret0, ret1 -} - -// RestoreObjectRequest indicates an expected call of RestoreObjectRequest. -func (mr *MockS3APIMockRecorder) RestoreObjectRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectRequest", reflect.TypeOf((*MockS3API)(nil).RestoreObjectRequest), arg0) -} - -// RestoreObjectWithContext mocks base method. -func (m *MockS3API) RestoreObjectWithContext(arg0 aws.Context, arg1 *s3.RestoreObjectInput, arg2 ...request.Option) (*s3.RestoreObjectOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "RestoreObjectWithContext", varargs...) - ret0, _ := ret[0].(*s3.RestoreObjectOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RestoreObjectWithContext indicates an expected call of RestoreObjectWithContext. -func (mr *MockS3APIMockRecorder) RestoreObjectWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RestoreObjectWithContext", reflect.TypeOf((*MockS3API)(nil).RestoreObjectWithContext), varargs...) -} - -// SelectObjectContent mocks base method. -func (m *MockS3API) SelectObjectContent(arg0 *s3.SelectObjectContentInput) (*s3.SelectObjectContentOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectObjectContent", arg0) - ret0, _ := ret[0].(*s3.SelectObjectContentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SelectObjectContent indicates an expected call of SelectObjectContent. 
-func (mr *MockS3APIMockRecorder) SelectObjectContent(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContent", reflect.TypeOf((*MockS3API)(nil).SelectObjectContent), arg0) -} - -// SelectObjectContentRequest mocks base method. -func (m *MockS3API) SelectObjectContentRequest(arg0 *s3.SelectObjectContentInput) (*request.Request, *s3.SelectObjectContentOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectObjectContentRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.SelectObjectContentOutput) - return ret0, ret1 -} - -// SelectObjectContentRequest indicates an expected call of SelectObjectContentRequest. -func (mr *MockS3APIMockRecorder) SelectObjectContentRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentRequest", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentRequest), arg0) -} - -// SelectObjectContentWithContext mocks base method. -func (m *MockS3API) SelectObjectContentWithContext(arg0 aws.Context, arg1 *s3.SelectObjectContentInput, arg2 ...request.Option) (*s3.SelectObjectContentOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "SelectObjectContentWithContext", varargs...) - ret0, _ := ret[0].(*s3.SelectObjectContentOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// SelectObjectContentWithContext indicates an expected call of SelectObjectContentWithContext. -func (mr *MockS3APIMockRecorder) SelectObjectContentWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectObjectContentWithContext", reflect.TypeOf((*MockS3API)(nil).SelectObjectContentWithContext), varargs...) 
-} - -// UploadPart mocks base method. -func (m *MockS3API) UploadPart(arg0 *s3.UploadPartInput) (*s3.UploadPartOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPart", arg0) - ret0, _ := ret[0].(*s3.UploadPartOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPart indicates an expected call of UploadPart. -func (mr *MockS3APIMockRecorder) UploadPart(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPart", reflect.TypeOf((*MockS3API)(nil).UploadPart), arg0) -} - -// UploadPartCopy mocks base method. -func (m *MockS3API) UploadPartCopy(arg0 *s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPartCopy", arg0) - ret0, _ := ret[0].(*s3.UploadPartCopyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPartCopy indicates an expected call of UploadPartCopy. -func (mr *MockS3APIMockRecorder) UploadPartCopy(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopy", reflect.TypeOf((*MockS3API)(nil).UploadPartCopy), arg0) -} - -// UploadPartCopyRequest mocks base method. -func (m *MockS3API) UploadPartCopyRequest(arg0 *s3.UploadPartCopyInput) (*request.Request, *s3.UploadPartCopyOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPartCopyRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.UploadPartCopyOutput) - return ret0, ret1 -} - -// UploadPartCopyRequest indicates an expected call of UploadPartCopyRequest. -func (mr *MockS3APIMockRecorder) UploadPartCopyRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyRequest), arg0) -} - -// UploadPartCopyWithContext mocks base method. 
-func (m *MockS3API) UploadPartCopyWithContext(arg0 aws.Context, arg1 *s3.UploadPartCopyInput, arg2 ...request.Option) (*s3.UploadPartCopyOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UploadPartCopyWithContext", varargs...) - ret0, _ := ret[0].(*s3.UploadPartCopyOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPartCopyWithContext indicates an expected call of UploadPartCopyWithContext. -func (mr *MockS3APIMockRecorder) UploadPartCopyWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartCopyWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartCopyWithContext), varargs...) -} - -// UploadPartRequest mocks base method. -func (m *MockS3API) UploadPartRequest(arg0 *s3.UploadPartInput) (*request.Request, *s3.UploadPartOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UploadPartRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.UploadPartOutput) - return ret0, ret1 -} - -// UploadPartRequest indicates an expected call of UploadPartRequest. -func (mr *MockS3APIMockRecorder) UploadPartRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartRequest", reflect.TypeOf((*MockS3API)(nil).UploadPartRequest), arg0) -} - -// UploadPartWithContext mocks base method. -func (m *MockS3API) UploadPartWithContext(arg0 aws.Context, arg1 *s3.UploadPartInput, arg2 ...request.Option) (*s3.UploadPartOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "UploadPartWithContext", varargs...) 
- ret0, _ := ret[0].(*s3.UploadPartOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// UploadPartWithContext indicates an expected call of UploadPartWithContext. -func (mr *MockS3APIMockRecorder) UploadPartWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UploadPartWithContext", reflect.TypeOf((*MockS3API)(nil).UploadPartWithContext), varargs...) -} - -// WaitUntilBucketExists mocks base method. -func (m *MockS3API) WaitUntilBucketExists(arg0 *s3.HeadBucketInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilBucketExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketExists indicates an expected call of WaitUntilBucketExists. -func (mr *MockS3APIMockRecorder) WaitUntilBucketExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExists), arg0) -} - -// WaitUntilBucketExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilBucketExistsWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilBucketExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketExistsWithContext indicates an expected call of WaitUntilBucketExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilBucketExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) 
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketExistsWithContext), varargs...) -} - -// WaitUntilBucketNotExists mocks base method. -func (m *MockS3API) WaitUntilBucketNotExists(arg0 *s3.HeadBucketInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilBucketNotExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketNotExists indicates an expected call of WaitUntilBucketNotExists. -func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExists), arg0) -} - -// WaitUntilBucketNotExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilBucketNotExistsWithContext(arg0 aws.Context, arg1 *s3.HeadBucketInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilBucketNotExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilBucketNotExistsWithContext indicates an expected call of WaitUntilBucketNotExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilBucketNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilBucketNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilBucketNotExistsWithContext), varargs...) -} - -// WaitUntilObjectExists mocks base method. 
-func (m *MockS3API) WaitUntilObjectExists(arg0 *s3.HeadObjectInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilObjectExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectExists indicates an expected call of WaitUntilObjectExists. -func (mr *MockS3APIMockRecorder) WaitUntilObjectExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExists), arg0) -} - -// WaitUntilObjectExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilObjectExistsWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilObjectExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectExistsWithContext indicates an expected call of WaitUntilObjectExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilObjectExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectExistsWithContext), varargs...) -} - -// WaitUntilObjectNotExists mocks base method. -func (m *MockS3API) WaitUntilObjectNotExists(arg0 *s3.HeadObjectInput) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WaitUntilObjectNotExists", arg0) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectNotExists indicates an expected call of WaitUntilObjectNotExists. 
-func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExists(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExists", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExists), arg0) -} - -// WaitUntilObjectNotExistsWithContext mocks base method. -func (m *MockS3API) WaitUntilObjectNotExistsWithContext(arg0 aws.Context, arg1 *s3.HeadObjectInput, arg2 ...request.WaiterOption) error { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WaitUntilObjectNotExistsWithContext", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// WaitUntilObjectNotExistsWithContext indicates an expected call of WaitUntilObjectNotExistsWithContext. -func (mr *MockS3APIMockRecorder) WaitUntilObjectNotExistsWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WaitUntilObjectNotExistsWithContext", reflect.TypeOf((*MockS3API)(nil).WaitUntilObjectNotExistsWithContext), varargs...) -} - -// WriteGetObjectResponse mocks base method. -func (m *MockS3API) WriteGetObjectResponse(arg0 *s3.WriteGetObjectResponseInput) (*s3.WriteGetObjectResponseOutput, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteGetObjectResponse", arg0) - ret0, _ := ret[0].(*s3.WriteGetObjectResponseOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// WriteGetObjectResponse indicates an expected call of WriteGetObjectResponse. -func (mr *MockS3APIMockRecorder) WriteGetObjectResponse(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponse", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponse), arg0) -} - -// WriteGetObjectResponseRequest mocks base method. 
-func (m *MockS3API) WriteGetObjectResponseRequest(arg0 *s3.WriteGetObjectResponseInput) (*request.Request, *s3.WriteGetObjectResponseOutput) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "WriteGetObjectResponseRequest", arg0) - ret0, _ := ret[0].(*request.Request) - ret1, _ := ret[1].(*s3.WriteGetObjectResponseOutput) - return ret0, ret1 -} - -// WriteGetObjectResponseRequest indicates an expected call of WriteGetObjectResponseRequest. -func (mr *MockS3APIMockRecorder) WriteGetObjectResponseRequest(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponseRequest", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponseRequest), arg0) -} - -// WriteGetObjectResponseWithContext mocks base method. -func (m *MockS3API) WriteGetObjectResponseWithContext(arg0 aws.Context, arg1 *s3.WriteGetObjectResponseInput, arg2 ...request.Option) (*s3.WriteGetObjectResponseOutput, error) { - m.ctrl.T.Helper() - varargs := []interface{}{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "WriteGetObjectResponseWithContext", varargs...) - ret0, _ := ret[0].(*s3.WriteGetObjectResponseOutput) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// WriteGetObjectResponseWithContext indicates an expected call of WriteGetObjectResponseWithContext. -func (mr *MockS3APIMockRecorder) WriteGetObjectResponseWithContext(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { +// PutObject indicates an expected call of PutObject. +func (mr *MockS3APIMockRecorder) PutObject(ctx, params any, optFns ...any) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteGetObjectResponseWithContext", reflect.TypeOf((*MockS3API)(nil).WriteGetObjectResponseWithContext), varargs...) + varargs := append([]any{ctx, params}, optFns...) 
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutObject", reflect.TypeOf((*MockS3API)(nil).PutObject), varargs...) } diff --git a/common/archiver/s3store/query_parser.go b/common/archiver/s3store/query_parser.go index bbc26e4c57b..1e8843ef8b4 100644 --- a/common/archiver/s3store/query_parser.go +++ b/common/archiver/s3store/query_parser.go @@ -1,40 +1,14 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../../LICENSE -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser +//go:generate mockgen -package $GOPACKAGE -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser package s3store import ( "errors" "fmt" - "strconv" "time" "github.com/temporalio/sqlparser" - - "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/sqlquery" "go.temporal.io/server/common/util" ) @@ -71,10 +45,6 @@ const ( PrecisionMinute = "Minute" PrecisionSecond = "Second" ) -const ( - queryTemplate = "select * from dummy where %s" - defaultDateTimeFormat = time.RFC3339 -) // NewQueryParser creates a new query parser for filestore func NewQueryParser() QueryParser { @@ -82,7 +52,7 @@ func NewQueryParser() QueryParser { } func (p *queryParser) Parse(query string) (*parsedQuery, error) { - stmt, err := sqlparser.Parse(fmt.Sprintf(queryTemplate, query)) + stmt, err := sqlparser.Parse(fmt.Sprintf(sqlquery.QueryTemplate, query)) if err != nil { return nil, err } @@ -153,7 +123,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, switch colNameStr { case WorkflowTypeName: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -165,7 +135,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.workflowTypeName = util.Ptr(val) case WorkflowID: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -177,7 +147,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.workflowID = util.Ptr(val) case CloseTime: - timestamp, err := convertToTime(valStr) + timestamp, err := sqlquery.ConvertToTime(valStr) if err != nil { return err } @@ -186,7 +156,7 @@ func (p *queryParser) 
convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.closeTime = ×tamp case StartTime: - timestamp, err := convertToTime(valStr) + timestamp, err := sqlquery.ConvertToTime(valStr) if err != nil { return err } @@ -195,7 +165,7 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, } parsedQuery.startTime = ×tamp case SearchPrecision: - val, err := extractStringValue(valStr) + val, err := sqlquery.ExtractStringValue(valStr) if err != nil { return err } @@ -221,26 +191,3 @@ func (p *queryParser) convertComparisonExpr(compExpr *sqlparser.ComparisonExpr, return nil } - -func convertToTime(timeStr string) (time.Time, error) { - ts, err := strconv.ParseInt(timeStr, 10, 64) - if err == nil { - return timestamp.UnixOrZeroTime(ts), nil - } - timestampStr, err := extractStringValue(timeStr) - if err != nil { - return time.Time{}, err - } - parsedTime, err := time.Parse(defaultDateTimeFormat, timestampStr) - if err != nil { - return time.Time{}, err - } - return parsedTime, nil -} - -func extractStringValue(s string) (string, error) { - if len(s) >= 2 && s[0] == '\'' && s[len(s)-1] == '\'' { - return s[1 : len(s)-1], nil - } - return "", fmt.Errorf("value %s is not a string value", s) -} diff --git a/common/archiver/s3store/query_parser_mock.go b/common/archiver/s3store/query_parser_mock.go index b67994b7adf..bba25088e87 100644 --- a/common/archiver/s3store/query_parser_mock.go +++ b/common/archiver/s3store/query_parser_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: query_parser.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package s3store -source query_parser.go -destination query_parser_mock.go -mock_names Interface=MockQueryParser // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: query_parser.go // Package s3store is a generated GoMock package. package s3store @@ -31,13 +12,14 @@ package s3store import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockQueryParser is a mock of QueryParser interface. type MockQueryParser struct { ctrl *gomock.Controller recorder *MockQueryParserMockRecorder + isgomock struct{} } // MockQueryParserMockRecorder is the mock recorder for MockQueryParser. @@ -67,7 +49,7 @@ func (m *MockQueryParser) Parse(query string) (*parsedQuery, error) { } // Parse indicates an expected call of Parse. 
-func (mr *MockQueryParserMockRecorder) Parse(query interface{}) *gomock.Call { +func (mr *MockQueryParserMockRecorder) Parse(query any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parse", reflect.TypeOf((*MockQueryParser)(nil).Parse), query) } diff --git a/common/archiver/s3store/query_parser_test.go b/common/archiver/s3store/query_parser_test.go index f40917cdb2e..cba343416bc 100644 --- a/common/archiver/s3store/query_parser_test.go +++ b/common/archiver/s3store/query_parser_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package s3store import ( @@ -30,7 +6,6 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "go.temporal.io/server/common/util" ) diff --git a/common/archiver/s3store/s3iface.go b/common/archiver/s3store/s3iface.go new file mode 100644 index 00000000000..9f5da7282cf --- /dev/null +++ b/common/archiver/s3store/s3iface.go @@ -0,0 +1,16 @@ +package s3store + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +// S3API is the subset of the S3 client API used by the s3store archiver. +type S3API interface { + HeadBucket(ctx context.Context, params *s3.HeadBucketInput, optFns ...func(*s3.Options)) (*s3.HeadBucketOutput, error) + HeadObject(ctx context.Context, params *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) + PutObject(ctx context.Context, params *s3.PutObjectInput, optFns ...func(*s3.Options)) (*s3.PutObjectOutput, error) + GetObject(ctx context.Context, params *s3.GetObjectInput, optFns ...func(*s3.Options)) (*s3.GetObjectOutput, error) + ListObjectsV2(ctx context.Context, params *s3.ListObjectsV2Input, optFns ...func(*s3.Options)) (*s3.ListObjectsV2Output, error) +} diff --git a/common/archiver/s3store/util.go b/common/archiver/s3store/util.go index a31446b21b8..f91f5f69bc7 100644 --- a/common/archiver/s3store/util.go +++ b/common/archiver/s3store/util.go @@ -1,53 +1,29 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package s3store import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + smithy "github.com/aws/smithy-go" commonpb "go.temporal.io/api/common/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" workflowpb "go.temporal.io/api/workflow/v1" - "go.uber.org/multierr" - "google.golang.org/protobuf/proto" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/codec" "go.temporal.io/server/common/searchattribute" + "go.uber.org/multierr" + "google.golang.org/protobuf/proto" ) // encoding & decoding util @@ -67,7 +43,7 @@ func decodeVisibilityRecord(data []byte) (*archiverspb.VisibilityRecord, error) return record, nil } -func SerializeToken(token interface{}) ([]byte, error) { +func SerializeToken(token any) ([]byte, error) { if token == nil { return nil, nil } @@ -99,10 +75,10 @@ func SoftValidateURI(URI archiver.URI) error { return nil } -func BucketExists(ctx context.Context, s3cli s3iface.S3API, URI archiver.URI) error { +func BucketExists(ctx context.Context, s3cli S3API, URI archiver.URI) error { ctx, cancel := ensureContextTimeout(ctx) defer cancel() - _, err := s3cli.HeadBucketWithContext(ctx, &s3.HeadBucketInput{ + _, err := s3cli.HeadBucket(ctx, &s3.HeadBucketInput{ Bucket: aws.String(URI.Hostname()), }) if err == nil { @@ -114,10 +90,10 @@ func BucketExists(ctx context.Context, s3cli s3iface.S3API, URI archiver.URI) er return err } -func KeyExists(ctx context.Context, s3cli s3iface.S3API, URI archiver.URI, key string) (bool, error) { +func KeyExists(ctx context.Context, s3cli S3API, URI archiver.URI, key string) (bool, error) { ctx, cancel := 
ensureContextTimeout(ctx) defer cancel() - _, err := s3cli.HeadObjectWithContext(ctx, &s3.HeadObjectInput{ + _, err := s3cli.HeadObject(ctx, &s3.HeadObjectInput{ Bucket: aws.String(URI.Hostname()), Key: aws.String(key), }) @@ -131,8 +107,8 @@ func KeyExists(ctx context.Context, s3cli s3iface.S3API, URI archiver.URI, key s } func IsNotFoundError(err error) bool { - aerr, ok := err.(awserr.Error) - return ok && (aerr.Code() == "NotFound") + apiErr, ok := errors.AsType[smithy.APIError](err) + return ok && apiErr.ErrorCode() == "NotFound" } // Key construction @@ -208,43 +184,39 @@ func ensureContextTimeout(ctx context.Context) (context.Context, context.CancelF } return context.WithTimeout(ctx, defaultBlobstoreTimeout) } -func Upload(ctx context.Context, s3cli s3iface.S3API, URI archiver.URI, key string, data []byte) error { +func Upload(ctx context.Context, s3cli S3API, URI archiver.URI, key string, data []byte) error { ctx, cancel := ensureContextTimeout(ctx) defer cancel() - _, err := s3cli.PutObjectWithContext(ctx, &s3.PutObjectInput{ + _, err := s3cli.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(URI.Hostname()), Key: aws.String(key), Body: bytes.NewReader(data), }) if err != nil { - if aerr, ok := err.(awserr.Error); ok { - if aerr.Code() == s3.ErrCodeNoSuchBucket { - return serviceerror.NewInvalidArgument(errBucketNotExists.Error()) - } + if _, ok := errors.AsType[*types.NoSuchBucket](err); ok { + return serviceerror.NewInvalidArgument(errBucketNotExists.Error()) } return err } return nil } -func Download(ctx context.Context, s3cli s3iface.S3API, URI archiver.URI, key string) ([]byte, error) { +func Download(ctx context.Context, s3cli S3API, URI archiver.URI, key string) ([]byte, error) { ctx, cancel := ensureContextTimeout(ctx) defer cancel() - result, err := s3cli.GetObjectWithContext(ctx, &s3.GetObjectInput{ + result, err := s3cli.GetObject(ctx, &s3.GetObjectInput{ Bucket: aws.String(URI.Hostname()), Key: aws.String(key), }) if err != nil { - if aerr, 
ok := err.(awserr.Error); ok { - if aerr.Code() == s3.ErrCodeNoSuchBucket { - return nil, serviceerror.NewInvalidArgument(errBucketNotExists.Error()) - } + if _, ok := errors.AsType[*types.NoSuchBucket](err); ok { + return nil, serviceerror.NewInvalidArgument(errBucketNotExists.Error()) + } - if aerr.Code() == s3.ErrCodeNoSuchKey { - return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) - } + if _, ok := errors.AsType[*types.NoSuchKey](err); ok { + return nil, serviceerror.NewNotFound(archiver.ErrHistoryNotExist.Error()) } return nil, err } @@ -291,12 +263,13 @@ func convertToExecutionInfo(record *archiverspb.VisibilityRecord, saTypeMap sear Type: &commonpb.WorkflowType{ Name: record.WorkflowTypeName, }, - StartTime: record.StartTime, - ExecutionTime: record.ExecutionTime, - CloseTime: record.CloseTime, - Status: record.Status, - HistoryLength: record.HistoryLength, - Memo: record.Memo, - SearchAttributes: searchAttributes, + StartTime: record.StartTime, + ExecutionTime: record.ExecutionTime, + CloseTime: record.CloseTime, + ExecutionDuration: record.ExecutionDuration, + Status: record.Status, + HistoryLength: record.HistoryLength, + Memo: record.Memo, + SearchAttributes: searchAttributes, }, nil } diff --git a/common/archiver/s3store/util_test.go b/common/archiver/s3store/util_test.go index 6c2ea65be03..34d5a629254 100644 --- a/common/archiver/s3store/util_test.go +++ b/common/archiver/s3store/util_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package s3store import ( diff --git a/common/archiver/s3store/visibility_archiver.go b/common/archiver/s3store/visibility_archiver.go index 9c0731f1051..d3eae16e89d 100644 --- a/common/archiver/s3store/visibility_archiver.go +++ b/common/archiver/s3store/visibility_archiver.go @@ -1,56 +1,32 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package s3store import ( "context" + "path" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/service/s3" "go.temporal.io/api/serviceerror" workflowpb "go.temporal.io/api/workflow/v1" - - "go.temporal.io/server/common/searchattribute" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/config" + "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/searchattribute" ) type ( visibilityArchiver struct { - container *archiver.VisibilityBootstrapContainer - s3cli s3iface.S3API - queryParser QueryParser + logger log.Logger + metricsHandler metrics.Handler + s3cli S3API + queryParser QueryParser } queryVisibilityRequest struct { @@ -78,28 +54,32 @@ const ( // NewVisibilityArchiver creates a new archiver.VisibilityArchiver based on s3 func NewVisibilityArchiver( - container *archiver.VisibilityBootstrapContainer, + logger log.Logger, + metricsHandler metrics.Handler, config *config.S3Archiver, ) (archiver.VisibilityArchiver, error) { - return newVisibilityArchiver(container, config) + return newVisibilityArchiver(logger, metricsHandler, config) } func newVisibilityArchiver( - container *archiver.VisibilityBootstrapContainer, - config *config.S3Archiver) (*visibilityArchiver, error) { - s3Config := &aws.Config{ - Endpoint: config.Endpoint, - Region: aws.String(config.Region), - S3ForcePathStyle: aws.Bool(config.S3ForcePathStyle), - LogLevel: (*aws.LogLevelType)(&config.LogLevel), - } - sess, err := session.NewSession(s3Config) + logger log.Logger, + metricsHandler metrics.Handler, + s3config *config.S3Archiver, 
+) (*visibilityArchiver, error) { + cfg, err := awsconfig.LoadDefaultConfig(context.Background(), + awsconfig.WithRegion(s3config.Region), + awsconfig.WithClientLogMode(aws.ClientLogMode(s3config.LogLevel)), + ) if err != nil { return nil, err } return &visibilityArchiver{ - container: container, - s3cli: s3.New(sess), + logger: logger, + metricsHandler: metricsHandler, + s3cli: s3.NewFromConfig(cfg, func(o *s3.Options) { + o.BaseEndpoint = s3config.Endpoint + o.UsePathStyle = s3config.S3ForcePathStyle + }), queryParser: NewQueryParser(), }, nil } @@ -110,10 +90,10 @@ func (v *visibilityArchiver) Archive( request *archiverspb.VisibilityRecord, opts ...archiver.ArchiveOption, ) (err error) { - handler := v.container.MetricsHandler.WithTags(metrics.OperationTag(metrics.VisibilityArchiverScope), metrics.NamespaceTag(request.Namespace)) + handler := v.metricsHandler.WithTags(metrics.OperationTag(metrics.VisibilityArchiverScope), metrics.NamespaceTag(request.Namespace)) featureCatalog := archiver.GetFeatureCatalog(opts...) startTime := time.Now().UTC() - logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.container.Logger, request, URI.String()) + logger := archiver.TagLoggerWithArchiveVisibilityRequestAndURI(v.logger, request, URI.String()) archiveFailReason := "" defer func() { metrics.ServiceLatency.With(handler).Record(time.Since(startTime)) @@ -244,11 +224,17 @@ func (v *visibilityArchiver) queryAll( nextPageToken: nextPageToken, parsedQuery: &parsedQuery{}, }, saTypeMap, searchPrefix, func(key string) bool { - // We only want to return entries for the closeTimeout secondary index, which will always be of the form: - // .../closeTimeout//, so we split the key on "/" and check that the third-to-last - // element is "closeTimeout". - elements := strings.Split(key, "/") - return len(elements) >= 3 && elements[len(elements)-3] == secondaryIndexKeyCloseTimeout + // We only want to return entries for the closeTimeout secondary index. 
Keys for this + // index are always of the form: + // .../closeTimeout// + // Walk from the end instead of splitting the whole string to avoid unnecessary + // allocations and to keep the logic clear: + // - drop + // - drop + // - check the remaining last segment equals "closeTimeout". + dir := path.Dir(key) // drop runID + dir = path.Dir(dir) // drop + return path.Base(dir) == secondaryIndexKeyCloseTimeout }) if err != nil { return nil, err @@ -331,10 +317,10 @@ func (v *visibilityArchiver) queryPrefix( if request.nextPageToken != nil { token = deserializeQueryVisibilityToken(request.nextPageToken) } - results, err := v.s3cli.ListObjectsV2WithContext(ctx, &s3.ListObjectsV2Input{ + results, err := v.s3cli.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ Bucket: aws.String(uri.Hostname()), Prefix: aws.String(prefix), - MaxKeys: aws.Int64(int64(request.pageSize)), + MaxKeys: aws.Int32(int32(request.pageSize)), ContinuationToken: token, }) if err != nil { diff --git a/common/archiver/s3store/visibility_archiver_test.go b/common/archiver/s3store/visibility_archiver_test.go index 3ce7400f531..0ce3e873b65 100644 --- a/common/archiver/s3store/visibility_archiver_test.go +++ b/common/archiver/s3store/visibility_archiver_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package s3store import ( @@ -31,19 +7,14 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/golang/mock/gomock" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/smithy-go" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" workflowpb "go.temporal.io/api/workflow/v1" - "google.golang.org/protobuf/types/known/timestamppb" - archiverspb "go.temporal.io/server/api/archiver/v1" "go.temporal.io/server/common/archiver" "go.temporal.io/server/common/archiver/s3store/mocks" @@ -53,7 +24,8 @@ import ( "go.temporal.io/server/common/payload" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" - "go.temporal.io/server/common/util" + "go.uber.org/mock/gomock" + "google.golang.org/protobuf/types/known/timestamppb" ) type visibilityArchiverSuite struct { @@ -61,7 +33,8 @@ type visibilityArchiverSuite struct { suite.Suite s3cli *mocks.MockS3API - container *archiver.VisibilityBootstrapContainer + logger log.Logger + metricsHandler metrics.Handler visibilityRecords []*archiverspb.VisibilityRecord controller *gomock.Controller @@ -99,10 +72,10 @@ func (s *visibilityArchiverSuite) TestValidateURI() { }, } - s.s3cli.EXPECT().HeadBucketWithContext(gomock.Any(), 
gomock.Any()).DoAndReturn( - func(ctx aws.Context, input *s3.HeadBucketInput, options ...request.Option) (*s3.HeadBucketOutput, error) { + s.s3cli.EXPECT().HeadBucket(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, input *s3.HeadBucketInput, options ...func(*s3.Options)) (*s3.HeadBucketOutput, error) { if *input.Bucket != s.testArchivalURI.Hostname() { - return nil, awserr.New("NotFound", "", nil) + return nil, &smithy.GenericAPIError{Code: "NotFound", Message: ""} } return &s3.HeadBucketOutput{}, nil @@ -118,9 +91,10 @@ func (s *visibilityArchiverSuite) TestValidateURI() { func (s *visibilityArchiverSuite) newTestVisibilityArchiver() *visibilityArchiver { return &visibilityArchiver{ - container: s.container, - s3cli: s.s3cli, - queryParser: NewQueryParser(), + logger: s.logger, + metricsHandler: s.metricsHandler, + s3cli: s.s3cli, + queryParser: NewQueryParser(), } } @@ -133,10 +107,8 @@ func (s *visibilityArchiverSuite) SetupSuite() { s.testArchivalURI, err = archiver.NewURI(testBucketURI) s.Require().NoError(err) - s.container = &archiver.VisibilityBootstrapContainer{ - Logger: log.NewNoopLogger(), - MetricsHandler: metrics.NoopMetricsHandler, - } + s.logger = log.NewNoopLogger() + s.metricsHandler = metrics.NoopMetricsHandler } func (s *visibilityArchiverSuite) TearDownSuite() { @@ -242,14 +214,14 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidURI() { NamespaceID: testNamespaceID, PageSize: 1, } - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidRequest() { visibilityArchiver := s.newTestVisibilityArchiver() - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap) + 
response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, &archiver.QueryVisibilityRequest{}, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -263,7 +235,7 @@ func (s *visibilityArchiverSuite) TestQuery_Fail_InvalidQuery() { NamespaceID: "some random namespaceID", PageSize: 10, Query: "some invalid query", - }, searchattribute.TestNameTypeMap) + }, searchattribute.TestNameTypeMap()) s.Error(err) s.Nil(response) } @@ -272,9 +244,9 @@ func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { visibilityArchiver := s.newTestVisibilityArchiver() mockParser := NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - workflowID: util.Ptr(testWorkflowID), + workflowID: new(testWorkflowID), closeTime: &time.Time{}, - searchPrecision: util.Ptr(PrecisionSecond), + searchPrecision: new(PrecisionSecond), }, nil) visibilityArchiver.queryParser = mockParser request := &archiver.QueryVisibilityRequest{ @@ -282,7 +254,7 @@ func (s *visibilityArchiverSuite) TestQuery_Success_DirectoryNotExist() { Query: "parsed by mockParser", PageSize: 1, } - response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), s.testArchivalURI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Empty(response.Executions) @@ -293,9 +265,9 @@ func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { visibilityArchiver := s.newTestVisibilityArchiver() mockParser := NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: util.Ptr(time.Unix(0, int64(1*time.Hour)).UTC()), - searchPrecision: util.Ptr(PrecisionHour), - workflowID: util.Ptr(testWorkflowID), + closeTime: new(time.Unix(0, int64(1*time.Hour)).UTC()), + searchPrecision: new(PrecisionHour), + workflowID: new(testWorkflowID), }, nil) 
visibilityArchiver.queryParser = mockParser request := &archiver.QueryVisibilityRequest{ @@ -305,12 +277,12 @@ func (s *visibilityArchiverSuite) TestQuery_Success_NoNextPageToken() { } URI, err := archiver.NewURI(testBucketURI) s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(response.Executions[0], ei) } @@ -319,9 +291,9 @@ func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { visibilityArchiver := s.newTestVisibilityArchiver() mockParser := NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: util.Ptr(time.Unix(0, 0).UTC()), - searchPrecision: util.Ptr(PrecisionDay), - workflowID: util.Ptr(testWorkflowID), + closeTime: new(time.Unix(0, 0).UTC()), + searchPrecision: new(PrecisionDay), + workflowID: new(testWorkflowID), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser request := &archiver.QueryVisibilityRequest{ @@ -331,25 +303,25 @@ func (s *visibilityArchiverSuite) TestQuery_Success_SmallPageSize() { } URI, err := archiver.NewURI(testBucketURI) s.NoError(err) - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.NotNil(response.NextPageToken) s.Len(response.Executions, 2) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := 
convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[1]) request.NextPageToken = response.NextPageToken - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Nil(response.NextPageToken) s.Len(response.Executions, 1) - ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, response.Executions[0]) } @@ -364,7 +336,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_InvalidNamespace() { NextPageToken: nil, Query: "", } - _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) + _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap()) var svcErr *serviceerror.InvalidArgument @@ -383,7 +355,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_ZeroPageSize() { NextPageToken: nil, Query: "", } - _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) + _, err = arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap()) var svcErr *serviceerror.InvalidArgument @@ -405,7 +377,7 @@ func (s *visibilityArchiverSuite) TestQuery_EmptyQuery_Pagination() { NextPageToken: nextPageToken, Query: "", } - response, err := arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap) + response, err := arc.Query(context.Background(), uri, req, searchattribute.TestNameTypeMap()) s.NoError(err) 
s.NotNil(response) nextPageToken = response.NextPageToken @@ -535,52 +507,52 @@ func (s *visibilityArchiverSuite) TestArchiveAndQueryPrecisions() { for i, testData := range precisionTests { mockParser := NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: util.Ptr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: util.Ptr(testData.precision), - workflowID: util.Ptr(testWorkflowID), + closeTime: new(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: new(testData.precision), + workflowID: new(testWorkflowID), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Len(response.Executions, 2, "Iteration ", i) mockParser = NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - startTime: util.Ptr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: util.Ptr(testData.precision), - workflowID: util.Ptr(testWorkflowID), + startTime: new(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: new(testData.precision), + workflowID: new(testWorkflowID), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Len(response.Executions, 2, "Iteration ", i) mockParser = NewMockQueryParser(s.controller) 
mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - closeTime: util.Ptr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: util.Ptr(testData.precision), - workflowTypeName: util.Ptr(testWorkflowTypeName), + closeTime: new(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: new(testData.precision), + workflowTypeName: new(testWorkflowTypeName), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Len(response.Executions, 2, "Iteration ", i) mockParser = NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - startTime: util.Ptr(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), - searchPrecision: util.Ptr(testData.precision), - workflowTypeName: util.Ptr(testWorkflowTypeName), + startTime: new(time.Date(2000, 1, testData.day, testData.hour, testData.minute, testData.second, 0, time.UTC)), + searchPrecision: new(testData.precision), + workflowTypeName: new(testWorkflowTypeName), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser - response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err = visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) s.Len(response.Executions, 2, "Iteration ", i) @@ -598,7 +570,7 @@ func (s *visibilityArchiverSuite) TestArchiveAndQuery() { mockParser := NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - workflowID: util.Ptr(testWorkflowID), + workflowID: 
util.Ptr(testWorkflowID), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser request := &archiver.QueryVisibilityRequest{ @@ -609,7 +581,7 @@ func (s *visibilityArchiverSuite) TestArchiveAndQuery() { executions := []*workflowpb.WorkflowExecutionInfo{} first := true for first || request.NextPageToken != nil { - response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) executions = append(executions, response.Executions...) @@ -617,19 +589,19 @@ func (s *visibilityArchiverSuite) TestArchiveAndQuery() { first = false } s.Len(executions, 3) - ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err := convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[1]) - ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[2]) mockParser = NewMockQueryParser(s.controller) mockParser.EXPECT().Parse(gomock.Any()).Return(&parsedQuery{ - workflowTypeName: util.Ptr(testWorkflowTypeName), + workflowTypeName: util.Ptr(testWorkflowTypeName), }, nil).AnyTimes() visibilityArchiver.queryParser = mockParser request = &archiver.QueryVisibilityRequest{ @@ -640,7 +612,7 @@ func (s *visibilityArchiverSuite) TestArchiveAndQuery() { executions = []*workflowpb.WorkflowExecutionInfo{} first = true for first || request.NextPageToken != nil { - response, err := 
visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap) + response, err := visibilityArchiver.Query(context.Background(), URI, request, searchattribute.TestNameTypeMap()) s.NoError(err) s.NotNil(response) executions = append(executions, response.Executions...) @@ -648,13 +620,13 @@ func (s *visibilityArchiverSuite) TestArchiveAndQuery() { first = false } s.Len(executions, 3) - ei, err = convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[0], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[0]) - ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[1], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[1]) - ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap) + ei, err = convertToExecutionInfo(s.visibilityRecords[2], searchattribute.TestNameTypeMap()) s.NoError(err) s.Equal(ei, executions[2]) } diff --git a/common/archiver/uri.go b/common/archiver/uri.go index 4ddbb5714fc..76ae9f7ee81 100644 --- a/common/archiver/uri.go +++ b/common/archiver/uri.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package archiver import ( diff --git a/common/archiver/uri_test.go b/common/archiver/uri_test.go index 97db485717d..f7230a17e20 100644 --- a/common/archiver/uri_test.go +++ b/common/archiver/uri_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package archiver import ( diff --git a/common/archiver/util.go b/common/archiver/util.go index 7a33d48534f..cd95eb70fcd 100644 --- a/common/archiver/util.go +++ b/common/archiver/util.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package archiver import ( diff --git a/common/auth/tls.go b/common/auth/tls.go index fbed34bcf08..df0f8563dfd 100644 --- a/common/auth/tls.go +++ b/common/auth/tls.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package auth type ( @@ -34,7 +10,7 @@ type ( // client certificate CertFile string `yaml:"certFile"` KeyFile string `yaml:"keyFile"` - CaFile string `yaml:"caFile"` //optional depending on server config + CaFile string `yaml:"caFile"` // optional depending on server config // If you want to verify the hostname and server cert (like a wildcard for cass cluster) then you should turn this on // This option is basically the inverse of InSecureSkipVerify diff --git a/common/auth/tls_config_helper.go b/common/auth/tls_config_helper.go index 40abbd1c400..d71ccf664d3 100644 --- a/common/auth/tls_config_helper.go +++ b/common/auth/tls_config_helper.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package auth import ( diff --git a/common/auth/tls_config_helper_test.go b/common/auth/tls_config_helper_test.go index 6907e3f70fa..62198d18344 100644 --- a/common/auth/tls_config_helper_test.go +++ b/common/auth/tls_config_helper_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package auth import ( @@ -35,8 +11,8 @@ import ( "os" "testing" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" + "go.uber.org/mock/gomock" ) var validBase64CaData, invalidBase64CaData, validBase64Certificate, invalidBase64Certificate, validBase64Key, invalidBase64Key string diff --git a/common/authorization/audience_mapper.go b/common/authorization/audience_mapper.go new file mode 100644 index 00000000000..b4d6c89b610 --- /dev/null +++ b/common/authorization/audience_mapper.go @@ -0,0 +1,29 @@ +package authorization + +import ( + "context" + + "go.temporal.io/server/common/config" + "google.golang.org/grpc" +) + +// AudienceMapper is a simple implementation of JWTAudienceMapper that returns the configured audience string. +type AudienceMapper struct { + JwtAudience string +} + +// Audience returns the configured audience string. 
+func (m *AudienceMapper) Audience(ctx context.Context, req any, info *grpc.UnaryServerInfo) string { + return m.JwtAudience +} + +// NewAudienceMapper returns a JWTAudienceMapper that always returns the given audience string. +func NewAudienceMapper(audience string) JWTAudienceMapper { + return &AudienceMapper{JwtAudience: audience} +} + +// GetAudienceMapperFromConfig returns a JWTAudienceMapper based on the provided Authorization config. +// Currently, it returns a static audience mapper using the Audience field. +func GetAudienceMapperFromConfig(cfg *config.Authorization) (JWTAudienceMapper, error) { + return NewAudienceMapper(cfg.Audience), nil +} diff --git a/common/authorization/audience_mapper_mock.go b/common/authorization/audience_mapper_mock.go new file mode 100644 index 00000000000..c4afb967fd4 --- /dev/null +++ b/common/authorization/audience_mapper_mock.go @@ -0,0 +1,56 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: audience_mapper.go +// +// Generated by this command: +// +// mockgen -package authorization -source audience_mapper.go -destination audience_mapper_mock.go +// + +// Package authorization is a generated GoMock package. +package authorization + +import ( + reflect "reflect" + + context "context" + gomock "go.uber.org/mock/gomock" + grpc "google.golang.org/grpc" +) + +// MockJWTAudienceMapper is a mock of JWTAudienceMapper interface. +type MockJWTAudienceMapper struct { + ctrl *gomock.Controller + recorder *MockJWTAudienceMapperMockRecorder + isgomock struct{} +} + +// MockJWTAudienceMapperMockRecorder is the mock recorder for MockJWTAudienceMapper. +type MockJWTAudienceMapperMockRecorder struct { + mock *MockJWTAudienceMapper +} + +// NewMockJWTAudienceMapper creates a new mock instance. 
+func NewMockJWTAudienceMapper(ctrl *gomock.Controller) *MockJWTAudienceMapper { + mock := &MockJWTAudienceMapper{ctrl: ctrl} + mock.recorder = &MockJWTAudienceMapperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockJWTAudienceMapper) EXPECT() *MockJWTAudienceMapperMockRecorder { + return m.recorder +} + +// Audience mocks base method. +func (m *MockJWTAudienceMapper) Audience(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Audience", ctx, req, info) + ret0, _ := ret[0].(string) + return ret0 +} + +// Audience indicates an expected call of Audience. +func (mr *MockJWTAudienceMapperMockRecorder) Audience(ctx, req, info interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Audience", reflect.TypeOf((*MockJWTAudienceMapper)(nil).Audience), ctx, req, info) +} diff --git a/common/authorization/audience_mapper_test.go b/common/authorization/audience_mapper_test.go new file mode 100644 index 00000000000..b2cce781f34 --- /dev/null +++ b/common/authorization/audience_mapper_test.go @@ -0,0 +1,44 @@ +package authorization + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "go.temporal.io/server/common/config" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" +) + +type audienceMapperSuite struct { + suite.Suite + *require.Assertions + controller *gomock.Controller +} + +func TestAudienceMapperSuite(t *testing.T) { + suite.Run(t, new(audienceMapperSuite)) +} + +func (s *audienceMapperSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) +} + +func (s *audienceMapperSuite) TearDownTest() { + s.controller.Finish() +} + +func (s *audienceMapperSuite) TestNewAudienceMapper_Static() { + mapper := NewAudienceMapper("foo-audience") + audience := 
mapper.Audience(context.Background(), nil, &grpc.UnaryServerInfo{}) + s.Equal("foo-audience", audience) +} + +func (s *audienceMapperSuite) TestGetAudienceMapperFromConfig() { + cfg := &config.Authorization{Audience: "bar-audience"} + mapper, _ := GetAudienceMapperFromConfig(cfg) + audience := mapper.Audience(context.Background(), nil, &grpc.UnaryServerInfo{}) + s.Equal("bar-audience", audience) +} diff --git a/common/authorization/authorizer.go b/common/authorization/authorizer.go index e56a67b34d6..ac79cc837e0 100644 --- a/common/authorization/authorizer.go +++ b/common/authorization/authorizer.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination authorizer_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination authorizer_mock.go package authorization @@ -31,6 +7,7 @@ import ( "fmt" "strings" + commonpb "go.temporal.io/api/common/v1" "go.temporal.io/server/common/config" ) @@ -50,8 +27,10 @@ type CallTarget struct { APIName string // If a Namespace is not being targeted this be set to an empty string. Namespace string + // The nexus endpoint name being targeted (if any). + NexusEndpointName string // Request contains a deserialized copy of the API request object - Request interface{} + Request any } // @@@SNIPEND @@ -62,6 +41,8 @@ type ( Decision Decision // Reason may contain a message explaining the value of the Decision field. Reason string + // Principal is the server-computed identity of the caller. Can be nil when not computed. + Principal *commonpb.Principal } // Decision is enum type for auth decision diff --git a/common/authorization/authorizer_mock.go b/common/authorization/authorizer_mock.go index 324baf2b3bb..6002e71ffa4 100644 --- a/common/authorization/authorizer_mock.go +++ b/common/authorization/authorizer_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: authorizer.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package authorization -source authorizer.go -destination authorizer_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: authorizer.go // Package authorization is a generated GoMock package. package authorization @@ -32,13 +13,14 @@ import ( context "context" reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockAuthorizer is a mock of Authorizer interface. type MockAuthorizer struct { ctrl *gomock.Controller recorder *MockAuthorizerMockRecorder + isgomock struct{} } // MockAuthorizerMockRecorder is the mock recorder for MockAuthorizer. @@ -68,7 +50,7 @@ func (m *MockAuthorizer) Authorize(ctx context.Context, caller *Claims, target * } // Authorize indicates an expected call of Authorize. 
-func (mr *MockAuthorizerMockRecorder) Authorize(ctx, caller, target interface{}) *gomock.Call { +func (mr *MockAuthorizerMockRecorder) Authorize(ctx, caller, target any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Authorize", reflect.TypeOf((*MockAuthorizer)(nil).Authorize), ctx, caller, target) } @@ -77,6 +59,7 @@ func (mr *MockAuthorizerMockRecorder) Authorize(ctx, caller, target interface{}) type MockhasNamespace struct { ctrl *gomock.Controller recorder *MockhasNamespaceMockRecorder + isgomock struct{} } // MockhasNamespaceMockRecorder is the mock recorder for MockhasNamespace. diff --git a/common/authorization/claim_mapper.go b/common/authorization/claim_mapper.go index 35f37848261..58338425b47 100644 --- a/common/authorization/claim_mapper.go +++ b/common/authorization/claim_mapper.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination claim_mapper_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination claim_mapper_mock.go package authorization @@ -31,10 +7,9 @@ import ( "fmt" "strings" - "google.golang.org/grpc/credentials" - "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" + "google.golang.org/grpc/credentials" ) // @@@SNIPSTART temporal-common-authorization-authinfo @@ -83,6 +58,25 @@ func (*noopClaimMapper) AuthInfoRequired() bool { return false } +// internalClaimMapper is used by the internal frontend to identify requests as +// coming from the Temporal server itself. +type internalClaimMapper struct{} + +var _ ClaimMapper = (*internalClaimMapper)(nil) +var _ ClaimMapperWithAuthInfoRequired = (*internalClaimMapper)(nil) + +func NewInternalClaimMapper() ClaimMapper { + return &internalClaimMapper{} +} + +func (*internalClaimMapper) GetClaims(_ *AuthInfo) (*Claims, error) { + return &Claims{System: RoleAdmin, AuthType: InternalPrincipalType, Subject: InternalPrincipalName}, nil +} + +func (*internalClaimMapper) AuthInfoRequired() bool { + return false +} + func GetClaimMapperFromConfig(config *config.Authorization, logger log.Logger) (ClaimMapper, error) { switch strings.ToLower(config.ClaimMapper) { diff --git a/common/authorization/claim_mapper_mock.go b/common/authorization/claim_mapper_mock.go index 4b20ab5dfaa..51d237211b0 100644 --- a/common/authorization/claim_mapper_mock.go +++ b/common/authorization/claim_mapper_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. +// Source: claim_mapper.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package authorization -source claim_mapper.go -destination claim_mapper_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: claim_mapper.go // Package authorization is a generated GoMock package. package authorization @@ -31,13 +12,14 @@ package authorization import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockClaimMapper is a mock of ClaimMapper interface. type MockClaimMapper struct { ctrl *gomock.Controller recorder *MockClaimMapperMockRecorder + isgomock struct{} } // MockClaimMapperMockRecorder is the mock recorder for MockClaimMapper. 
@@ -67,7 +49,7 @@ func (m *MockClaimMapper) GetClaims(authInfo *AuthInfo) (*Claims, error) { } // GetClaims indicates an expected call of GetClaims. -func (mr *MockClaimMapperMockRecorder) GetClaims(authInfo interface{}) *gomock.Call { +func (mr *MockClaimMapperMockRecorder) GetClaims(authInfo any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClaims", reflect.TypeOf((*MockClaimMapper)(nil).GetClaims), authInfo) } @@ -76,6 +58,7 @@ func (mr *MockClaimMapperMockRecorder) GetClaims(authInfo interface{}) *gomock.C type MockClaimMapperWithAuthInfoRequired struct { ctrl *gomock.Controller recorder *MockClaimMapperWithAuthInfoRequiredMockRecorder + isgomock struct{} } // MockClaimMapperWithAuthInfoRequiredMockRecorder is the mock recorder for MockClaimMapperWithAuthInfoRequired. diff --git a/common/authorization/default_authorizer.go b/common/authorization/default_authorizer.go index e2ba6f68b5e..03ddd31cf92 100644 --- a/common/authorization/default_authorizer.go +++ b/common/authorization/default_authorizer.go @@ -1,32 +1,9 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization import ( "context" + commonpb "go.temporal.io/api/common/v1" "go.temporal.io/server/common/api" ) @@ -80,7 +57,9 @@ func (a *defaultAuthorizer) Authorize(_ context.Context, claims *Claims, target } if hasRole >= getRequiredRole(metadata.Access) { - return resultAllow, nil + result := Result{Decision: DecisionAllow} + result.Principal = &commonpb.Principal{Type: claims.AuthType, Name: claims.Subject} + return result, nil } return resultDeny, nil } diff --git a/common/authorization/default_authorizer_test.go b/common/authorization/default_authorizer_test.go index 4973c1db48e..b81da0ddd4a 100644 --- a/common/authorization/default_authorizer_test.go +++ b/common/authorization/default_authorizer_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization import ( @@ -29,10 +5,11 @@ import ( "reflect" "testing" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "go.temporal.io/server/common/config" + "go.uber.org/mock/gomock" + healthpb "google.golang.org/grpc/health/grpc_health_v1" ) var ( @@ -79,7 +56,7 @@ var ( Namespace: testNamespace, } targetGrpcHealthCheck = CallTarget{ - APIName: "/grpc.health.v1.Health/Check", + APIName: healthpb.Health_Check_FullMethodName, Namespace: "", } targetGetSystemInfo = CallTarget{ diff --git a/common/authorization/default_jwt_claim_mapper.go b/common/authorization/default_jwt_claim_mapper.go index 0b53fcff4bc..a4e58d4e72a 100644 --- a/common/authorization/default_jwt_claim_mapper.go +++ b/common/authorization/default_jwt_claim_mapper.go @@ -1,40 +1,16 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package authorization import ( "context" "fmt" + "regexp" "strings" "github.com/golang-jwt/jwt/v4" "go.temporal.io/api/serviceerror" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" + "go.temporal.io/server/common/primitives" ) const ( @@ -53,6 +29,9 @@ type defaultJWTClaimMapper struct { keyProvider TokenKeyProvider logger log.Logger permissionsClaimName string + permissionsRegex *regexp.Regexp + matchNamespaceIndex int + matchRoleIndex int } func NewDefaultJWTClaimMapper(provider TokenKeyProvider, cfg *config.Authorization, logger log.Logger) ClaimMapper { @@ -60,20 +39,51 @@ func NewDefaultJWTClaimMapper(provider TokenKeyProvider, cfg *config.Authorizati if claimName == "" { claimName = defaultPermissionsClaimName } - return &defaultJWTClaimMapper{keyProvider: provider, logger: logger, permissionsClaimName: claimName} + var permissionsRegex *regexp.Regexp + var namespaceIndex, roleIndex int + if cfg.PermissionsRegex != "" { + r, err := regexp.Compile(cfg.PermissionsRegex) + if err == nil { + for i, name := range r.SubexpNames() { + switch name { + case "namespace": + namespaceIndex = i + case "role": + roleIndex = i + } + } + if namespaceIndex != 0 && roleIndex != 0 { + permissionsRegex = r + } else { + logger.Warn("permissions regex does not have namespace or role named group") + } + } else { + logger.Warn(fmt.Sprintf("failed to compile permissions regex '%s': %v", cfg.PermissionsRegex, err)) + } + } + return &defaultJWTClaimMapper{ + keyProvider: provider, + logger: logger, + permissionsClaimName: claimName, + permissionsRegex: permissionsRegex, + matchNamespaceIndex: namespaceIndex, + matchRoleIndex: roleIndex, + } } var _ ClaimMapper = (*defaultJWTClaimMapper)(nil) func (a *defaultJWTClaimMapper) GetClaims(authInfo *AuthInfo) (*Claims, error) { - claims := Claims{} + claims := Claims{AuthType: "jwt"} if authInfo.AuthToken == "" { return &claims, nil } - parts := 
strings.Split(authInfo.AuthToken, " ") + // We use strings.SplitN even though we check the length later, to avoid + // unnecessary allocations if the format is correct. + parts := strings.SplitN(authInfo.AuthToken, " ", 2) if len(parts) != 2 { return nil, serviceerror.NewPermissionDenied("unexpected authorization token format", "") } @@ -89,7 +99,7 @@ func (a *defaultJWTClaimMapper) GetClaims(authInfo *AuthInfo) (*Claims, error) { return nil, serviceerror.NewPermissionDenied("unexpected value type of \"sub\" claim", "") } claims.Subject = subject - permissions, ok := jwtClaims[a.permissionsClaimName].([]interface{}) + permissions, ok := jwtClaims[a.permissionsClaimName].([]any) if ok { err := a.extractPermissions(permissions, &claims) if err != nil { @@ -99,17 +109,27 @@ func (a *defaultJWTClaimMapper) GetClaims(authInfo *AuthInfo) (*Claims, error) { return &claims, nil } -func (a *defaultJWTClaimMapper) extractPermissions(permissions []interface{}, claims *Claims) error { +func (a *defaultJWTClaimMapper) extractPermissions(permissions []any, claims *Claims) error { for _, permission := range permissions { p, ok := permission.(string) if !ok { a.logger.Warn(fmt.Sprintf("ignoring permission that is not a string: %v", permission)) continue } - parts := strings.Split(p, ":") - if len(parts) != 2 { - a.logger.Warn(fmt.Sprintf("ignoring permission in unexpected format: %v", permission)) - continue + var parts []string + if a.permissionsRegex != nil { + match := a.permissionsRegex.FindStringSubmatch(p) + if len(match) == 0 { + a.logger.Warn(fmt.Sprintf("ignoring permission not matching pattern: %v", permission)) + continue + } + parts = []string{match[a.matchNamespaceIndex], match[a.matchRoleIndex]} + } else { + parts = strings.SplitN(p, ":", 2) + if len(parts) != 2 { + a.logger.Warn(fmt.Sprintf("ignoring permission in unexpected format: %v", permission)) + continue + } } namespace := parts[0] if namespace == permissionScopeSystem { @@ -136,13 +156,13 @@ func 
parseJWTWithAudience(tokenString string, keyProvider TokenKeyProvider, audi var keyFunc jwt.Keyfunc if provider, _ := keyProvider.(RawTokenKeyProvider); provider != nil { - keyFunc = func(token *jwt.Token) (interface{}, error) { + keyFunc = func(token *jwt.Token) (any, error) { // reserve context // impl may introduce network request to get public key return provider.GetKey(context.Background(), token) } } else { - keyFunc = func(token *jwt.Token) (interface{}, error) { + keyFunc = func(token *jwt.Token) (any, error) { kid, ok := token.Header["kid"].(string) if !ok { return nil, fmt.Errorf("malformed token - no \"kid\" header") diff --git a/common/authorization/default_jwt_claim_mapper_test.go b/common/authorization/default_jwt_claim_mapper_test.go index f4244eb041e..b89dbe1f118 100644 --- a/common/authorization/default_jwt_claim_mapper_test.go +++ b/common/authorization/default_jwt_claim_mapper_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization import ( @@ -35,13 +11,12 @@ import ( "time" "github.com/golang-jwt/jwt/v4" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "go.temporal.io/server/common/primitives" - "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" + "go.temporal.io/server/common/primitives" + "go.uber.org/mock/gomock" ) type errorTestOptions int16 @@ -210,6 +185,24 @@ func (s *defaultClaimMapperSuite) testTokenWithReaderWriterWorkerPermissions(alg defaultRole := claims.Namespaces[defaultNamespace] s.Equal(RoleReader|RoleWriter|RoleWorker, defaultRole) } + +func (s *defaultClaimMapperSuite) TestTokenWithReaderWriterWorkerPermissionsRegex() { + permissions := []string{"read:default", "write:default", "worker:default"} + tokenString, err := s.tokenGenerator.generateToken(RSA, testSubject, permissions, errorTestOptionNoError) + s.NoError(err) + authConfig := &config.Authorization{PermissionsRegex: `(?P<role>\w+):(?P<namespace>\w+)`} + claimMapper := NewDefaultJWTClaimMapper(s.tokenGenerator, authConfig, log.NewNoopLogger()) + s.NotNil(claimMapper) + authInfo := &AuthInfo{AuthToken: AddBearer(tokenString), Audience: "test-audience"} + claims, err := claimMapper.GetClaims(authInfo) + s.NoError(err) + s.Equal(testSubject, claims.Subject) + s.Equal(RoleUndefined, claims.System) + s.Equal(1, len(claims.Namespaces)) + defaultRole := claims.Namespaces[defaultNamespace] + s.Equal(RoleReader|RoleWriter|RoleWorker, defaultRole) +} + func (s *defaultClaimMapperSuite) TestGetClaimMapperFromConfigNoop() { s.testGetClaimMapperFromConfig("", true, reflect.TypeOf(&noopClaimMapper{})) } @@ -221,6 +214,55 @@ func (s *defaultClaimMapperSuite) 
TestGetClaimMapperFromConfigUnknown() { s.testGetClaimMapperFromConfig("foo", false, nil) } +func (s *defaultClaimMapperSuite) TestGetClaimMapperWithPermissionsRegexInvalidRegex() { + pattern := `(?P\w+)` + mapper := NewDefaultJWTClaimMapper(nil, &config.Authorization{PermissionsRegex: pattern}, log.NewNoopLogger()).(*defaultJWTClaimMapper) + s.Nil(mapper.permissionsRegex) + s.Zero(mapper.matchNamespaceIndex) + s.Zero(mapper.matchRoleIndex) +} + +func (s *defaultClaimMapperSuite) TestGetClaimMapperWithPermissionsRegexMissingNamespaceGroup() { + pattern := `(?P<role>\w+):(\w+)` + mapper := NewDefaultJWTClaimMapper( + nil, &config.Authorization{PermissionsRegex: pattern}, log.NewNoopLogger(), + ).(*defaultJWTClaimMapper) + s.Nil(mapper.permissionsRegex) +} + +func (s *defaultClaimMapperSuite) TestGetClaimMapperWithPermissionsRegexMissingRoleGroup() { + pattern := `(?P<namespace>\w+):(\w+)` + mapper := NewDefaultJWTClaimMapper( + nil, &config.Authorization{PermissionsRegex: pattern}, log.NewNoopLogger(), + ).(*defaultJWTClaimMapper) + s.Nil(mapper.permissionsRegex) +} + +func (s *defaultClaimMapperSuite) TestGetClaimMapperWithPermissionsRegex() { + authConfig := &config.Authorization{PermissionsRegex: `(?P<role>\w+):(?P<namespace>\w+)`} + mapper := NewDefaultJWTClaimMapper(nil, authConfig, nil).(*defaultJWTClaimMapper) + s.NotNil(mapper.permissionsRegex) + s.NotZero(mapper.matchNamespaceIndex) + s.NotZero(mapper.matchRoleIndex) +} + +func (s *defaultClaimMapperSuite) TestTokenWithAdminPermissionsRegex() { + permissions := []string{"admin:" + primitives.SystemLocalNamespace, "read:default"} + pattern := `(?P<role>[\w-]+):(?P<namespace>[\w-]+)` + tokenString, err := s.tokenGenerator.generateToken(RSA, testSubject, permissions, errorTestOptionNoError) + s.NoError(err) + authInfo := &AuthInfo{AuthToken: AddBearer(tokenString)} + authConfig := &config.Authorization{PermissionsRegex: pattern} + claimMapper := NewDefaultJWTClaimMapper(s.tokenGenerator, authConfig, nil) + claims, err := claimMapper.GetClaims(authInfo) + 
s.NoError(err) + s.Equal(testSubject, claims.Subject) + s.Equal(RoleAdmin, claims.System) + s.Equal(1, len(claims.Namespaces)) + defaultRole := claims.Namespaces[defaultNamespace] + s.Equal(RoleReader, defaultRole) +} + func (s *defaultClaimMapperSuite) TestWrongAudience() { tokenString, err := s.tokenGenerator.generateRSAToken(testSubject, permissionsAdmin, errorTestOptionNoError) s.NoError(err) diff --git a/common/authorization/default_token_key_provider.go b/common/authorization/default_token_key_provider.go index b81a9bc2e2b..9eb1aa0d196 100644 --- a/common/authorization/default_token_key_provider.go +++ b/common/authorization/default_token_key_provider.go @@ -1,43 +1,21 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package authorization import ( "crypto/ecdsa" "crypto/rsa" "encoding/json" + "errors" "fmt" + "io" "net/http" + "net/url" + "os" "strings" "sync" "time" + "github.com/go-jose/go-jose/v4" "github.com/golang-jwt/jwt/v4" - "go.uber.org/multierr" - "gopkg.in/square/go-jose.v2" - "go.temporal.io/server/common/config" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" @@ -163,17 +141,16 @@ func (a *defaultTokenKeyProvider) updateKeysFromURI( ecKeys map[string]*ecdsa.PublicKey, ) (err error) { - resp, err := http.Get(uri) + resp, err := a.openURI(uri) if err != nil { return err } defer func() { - err = multierr.Combine(err, resp.Body.Close()) + err = errors.Join(err, resp.Close()) }() jwks := jose.JSONWebKeySet{} - err = json.NewDecoder(resp.Body).Decode(&jwks) - if err != nil { + if err := json.NewDecoder(resp).Decode(&jwks); err != nil { return err } @@ -190,6 +167,25 @@ func (a *defaultTokenKeyProvider) updateKeysFromURI( return nil } +// openURI returns a ReadCloser for the given URI. Supports http://, https://, and file:// schemes. 
+func (a *defaultTokenKeyProvider) openURI(uri string) (io.ReadCloser, error) { + u, err := url.Parse(uri) + if err != nil { + return nil, err + } + if u.Scheme == "file" { + if u.Host != "" && u.Host != "localhost" { + return nil, fmt.Errorf("file URI with remote host is not supported: %s", uri) + } + return os.Open(u.Path) + } + resp, err := http.Get(uri) + if err != nil { + return nil, err + } + return resp.Body, nil +} + func (a *defaultTokenKeyProvider) HmacKey(alg string, kid string) ([]byte, error) { return nil, fmt.Errorf("unsupported key type HMAC for: %s", alg) } diff --git a/common/authorization/default_token_key_provider_test.go b/common/authorization/default_token_key_provider_test.go new file mode 100644 index 00000000000..8f6566a9a99 --- /dev/null +++ b/common/authorization/default_token_key_provider_test.go @@ -0,0 +1,51 @@ +package authorization + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/common/log" +) + +func TestOpenURI_FileScheme(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.json") + require.NoError(t, os.WriteFile(path, []byte(`{"keys":[]}`), 0644)) + + provider := &defaultTokenKeyProvider{logger: log.NewNoopLogger()} + + r, err := provider.openURI("file://" + path) + require.NoError(t, err) + require.NoError(t, r.Close()) +} + +func TestOpenURI_FileScheme_Localhost(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "test.json") + require.NoError(t, os.WriteFile(path, []byte(`{"keys":[]}`), 0644)) + + provider := &defaultTokenKeyProvider{logger: log.NewNoopLogger()} + + r, err := provider.openURI("file://localhost" + path) + require.NoError(t, err) + require.NoError(t, r.Close()) +} + +func TestOpenURI_FileScheme_RemoteHost(t *testing.T) { + provider := &defaultTokenKeyProvider{logger: log.NewNoopLogger()} + + _, err := provider.openURI("file://remotehost/tmp/test.json") + require.ErrorContains(t, err, "remote host") +} + +func 
TestOpenURI_FileScheme_NotFound(t *testing.T) { + dir := t.TempDir() + path := filepath.Join(dir, "nonexistent.json") + + provider := &defaultTokenKeyProvider{logger: log.NewNoopLogger()} + + _, err := provider.openURI("file://" + path) + require.Error(t, err) +} diff --git a/common/authorization/frontend_api.go b/common/authorization/frontend_api.go index 73b05128098..6a144d68e89 100644 --- a/common/authorization/frontend_api.go +++ b/common/authorization/frontend_api.go @@ -1,33 +1,12 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package authorization -import "go.temporal.io/server/common/api" +import ( + "go.temporal.io/server/common/api" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) var healthCheckAPI = map[string]struct{}{ - "/grpc.health.v1.Health/Check": {}, + healthpb.Health_Check_FullMethodName: {}, "/temporal.api.workflowservice.v1.WorkflowService/GetSystemInfo": {}, } diff --git a/common/authorization/interceptor.go b/common/authorization/interceptor.go index 353da3bc219..116438206cc 100644 --- a/common/authorization/interceptor.go +++ b/common/authorization/interceptor.go @@ -1,47 +1,26 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package authorization import ( + "cmp" "context" "crypto/x509" "crypto/x509/pkix" "time" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" - + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/server/common/api" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" - "go.temporal.io/server/common/util" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" ) type ( @@ -52,7 +31,7 @@ type ( type ( // JWTAudienceMapper returns JWT audience for a given request JWTAudienceMapper interface { - Audience(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo) string + Audience(ctx context.Context, req any, info *grpc.UnaryServerInfo) string } NamespaceChecker interface { @@ -102,14 +81,18 @@ func PeerCert(tlsInfo *credentials.TLSInfo) *x509.Certificate { } type Interceptor struct { - claimMapper ClaimMapper - authorizer Authorizer - metricsHandler metrics.Handler - logger log.Logger - namespaceChecker NamespaceChecker - audienceGetter JWTAudienceMapper - authHeaderName string - authExtraHeaderName string + claimMapper ClaimMapper + authorizer Authorizer + metricsHandler metrics.Handler + logger log.Logger + namespaceChecker NamespaceChecker + audienceGetter JWTAudienceMapper + authHeaderName string + authExtraHeaderName string + exposeAuthorizerErrors dynamicconfig.BoolPropertyFn + enableCrossNamespaceCommands dynamicconfig.BoolPropertyFn + enablePrincipalPropagation dynamicconfig.BoolPropertyFnWithNamespaceFilter + disableStreamingAuthorizer dynamicconfig.BoolPropertyFn } // NewInterceptor creates an authorization interceptor. 
@@ -122,29 +105,36 @@ func NewInterceptor( audienceGetter JWTAudienceMapper, authHeaderName string, authExtraHeaderName string, + exposeAuthorizerErrors dynamicconfig.BoolPropertyFn, + enableCrossNamespaceCommands dynamicconfig.BoolPropertyFn, + enablePrincipalPropagation dynamicconfig.BoolPropertyFnWithNamespaceFilter, + disableStreamingAuthorizer dynamicconfig.BoolPropertyFn, ) *Interceptor { return &Interceptor{ - claimMapper: claimMapper, - authorizer: authorizer, - logger: logger, - namespaceChecker: namespaceChecker, - metricsHandler: metricsHandler, - authHeaderName: util.Coalesce(authHeaderName, defaultAuthHeaderName), - authExtraHeaderName: util.Coalesce(authExtraHeaderName, defaultAuthExtraHeaderName), - audienceGetter: audienceGetter, + claimMapper: claimMapper, + authorizer: authorizer, + logger: logger, + namespaceChecker: namespaceChecker, + metricsHandler: metricsHandler, + authHeaderName: cmp.Or(authHeaderName, defaultAuthHeaderName), + authExtraHeaderName: cmp.Or(authExtraHeaderName, defaultAuthExtraHeaderName), + audienceGetter: audienceGetter, + exposeAuthorizerErrors: exposeAuthorizerErrors, + enableCrossNamespaceCommands: enableCrossNamespaceCommands, + enablePrincipalPropagation: enablePrincipalPropagation, + disableStreamingAuthorizer: disableStreamingAuthorizer, } } func (a *Interceptor) Intercept( ctx context.Context, - req interface{}, + req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, -) (interface{}, error) { +) (any, error) { tlsConnection := TLSInfoFromContext(ctx) - md, _ := metadata.FromIncomingContext(ctx) - authInfo := a.GetAuthInfo(tlsConnection, headers.GRPCHeaderGetter{Metadata: md}, func() string { + authInfo := a.GetAuthInfo(tlsConnection, headers.NewGRPCHeaderGetter(ctx), func() string { if a.audienceGetter != nil { return a.audienceGetter.Audience(ctx, req, info) } @@ -163,6 +153,10 @@ func (a *Interceptor) Intercept( ctx = a.EnhanceContext(ctx, authInfo, claims) } + // Always strip inbound principal headers 
to prevent external callers from + // spoofing principal identity, regardless of whether the authorizer is enabled. + ctx = headers.StripPrincipal(ctx) + if a.authorizer != nil { var namespace string requestWithNamespace, ok := req.(hasNamespace) @@ -174,13 +168,79 @@ func (a *Interceptor) Intercept( APIName: info.FullMethod, Request: req, } - if err := a.Authorize(ctx, claims, ct); err != nil { + principal, err := a.Authorize(ctx, claims, ct) + if err != nil { + return nil, err + } + if a.enablePrincipalPropagation != nil && a.enablePrincipalPropagation(namespace) && principal != nil { + ctx = headers.SetPrincipal(ctx, principal) + } + + // Authorize target namespaces in cross-namespace commands + if err := a.authorizeTargetNamespaces(ctx, claims, namespace, req); err != nil { return nil, err } } return handler(ctx, req) } +// InterceptStream is a gRPC stream server interceptor that enforces authorization. +func (a *Interceptor) InterceptStream( + srv any, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, +) error { + ctx := ss.Context() + bypassAuth := a.disableStreamingAuthorizer() + if !bypassAuth { + tlsConnection := TLSInfoFromContext(ctx) + + authInfo := a.GetAuthInfo(tlsConnection, headers.NewGRPCHeaderGetter(ctx), func() string { + // JWTAudienceMapper only supports UnaryServerInfo; no request is available at stream init. + return "" + }) + + var claims *Claims + if authInfo != nil { + var err error + claims, err = a.GetClaims(authInfo) + if err != nil { + a.logger.Error("Authorization error", tag.Error(err)) + return errUnauthorized + } + ctx = a.EnhanceContext(ctx, authInfo, claims) + } + + // Always strip inbound principal headers to prevent external callers from + // spoofing principal identity, regardless of whether the authorizer is enabled. + ctx = headers.StripPrincipal(ctx) + + if a.authorizer != nil { + // Namespace is not available in the stream handshake (no initial request body). 
+ ct := &CallTarget{ + Namespace: "", + APIName: info.FullMethod, + Request: nil, + } + if _, err := a.Authorize(ctx, claims, ct); err != nil { + a.logger.Error("Authorization error", tag.Error(err)) + return err + } + } + } + + return handler(srv, &wrappedServerStream{ServerStream: ss, ctx: ctx}) +} + +// wrappedServerStream wraps grpc.ServerStream to allow replacing the context. +type wrappedServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (w *wrappedServerStream) Context() context.Context { return w.ctx } + // GetAuthInfo extracts auth info from TLS info and headers. // Returns nil if either the policy's claimMapper or authorizer are nil or when there is no auth information in the // provided TLS info or headers. @@ -236,9 +296,10 @@ func (a *Interceptor) EnhanceContext(ctx context.Context, authInfo *AuthInfo, cl // Authorize uses the policy's authorizer to authorize a request based on provided claims and call target. // Logs and emits metrics when unauthorized. -func (a *Interceptor) Authorize(ctx context.Context, claims *Claims, ct *CallTarget) error { +// Returns the principal identity and any authorization error. 
+func (a *Interceptor) Authorize(ctx context.Context, claims *Claims, ct *CallTarget) (*commonpb.Principal, error) { if a.authorizer == nil { - return nil + return nil, nil } mh := a.getMetricsHandler(ct.Namespace) @@ -249,17 +310,20 @@ func (a *Interceptor) Authorize(ctx context.Context, claims *Claims, ct *CallTar if err != nil { metrics.ServiceErrAuthorizeFailedCounter.With(mh).Record(1) a.logger.Error("Authorization error", tag.Error(err)) - return errUnauthorized // return a generic error to the caller without disclosing details + if a.exposeAuthorizerErrors() { + return nil, err + } + return nil, errUnauthorized // return a generic error to the caller without disclosing details } if result.Decision != DecisionAllow { metrics.ServiceErrUnauthorizedCounter.With(mh).Record(1) // if a reason is included in the result, include it in the error message if result.Reason != "" { - return serviceerror.NewPermissionDenied(RequestUnauthorized, result.Reason) + return nil, serviceerror.NewPermissionDenied(RequestUnauthorized, result.Reason) } - return errUnauthorized // return a generic error to the caller without disclosing details + return nil, errUnauthorized // return a generic error to the caller without disclosing details } - return nil + return result.Principal, nil } // getMetricsHandler returns a metrics handler with a namespace tag @@ -275,3 +339,75 @@ func (a *Interceptor) getMetricsHandler(nsName string) metrics.Handler { } return a.metricsHandler.WithTags(metrics.OperationTag(metrics.AuthorizationScope), nsTag) } + +// authorizeTargetNamespaces authorizes cross-namespace commands in RespondWorkflowTaskCompleted. +// Commands like SignalExternalWorkflow, StartChildWorkflow, and CancelExternalWorkflow can target +// workflows in different namespaces. This method ensures the caller has permission in those target +// namespaces as well. 
+func (a *Interceptor) authorizeTargetNamespaces( + ctx context.Context, + claims *Claims, + sourceNamespace string, + req any, +) error { + // Skip if cross-namespace commands are not enabled + if !a.enableCrossNamespaceCommands() { + return nil + } + + // Cross-namespace commands can only be initiated via RespondWorkflowTaskCompletedRequest. + // Here we handle authorization for all such commands: SignalExternalWorkflow, + // StartChildWorkflow, and RequestCancelExternalWorkflow targeting a different namespace. + wftRequest, ok := req.(*workflowservice.RespondWorkflowTaskCompletedRequest) + if !ok { + return nil + } + + // Track namespace+API combinations we've already authorized to avoid duplicate checks + authorizedNamespaceAPIs := make(map[string]struct{}) + + for _, cmd := range wftRequest.GetCommands() { + var targetNamespace string + var apiName string + + switch cmd.GetCommandType() { + case enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION: + if attr := cmd.GetSignalExternalWorkflowExecutionCommandAttributes(); attr != nil { + targetNamespace = attr.GetNamespace() + apiName = "SignalWorkflowExecution" + } + case enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION: + if attr := cmd.GetStartChildWorkflowExecutionCommandAttributes(); attr != nil { + targetNamespace = attr.GetNamespace() + apiName = "StartWorkflowExecution" + } + case enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION: + if attr := cmd.GetRequestCancelExternalWorkflowExecutionCommandAttributes(); attr != nil { + targetNamespace = attr.GetNamespace() + apiName = "RequestCancelWorkflowExecution" + } + default: + // Other command types don't target external namespaces + } + + // Skip if empty, same as source, or already authorized + if targetNamespace == "" || targetNamespace == sourceNamespace { + continue + } + key := targetNamespace + ":" + apiName + if _, ok := authorizedNamespaceAPIs[key]; ok { + continue + } + + // Authorize access to target namespace for this specific 
API + if _, err := a.Authorize(ctx, claims, &CallTarget{ + APIName: api.WorkflowServicePrefix + apiName, + Namespace: targetNamespace, + Request: req, + }); err != nil { + return err + } + authorizedNamespaceAPIs[key] = struct{}{} + } + return nil +} diff --git a/common/authorization/interceptor_test.go b/common/authorization/interceptor_test.go index 09775c18f69..641016ac521 100644 --- a/common/authorization/interceptor_test.go +++ b/common/authorization/interceptor_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package authorization import ( @@ -29,21 +5,28 @@ import ( "errors" "testing" - "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + commandpb "go.temporal.io/api/command/v1" + commonpb "go.temporal.io/api/common/v1" + enumspb "go.temporal.io/api/enums/v1" + "go.temporal.io/api/serviceerror" "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/api/workflowservicemock/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - + "go.temporal.io/server/common/api" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/namespace" + "go.uber.org/mock/gomock" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) const ( - testNamespace string = "test-namespace" + testNamespace string = "test-namespace" + targetNamespace string = "target-namespace" + anotherNamespace string = "another-namespace" ) var ( @@ -54,6 +37,8 @@ var ( startWorkflowExecutionRequest = &workflowservice.StartWorkflowExecutionRequest{Namespace: testNamespace} startWorkflowExecutionTarget = &CallTarget{Namespace: testNamespace, Request: startWorkflowExecutionRequest, APIName: "/temporal.api.workflowservice.v1.WorkflowService/StartWorkflowExecution"} startWorkflowExecutionInfo = &grpc.UnaryServerInfo{FullMethod: "/temporal.api.workflowservice.v1.WorkflowService/StartWorkflowExecution"} + + respondWorkflowTaskCompletedInfo = &grpc.UnaryServerInfo{FullMethod: api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted"} ) type ( @@ -61,13 +46,12 @@ type ( suite.Suite *require.Assertions - controller *gomock.Controller - mockFrontendHandler *workflowservicemock.MockWorkflowServiceServer - mockAuthorizer *MockAuthorizer - mockMetricsHandler *metrics.MockHandler - interceptor *Interceptor - handler grpc.UnaryHandler - mockClaimMapper *MockClaimMapper + controller *gomock.Controller + 
mockAuthorizer *MockAuthorizer + mockMetricsHandler *metrics.MockHandler + interceptor *Interceptor + handler grpc.UnaryHandler + mockClaimMapper *MockClaimMapper } mockNamespaceChecker namespace.Name @@ -82,7 +66,6 @@ func (s *authorizerInterceptorSuite) SetupTest() { s.Assertions = require.New(s.T()) s.controller = gomock.NewController(s.T()) - s.mockFrontendHandler = workflowservicemock.NewMockWorkflowServiceServer(s.controller) s.mockAuthorizer = NewMockAuthorizer(s.controller) s.mockMetricsHandler = metrics.NewMockHandler(s.controller) s.mockMetricsHandler.EXPECT().WithTags( @@ -101,8 +84,12 @@ func (s *authorizerInterceptorSuite) SetupTest() { nil, "", "", + dynamicconfig.GetBoolPropertyFn(false), // exposeAuthorizerErrors + dynamicconfig.GetBoolPropertyFn(false), // enableCrossNamespaceCommands + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // enablePrincipalPropagation + dynamicconfig.GetBoolPropertyFn(false), // disableStreamingAuthorizer ) - s.handler = func(ctx context.Context, req interface{}) (interface{}, error) { return true, nil } + s.handler = func(ctx context.Context, req any) (any, error) { return true, nil } } func (s *authorizerInterceptorSuite) TearDownTest() { @@ -164,6 +151,32 @@ func (s *authorizerInterceptorSuite) TestAuthorizationFailed() { s.Error(err) } +func (s *authorizerInterceptorSuite) TestAuthorizationFailedExposed() { + interceptor := NewInterceptor( + s.mockClaimMapper, + s.mockAuthorizer, + s.mockMetricsHandler, + log.NewNoopLogger(), + mockNamespaceChecker(testNamespace), + nil, + "", + "", + dynamicconfig.GetBoolPropertyFn(true), // exposeAuthorizerErrors + dynamicconfig.GetBoolPropertyFn(false), // enableCrossNamespaceCommands + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // enablePrincipalPropagation + dynamicconfig.GetBoolPropertyFn(false), // disableStreamingAuthorizer + ) + + authErr := serviceerror.NewInternal("intentional test failure") + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, 
describeNamespaceTarget). + Return(Result{Decision: DecisionDeny}, authErr) + s.mockMetricsHandler.EXPECT().Counter(metrics.ServiceErrAuthorizeFailedCounter.Name()).Return(metrics.NoopCounterMetricFunc) + + res, err := interceptor.Intercept(ctx, describeNamespaceRequest, describeNamespaceInfo, s.handler) + s.Nil(res) + s.ErrorIs(err, authErr) +} + func (s *authorizerInterceptorSuite) TestNoopClaimMapperWithoutTLS() { admin := &Claims{System: RoleAdmin} s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), admin, describeNamespaceTarget). @@ -183,6 +196,10 @@ func (s *authorizerInterceptorSuite) TestNoopClaimMapperWithoutTLS() { nil, "", "", + dynamicconfig.GetBoolPropertyFn(false), // exposeAuthorizerErrors + dynamicconfig.GetBoolPropertyFn(false), // enableCrossNamespaceCommands + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // enablePrincipalPropagation + dynamicconfig.GetBoolPropertyFn(false), // disableStreamingAuthorizer ) _, err := interceptor.Intercept(ctx, describeNamespaceRequest, describeNamespaceInfo, s.handler) s.NoError(err) @@ -198,6 +215,10 @@ func (s *authorizerInterceptorSuite) TestAlternateHeaders() { nil, "custom-header", "custom-extra-header", + dynamicconfig.GetBoolPropertyFn(false), // exposeAuthorizerErrors + dynamicconfig.GetBoolPropertyFn(false), // enableCrossNamespaceCommands + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // enablePrincipalPropagation + dynamicconfig.GetBoolPropertyFn(false), // disableStreamingAuthorizer ) cases := []struct { @@ -246,3 +267,534 @@ func (n mockNamespaceChecker) Exists(name namespace.Name) error { } return errors.New("doesn't exist") } + +// multiNamespaceChecker is a mock that recognizes multiple namespaces +type multiNamespaceChecker []string + +func (m multiNamespaceChecker) Exists(name namespace.Name) error { + for _, ns := range m { + if ns == string(name) { + return nil + } + } + return errors.New("doesn't exist") +} + +// Helper to create a cross-namespace command +func 
makeCrossNamespaceCommand(commandType enumspb.CommandType, targetNs string) *commandpb.Command { + switch commandType { + case enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION: + return &commandpb.Command{ + CommandType: commandType, + Attributes: &commandpb.Command_SignalExternalWorkflowExecutionCommandAttributes{ + SignalExternalWorkflowExecutionCommandAttributes: &commandpb.SignalExternalWorkflowExecutionCommandAttributes{ + Namespace: targetNs, + }, + }, + } + case enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION: + return &commandpb.Command{ + CommandType: commandType, + Attributes: &commandpb.Command_StartChildWorkflowExecutionCommandAttributes{ + StartChildWorkflowExecutionCommandAttributes: &commandpb.StartChildWorkflowExecutionCommandAttributes{ + Namespace: targetNs, + }, + }, + } + case enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION: + return &commandpb.Command{ + CommandType: commandType, + Attributes: &commandpb.Command_RequestCancelExternalWorkflowExecutionCommandAttributes{ + RequestCancelExternalWorkflowExecutionCommandAttributes: &commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes{ + Namespace: targetNs, + }, + }, + } + default: + return nil + } +} + +// Helper to create interceptor with cross-namespace commands enabled +func (s *authorizerInterceptorSuite) newCrossNamespaceInterceptor(namespaces ...string) *Interceptor { + return NewInterceptor( + s.mockClaimMapper, + s.mockAuthorizer, + s.mockMetricsHandler, + log.NewNoopLogger(), + multiNamespaceChecker(namespaces), + nil, + "", + "", + dynamicconfig.GetBoolPropertyFn(false), // exposeAuthorizerErrors + dynamicconfig.GetBoolPropertyFn(true), // enableCrossNamespaceCommands + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(false), // enablePrincipalPropagation + dynamicconfig.GetBoolPropertyFn(false), // disableStreamingAuthorizer + ) +} + +func (s *authorizerInterceptorSuite) TestCrossNamespaceCommands_Authorized() { + testCases := []struct { + name 
string + commandType enumspb.CommandType + expectedAPI string + }{ + { + name: "SignalExternalWorkflow", + commandType: enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, + expectedAPI: "SignalWorkflowExecution", + }, + { + name: "StartChildWorkflow", + commandType: enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, + expectedAPI: "StartWorkflowExecution", + }, + { + name: "CancelExternalWorkflow", + commandType: enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION, + expectedAPI: "RequestCancelWorkflowExecution", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + request := &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: testNamespace, + Commands: []*commandpb.Command{makeCrossNamespaceCommand(tc.commandType, targetNamespace)}, + } + + sourceTarget := &CallTarget{ + Namespace: testNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted", + } + crossNsTarget := &CallTarget{ + Namespace: targetNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + tc.expectedAPI, + } + + interceptor := s.newCrossNamespaceInterceptor(testNamespace, targetNamespace) + + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, sourceTarget). + Return(Result{Decision: DecisionAllow}, nil) + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceTag(targetNamespace), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, crossNsTarget). 
+ Return(Result{Decision: DecisionAllow}, nil) + + res, err := interceptor.Intercept(ctx, request, respondWorkflowTaskCompletedInfo, s.handler) + s.True(res.(bool)) + s.NoError(err) + }) + } +} + +func (s *authorizerInterceptorSuite) TestCrossNamespaceCommand_Unauthorized() { + request := &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: testNamespace, + Commands: []*commandpb.Command{makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, targetNamespace)}, + } + + sourceTarget := &CallTarget{ + Namespace: testNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted", + } + crossNsTarget := &CallTarget{ + Namespace: targetNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + "SignalWorkflowExecution", + } + + interceptor := s.newCrossNamespaceInterceptor(testNamespace, targetNamespace) + + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, sourceTarget). + Return(Result{Decision: DecisionAllow}, nil) + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceTag(targetNamespace), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, crossNsTarget). 
+ Return(Result{Decision: DecisionDeny}, nil) + s.mockMetricsHandler.EXPECT().Counter(metrics.ServiceErrUnauthorizedCounter.Name()).Return(metrics.NoopCounterMetricFunc) + + res, err := interceptor.Intercept(ctx, request, respondWorkflowTaskCompletedInfo, s.handler) + s.Nil(res) + s.Error(err) +} + +func (s *authorizerInterceptorSuite) TestNoExtraAuthCheck() { + testCases := []struct { + name string + targetNs string + description string + }{ + { + name: "SameNamespace", + targetNs: testNamespace, // Same as source + description: "command targeting same namespace should not trigger extra auth", + }, + { + name: "EmptyNamespace", + targetNs: "", // Empty defaults to source + description: "command with empty namespace should not trigger extra auth", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + request := &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: testNamespace, + Commands: []*commandpb.Command{makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, tc.targetNs)}, + } + + sourceTarget := &CallTarget{ + Namespace: testNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted", + } + + // Only expect authorization for source namespace + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, sourceTarget). 
+ Return(Result{Decision: DecisionAllow}, nil) + + res, err := s.interceptor.Intercept(ctx, request, respondWorkflowTaskCompletedInfo, s.handler) + s.True(res.(bool)) + s.NoError(err) + }) + } +} + +func (s *authorizerInterceptorSuite) TestCrossNamespaceCommand_DisabledFeature() { + // When cross-namespace commands are disabled, no extra auth check should happen + request := &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: testNamespace, + Commands: []*commandpb.Command{makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, targetNamespace)}, + } + + sourceTarget := &CallTarget{ + Namespace: testNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted", + } + + // Interceptor with cross-namespace commands DISABLED (uses default s.interceptor which has it disabled) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, sourceTarget). + Return(Result{Decision: DecisionAllow}, nil) + + res, err := s.interceptor.Intercept(ctx, request, respondWorkflowTaskCompletedInfo, s.handler) + s.True(res.(bool)) + s.NoError(err) +} + +func (s *authorizerInterceptorSuite) TestMultipleCommands_AuthDeduplication() { + // Test that authorization is deduplicated per namespace+API combination + request := &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: testNamespace, + Commands: []*commandpb.Command{ + makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, targetNamespace), + makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, targetNamespace), + makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_REQUEST_CANCEL_EXTERNAL_WORKFLOW_EXECUTION, targetNamespace), + // Duplicate signal to same namespace - should not trigger extra auth + makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, targetNamespace), + }, + } + + sourceTarget := &CallTarget{ + Namespace: testNamespace, + Request: request, + APIName: 
api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted", + } + + interceptor := s.newCrossNamespaceInterceptor(testNamespace, targetNamespace) + + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, sourceTarget). + Return(Result{Decision: DecisionAllow}, nil) + // Expect 3 auth checks (one per unique API type), not 4 + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceTag(targetNamespace), + ).Return(s.mockMetricsHandler).Times(3) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, &CallTarget{ + Namespace: targetNamespace, Request: request, APIName: api.WorkflowServicePrefix + "SignalWorkflowExecution", + }).Return(Result{Decision: DecisionAllow}, nil) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, &CallTarget{ + Namespace: targetNamespace, Request: request, APIName: api.WorkflowServicePrefix + "StartWorkflowExecution", + }).Return(Result{Decision: DecisionAllow}, nil) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, &CallTarget{ + Namespace: targetNamespace, Request: request, APIName: api.WorkflowServicePrefix + "RequestCancelWorkflowExecution", + }).Return(Result{Decision: DecisionAllow}, nil) + + res, err := interceptor.Intercept(ctx, request, respondWorkflowTaskCompletedInfo, s.handler) + s.True(res.(bool)) + s.NoError(err) +} + +func (s *authorizerInterceptorSuite) TestPrincipalPropagation_Enabled() { + principal := &commonpb.Principal{Type: "user", Name: "alice"} + s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), nil, describeNamespaceTarget). 
+ Return(Result{Decision: DecisionAllow, Principal: principal}, nil) + + interceptor := NewInterceptor( + s.mockClaimMapper, + s.mockAuthorizer, + s.mockMetricsHandler, + log.NewNoopLogger(), + mockNamespaceChecker(testNamespace), + nil, + "", + "", + dynamicconfig.GetBoolPropertyFn(false), // exposeAuthorizerErrors + dynamicconfig.GetBoolPropertyFn(false), // enableCrossNamespaceCommands + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true), // enablePrincipalPropagation + dynamicconfig.GetBoolPropertyFn(false), // disableStreamingAuthorizer + ) + + inCtx := metadata.NewIncomingContext(ctx, metadata.MD{}) + var gotPrincipal *commonpb.Principal + handler := func(ctx context.Context, req any) (any, error) { + gotPrincipal = headers.GetPrincipal(ctx) + return true, nil + } + + res, err := interceptor.Intercept(inCtx, describeNamespaceRequest, describeNamespaceInfo, handler) + s.True(res.(bool)) + s.NoError(err) + s.Equal(principal, gotPrincipal) +} + +func (s *authorizerInterceptorSuite) TestPrincipalPropagation_Disabled() { + s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), nil, describeNamespaceTarget). + Return(Result{Decision: DecisionAllow, Principal: &commonpb.Principal{Type: "user", Name: "alice"}}, nil) + + inCtx := metadata.NewIncomingContext(ctx, metadata.MD{}) + var gotPrincipal *commonpb.Principal + handler := func(ctx context.Context, req any) (any, error) { + gotPrincipal = headers.GetPrincipal(ctx) + return true, nil + } + + // s.interceptor has enablePrincipalPropagation=false + res, err := s.interceptor.Intercept(inCtx, describeNamespaceRequest, describeNamespaceInfo, handler) + s.True(res.(bool)) + s.NoError(err) + s.Nil(gotPrincipal) +} + +func (s *authorizerInterceptorSuite) TestPrincipalPropagation_SpoofedHeadersStripped() { + s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), nil, describeNamespaceTarget). + Return(Result{Decision: DecisionAllow}, nil) // no principal returned + + // Inject spoofed principal headers in the incoming context. 
+ inCtx := metadata.NewIncomingContext(ctx, metadata.Pairs( + headers.PrincipalTypeHeaderName, "spoofed-type", + headers.PrincipalNameHeaderName, "spoofed-name", + )) + var gotPrincipal *commonpb.Principal + handler := func(ctx context.Context, req any) (any, error) { + gotPrincipal = headers.GetPrincipal(ctx) + return true, nil + } + + // s.interceptor has enablePrincipalPropagation=false + res, err := s.interceptor.Intercept(inCtx, describeNamespaceRequest, describeNamespaceInfo, handler) + s.True(res.(bool)) + s.NoError(err) + s.Nil(gotPrincipal, "spoofed principal headers should be stripped") +} + +func (s *authorizerInterceptorSuite) TestMultipleTargetNamespaces() { + // Test commands targeting different namespaces + request := &workflowservice.RespondWorkflowTaskCompletedRequest{ + Namespace: testNamespace, + Commands: []*commandpb.Command{ + makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_SIGNAL_EXTERNAL_WORKFLOW_EXECUTION, targetNamespace), + makeCrossNamespaceCommand(enumspb.COMMAND_TYPE_START_CHILD_WORKFLOW_EXECUTION, anotherNamespace), + }, + } + + sourceTarget := &CallTarget{ + Namespace: testNamespace, + Request: request, + APIName: api.WorkflowServicePrefix + "RespondWorkflowTaskCompleted", + } + + interceptor := s.newCrossNamespaceInterceptor(testNamespace, targetNamespace, anotherNamespace) + + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, sourceTarget). 
+ Return(Result{Decision: DecisionAllow}, nil) + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceTag(targetNamespace), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, &CallTarget{ + Namespace: targetNamespace, Request: request, APIName: api.WorkflowServicePrefix + "SignalWorkflowExecution", + }).Return(Result{Decision: DecisionAllow}, nil) + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceTag(anotherNamespace), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(ctx, nil, &CallTarget{ + Namespace: anotherNamespace, Request: request, APIName: api.WorkflowServicePrefix + "StartWorkflowExecution", + }).Return(Result{Decision: DecisionAllow}, nil) + + res, err := interceptor.Intercept(ctx, request, respondWorkflowTaskCompletedInfo, s.handler) + s.True(res.(bool)) + s.NoError(err) +} + +// mockServerStream is a minimal grpc.ServerStream for testing InterceptStream. +type mockServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (m *mockServerStream) Context() context.Context { return m.ctx } + +var streamInfo = &grpc.StreamServerInfo{FullMethod: "/temporal.server.api.adminservice.v1.AdminService/StreamWorkflowReplicationMessages"} + +func (s *authorizerInterceptorSuite) TestInterceptStream_Authorized() { + streamTarget := &CallTarget{ + Namespace: "", + APIName: streamInfo.FullMethod, + Request: nil, + } + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceUnknownTag(), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), nil, streamTarget). 
+ Return(Result{Decision: DecisionAllow}, nil) + + handlerCalled := false + streamHandler := func(srv any, stream grpc.ServerStream) error { + handlerCalled = true + return nil + } + + ss := &mockServerStream{ctx: ctx} + err := s.interceptor.InterceptStream(nil, ss, streamInfo, streamHandler) + s.NoError(err) + s.True(handlerCalled) +} + +func (s *authorizerInterceptorSuite) TestInterceptStream_Unauthorized() { + streamTarget := &CallTarget{ + Namespace: "", + APIName: streamInfo.FullMethod, + Request: nil, + } + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceUnknownTag(), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), nil, streamTarget). + Return(Result{Decision: DecisionDeny}, nil) + s.mockMetricsHandler.EXPECT().Counter(metrics.ServiceErrUnauthorizedCounter.Name()).Return(metrics.NoopCounterMetricFunc) + + handlerCalled := false + streamHandler := func(srv any, stream grpc.ServerStream) error { + handlerCalled = true + return nil + } + + ss := &mockServerStream{ctx: ctx} + err := s.interceptor.InterceptStream(nil, ss, streamInfo, streamHandler) + s.Error(err) + s.False(handlerCalled) +} + +func (s *authorizerInterceptorSuite) TestInterceptStream_AuthDisabled() { + // When claimMapper and authorizer are nil, the interceptor should be a passthrough. 
+ interceptor := NewInterceptor( + nil, // claimMapper + nil, // authorizer + s.mockMetricsHandler, + log.NewNoopLogger(), + mockNamespaceChecker(testNamespace), + nil, + "", + "", + dynamicconfig.GetBoolPropertyFn(false), + dynamicconfig.GetBoolPropertyFn(false), + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true), + dynamicconfig.GetBoolPropertyFn(false), + ) + + handlerCalled := false + streamHandler := func(srv any, stream grpc.ServerStream) error { + handlerCalled = true + return nil + } + + ss := &mockServerStream{ctx: ctx} + err := interceptor.InterceptStream(nil, ss, streamInfo, streamHandler) + s.NoError(err) + s.True(handlerCalled) +} + +func (s *authorizerInterceptorSuite) TestInterceptStream_InvalidToken() { + interceptor := NewInterceptor( + s.mockClaimMapper, + s.mockAuthorizer, + s.mockMetricsHandler, + log.NewNoopLogger(), + mockNamespaceChecker(testNamespace), + nil, + "", + "", + dynamicconfig.GetBoolPropertyFn(false), + dynamicconfig.GetBoolPropertyFn(false), + dynamicconfig.GetBoolPropertyFnFilteredByNamespace(true), + dynamicconfig.GetBoolPropertyFn(false), + ) + + // Provide an incoming context with an auth token so GetAuthInfo returns non-nil. + inCtx := metadata.NewIncomingContext(ctx, metadata.Pairs("authorization", "bad-token")) + authInfo := &AuthInfo{AuthToken: "bad-token"} + claimErr := errors.New("invalid token") + s.mockClaimMapper.EXPECT().GetClaims(authInfo).Return(nil, claimErr) + + handlerCalled := false + streamHandler := func(srv any, stream grpc.ServerStream) error { + handlerCalled = true + return nil + } + + ss := &mockServerStream{ctx: inCtx} + err := interceptor.InterceptStream(nil, ss, streamInfo, streamHandler) + s.Error(err) + s.False(handlerCalled) +} + +func (s *authorizerInterceptorSuite) TestInterceptStream_ContextPropagated() { + // Verify the handler receives a wrapped stream with the modified context. 
+ streamTarget := &CallTarget{ + Namespace: "", + APIName: streamInfo.FullMethod, + Request: nil, + } + s.mockMetricsHandler.EXPECT().WithTags( + metrics.OperationTag(metrics.AuthorizationScope), + metrics.NamespaceUnknownTag(), + ).Return(s.mockMetricsHandler) + s.mockAuthorizer.EXPECT().Authorize(gomock.Any(), nil, streamTarget). + Return(Result{Decision: DecisionAllow}, nil) + + // Inject a spoofed principal header; it must be stripped in the handler's context. + inCtx := metadata.NewIncomingContext(ctx, metadata.MD{}) + + var handlerCtx context.Context + streamHandler := func(srv any, stream grpc.ServerStream) error { + handlerCtx = stream.Context() + return nil + } + + ss := &mockServerStream{ctx: inCtx} + err := s.interceptor.InterceptStream(nil, ss, streamInfo, streamHandler) + s.NoError(err) + s.NotNil(handlerCtx) +} diff --git a/common/authorization/noop_authorizer.go b/common/authorization/noop_authorizer.go index eeba82740da..9208adbc281 100644 --- a/common/authorization/noop_authorizer.go +++ b/common/authorization/noop_authorizer.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization import "context" diff --git a/common/authorization/principal.go b/common/authorization/principal.go new file mode 100644 index 00000000000..1aa4bc60d71 --- /dev/null +++ b/common/authorization/principal.go @@ -0,0 +1,9 @@ +package authorization + +// Defines principal types and names supported in Temporal. +const ( + // Identifies internal Temporal-managed services such as history, + // matching, per-namespace worker, etc. + InternalPrincipalType = "temporal" + InternalPrincipalName = "internal" +) diff --git a/common/authorization/roles.go b/common/authorization/roles.go index 3e23058953b..da424844ca9 100644 --- a/common/authorization/roles.go +++ b/common/authorization/roles.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization type Role int16 @@ -54,7 +30,9 @@ type Claims struct { // Roles within specific namespaces Namespaces map[string]Role // Free form bucket for extra data - Extensions interface{} + Extensions any + // AuthType identifies the authentication method that produced these claims (e.g., "jwt", "mtls"). + AuthType string } // @@@SNIPEND diff --git a/common/authorization/roles_test.go b/common/authorization/roles_test.go index 1acdd9af32d..8683238bed1 100644 --- a/common/authorization/roles_test.go +++ b/common/authorization/roles_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization import ( diff --git a/common/authorization/token_key_provider.go b/common/authorization/token_key_provider.go index 60e534d6b21..a5592fcee32 100644 --- a/common/authorization/token_key_provider.go +++ b/common/authorization/token_key_provider.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package authorization import ( @@ -44,7 +20,7 @@ type TokenKeyProvider interface { // RawTokenKeyProvider is a TokenKeyProvider that provides keys for validating JWT tokens type RawTokenKeyProvider interface { - GetKey(ctx context.Context, token *jwt.Token) (interface{}, error) + GetKey(ctx context.Context, token *jwt.Token) (any, error) SupportedMethods() []string Close() } diff --git a/common/backoff/cron.go b/common/backoff/cron.go index 841646f7f7a..efd2ab147c5 100644 --- a/common/backoff/cron.go +++ b/common/backoff/cron.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package backoff import ( @@ -29,7 +5,6 @@ import ( "github.com/robfig/cron/v3" "go.temporal.io/api/serviceerror" - "go.temporal.io/server/common/convert" ) diff --git a/common/backoff/cron_test.go b/common/backoff/cron_test.go index c8a7291882c..c8e07e243a1 100644 --- a/common/backoff/cron_test.go +++ b/common/backoff/cron_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package backoff import ( diff --git a/common/backoff/jitter.go b/common/backoff/jitter.go index 9af28764fe5..7efbfa32d15 100644 --- a/common/backoff/jitter.go +++ b/common/backoff/jitter.go @@ -1,34 +1,6 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package backoff -import ( - "math/rand" -) - -const fullCoefficient float64 = 1 +import "math/rand" // FullJitter return random number from 0 to input, inclusive, exclusive func FullJitter[T ~int64 | ~int | ~int32 | ~float64 | ~float32](input T) T { diff --git a/common/backoff/jitter_test.go b/common/backoff/jitter_test.go index e32cc623cc2..9b3f5bf62fd 100644 --- a/common/backoff/jitter_test.go +++ b/common/backoff/jitter_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package backoff import ( @@ -52,7 +28,7 @@ func (s *jitterSuite) TestJitter_Int64() { lowerBound := int64(float64(input) * (1 - coefficient)) upperBound := int64(float64(input) * (1 + coefficient)) - for i := 0; i < 1048576; i++ { + for range 1048576 { result := Jitter(input, coefficient) s.True(result >= lowerBound) s.True(result < upperBound) @@ -69,7 +45,7 @@ func (s *jitterSuite) TestJitter_Float64() { lowerBound := float64(input) * (1 - coefficient) upperBound := float64(input) * (1 + coefficient) - for i := 0; i < 1048576; i++ { + for range 1048576 { result := Jitter(input, coefficient) s.True(result >= lowerBound) s.True(result < upperBound) @@ -86,7 +62,7 @@ func (s *jitterSuite) TestJitter_Duration() { lowerBound := time.Duration(int64(float64(input.Nanoseconds()) * (1 - coefficient))) upperBound := time.Duration(int64(float64(input.Nanoseconds()) * (1 + coefficient))) - for i := 0; i < 1048576; i++ { + for range 1048576 { result := Jitter(input, coefficient) s.True(result >= lowerBound) s.True(result < upperBound) diff --git a/common/backoff/retry.go b/common/backoff/retry.go index 27e28bf4490..5192a0f52df 100644 --- a/common/backoff/retry.go +++ b/common/backoff/retry.go @@ -1,35 +1,14 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package backoff import ( "context" - "sync" + "math" "time" + commonpb "go.temporal.io/api/common/v1" "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common/clock" + "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -54,63 +33,8 @@ type ( // IsRetryable handler can be used to exclude certain errors during retry IsRetryable func(error) bool - - // ConcurrentRetrier is used for client-side throttling. It determines whether to - // throttle outgoing traffic in case downstream backend server rejects - // requests due to out-of-quota or server busy errors. - ConcurrentRetrier struct { - sync.Mutex - retrier Retrier // Backoff retrier - failureCount int64 // Number of consecutive failures seen - } ) -// Throttle Sleep if there were failures since the last success call. -func (c *ConcurrentRetrier) Throttle() { - c.throttleInternal() -} - -func (c *ConcurrentRetrier) throttleInternal() time.Duration { - next := done - - // Check if we have failure count. - failureCount := c.failureCount - if failureCount > 0 { - defer c.Unlock() - c.Lock() - if c.failureCount > 0 { - next = c.retrier.NextBackOff() - } - } - - if next != done { - time.Sleep(next) - } - - return next -} - -// Succeeded marks client request succeeded. -func (c *ConcurrentRetrier) Succeeded() { - defer c.Unlock() - c.Lock() - c.failureCount = 0 - c.retrier.Reset() -} - -// Failed marks client request failed because backend is busy. 
-func (c *ConcurrentRetrier) Failed() { - defer c.Unlock() - c.Lock() - c.failureCount++ -} - -// NewConcurrentRetrier returns an instance of concurrent backoff retrier. -func NewConcurrentRetrier(retryPolicy RetryPolicy) *ConcurrentRetrier { - retrier := NewRetrier(retryPolicy, SystemClock) - return &ConcurrentRetrier{retrier: retrier} -} - // ThrottleRetry is a resource aware version of Retry. // Resource exhausted error will be retried using a different throttle retry policy, instead of the specified one. func ThrottleRetry(operation Operation, policy RetryPolicy, isRetryable IsRetryable) error { @@ -137,14 +61,15 @@ func ThrottleRetryContext( deadline, hasDeadline := ctx.Deadline() - r := NewRetrier(policy, SystemClock) - t := NewRetrier(throttleRetryPolicy, SystemClock) + timeSrc := clock.NewRealTimeSource() + r := NewRetrier(policy, timeSrc) + t := NewRetrier(throttleRetryPolicy, timeSrc) for ctx.Err() == nil { if err = operation(ctx); err == nil { return nil } - if next = r.NextBackOff(); next == done { + if next = r.NextBackOff(err); next == done { return err } @@ -153,10 +78,10 @@ func ThrottleRetryContext( } if _, ok := err.(*serviceerror.ResourceExhausted); ok { - next = max(next, t.NextBackOff()) + next = max(next, t.NextBackOff(err)) } - if hasDeadline && SystemClock.Now().Add(next).After(deadline) { + if hasDeadline && timeSrc.Now().Add(next).After(deadline) { break } @@ -175,6 +100,66 @@ func ThrottleRetryContext( return ctx.Err() } +// ThrottleRetryContextWithReturn is a context and resource aware version of Retry. +// Context timeout/cancellation errors are never retried, regardless of IsRetryable. +// Resource exhausted error will be retried using a different throttle retry policy, instead of the specified one. +// TODO: allow customizing throttle retry policy and what kind of error are categorized as throttle error. 
+func ThrottleRetryContextWithReturn[T any]( + ctx context.Context, + fn func(context.Context) (T, error), + policy RetryPolicy, + isRetryable IsRetryable, +) (T, error) { + var zero T + var err error + var next time.Duration + + if isRetryable == nil { + isRetryable = func(error) bool { return true } + } + + deadline, hasDeadline := ctx.Deadline() + + timeSrc := clock.NewRealTimeSource() + r := NewRetrier(policy, timeSrc) + t := NewRetrier(throttleRetryPolicy, timeSrc) + for ctx.Err() == nil { + result, err := fn(ctx) + if err == nil { + return result, nil + } + + if next = r.NextBackOff(err); next == done { + return zero, err + } + + if err == ctx.Err() || !isRetryable(err) { + return zero, err + } + + if _, ok := err.(*serviceerror.ResourceExhausted); ok { + next = max(next, t.NextBackOff(err)) + } + + if hasDeadline && timeSrc.Now().Add(next).After(deadline) { + break + } + + timer := time.NewTimer(next) + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + } + } + // always return the last error we got from operation, even if it is not useful + // this retry utility does not have enough information to do any filtering/mapping + if err != nil { + return zero, err + } + return zero, ctx.Err() +} + // IgnoreErrors can be used as IsRetryable handler for Retry function to exclude certain errors from the retry list func IgnoreErrors(errorsToExclude []error) func(error) bool { return func(err error) bool { @@ -187,3 +172,40 @@ func IgnoreErrors(errorsToExclude []error) func(error) bool { return true } } + +// BackoffCalculatorAlgorithmFunc is a function type that calculates backoff duration based on +// initial duration, coefficient, and current attempt number. +type BackoffCalculatorAlgorithmFunc func(duration *durationpb.Duration, coefficient float64, currentAttempt int32) time.Duration + +// ExponentialBackoffAlgorithm calculates the backoff duration using exponential algorithm. 
+// The result is initInterval * (backoffCoefficient ^ (currentAttempt - 1)). +// If the calculation overflows int64, it returns the maximum possible duration. A negative result will also never be returned. +func ExponentialBackoffAlgorithm(initInterval *durationpb.Duration, backoffCoefficient float64, currentAttempt int32) time.Duration { + result := float64(initInterval.AsDuration().Nanoseconds()) * math.Pow(backoffCoefficient, float64(currentAttempt-1)) + return time.Duration(max(0, min(int64(result), math.MaxInt64))) +} + +// MakeBackoffAlgorithm creates a BackoffCalculatorAlgorithmFunc that returns a fixed delay if requestedDelay is non-nil, +// otherwise falls back to exponential backoff algorithm. +func MakeBackoffAlgorithm(requestedDelay *time.Duration) BackoffCalculatorAlgorithmFunc { + return func(duration *durationpb.Duration, coefficient float64, currentAttempt int32) time.Duration { + if requestedDelay != nil { + return *requestedDelay + } + return ExponentialBackoffAlgorithm(duration, coefficient, currentAttempt) + } +} + +// CalculateExponentialRetryInterval calculates the retry interval using exponential backoff algorithm +func CalculateExponentialRetryInterval(retryPolicy *commonpb.RetryPolicy, attempt int32) time.Duration { + interval := ExponentialBackoffAlgorithm(retryPolicy.GetInitialInterval(), retryPolicy.GetBackoffCoefficient(), attempt) + + maxInterval := retryPolicy.GetMaximumInterval() + + // Cap interval to maximum if it's set + if maxInterval.AsDuration() != 0 && interval > maxInterval.AsDuration() { + interval = maxInterval.AsDuration() + } + + return interval +} diff --git a/common/backoff/retry_test.go b/common/backoff/retry_test.go index 75ad9ed6986..3f4af506cb3 100644 --- a/common/backoff/retry_test.go +++ b/common/backoff/retry_test.go @@ -1,32 +1,7 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package backoff import ( "context" - "fmt" "testing" "time" @@ -144,51 +119,6 @@ func (s *RetrySuite) TestIsRetryableFailure() { s.Equal(1, i) } -func (s *RetrySuite) TestConcurrentRetrier() { - policy := NewExponentialRetryPolicy(1 * time.Millisecond). - WithMaximumInterval(10 * time.Millisecond). - WithMaximumAttempts(4) - - // Basic checks - retrier := NewConcurrentRetrier(policy) - retrier.Failed() - s.Equal(int64(1), retrier.failureCount) - retrier.Succeeded() - s.Equal(int64(0), retrier.failureCount) - sleepDuration := retrier.throttleInternal() - s.Equal(done, sleepDuration) - - // Multiple count check. - retrier.Failed() - retrier.Failed() - s.Equal(int64(2), retrier.failureCount) - // Verify valid sleep times. 
- ch := make(chan time.Duration, 3) - go func() { - for i := 0; i < 3; i++ { - ch <- retrier.throttleInternal() - } - }() - for i := 0; i < 3; i++ { - val := <-ch - fmt.Printf("Duration: %d\n", val) - s.True(val > 0) - } - retrier.Succeeded() - s.Equal(int64(0), retrier.failureCount) - // Verify we don't have any sleep times. - go func() { - for i := 0; i < 3; i++ { - ch <- retrier.throttleInternal() - } - }() - for i := 0; i < 3; i++ { - val := <-ch - fmt.Printf("Duration: %d\n", val) - s.Equal(done, val) - } -} - func (s *RetrySuite) TestRetryContextCancel() { ctx, cancel := context.WithCancel(context.Background()) cancel() @@ -205,7 +135,7 @@ func (s *RetrySuite) TestRetryContextTimeout() { err := ThrottleRetryContext(ctx, func(ctx context.Context) error { return &someError{} }, NewExponentialRetryPolicy(1*time.Second), retryEverything) elapsed := time.Since(start) - s.ErrorIs(err, &someError{}) + s.ErrorAs(err, new(*someError)) s.Less(elapsed, timeout, "Call to retry should return early if backoff exceeds context timeout") } @@ -247,7 +177,7 @@ func (s *RetrySuite) TestThrottleRetryContext() { return &someError{} } - start := SystemClock.Now() + start := time.Now() err := ThrottleRetryContext(context.Background(), op, policy, retryEverything) s.Equal(&someError{}, err) s.GreaterOrEqual( @@ -257,7 +187,7 @@ func (s *RetrySuite) TestThrottleRetryContext() { ) // test if context timeout is respected - start = SystemClock.Now() + start = time.Now() ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) err = ThrottleRetryContext(ctx, func(_ context.Context) error { return &serviceerror.ResourceExhausted{} }, policy, retryEverything) s.Equal(&serviceerror.ResourceExhausted{}, err) diff --git a/common/backoff/retrypolicy.go b/common/backoff/retrypolicy.go index 277ac56c57a..8893b5407e1 100644 --- a/common/backoff/retrypolicy.go +++ b/common/backoff/retrypolicy.go @@ -1,33 +1,13 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal 
Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package backoff import ( "math" "math/rand" + "sync" + "sync/atomic" "time" + + "go.temporal.io/server/common/clock" ) const ( @@ -40,32 +20,29 @@ const ( defaultMaximumInterval = 10 * time.Second defaultExpirationInterval = time.Minute defaultMaximumAttempts = noMaximumAttempts - - defaultFirstPhaseMaximumAttempts = 3 + defaultJitterPct = 0 ) var ( // DisabledRetryPolicy is a retry policy that never retries DisabledRetryPolicy RetryPolicy = &disabledRetryPolicyImpl{} + + // common 'globalToFile' rand instance, used in adding jitter to next interval in retry policy + jitterRand atomic.Pointer[rand.Rand] ) type ( // RetryPolicy is the API which needs to be implemented by various retry policy implementations RetryPolicy interface { - ComputeNextDelay(elapsedTime time.Duration, numAttempts int) time.Duration + ComputeNextDelay(elapsedTime time.Duration, numAttempts int, err error) time.Duration } // Retrier manages the state of retry operation Retrier interface { - NextBackOff() time.Duration + NextBackOff(err error) time.Duration Reset() } - // Clock used by ExponentialRetryPolicy implementation to get the current time. Mainly used for unit testing - Clock interface { - Now() time.Time - } - // ExponentialRetryPolicy provides the implementation for retry policy using a coefficient to compute the next delay. // Formula used to compute the next delay is: // min(initialInterval * pow(backoffCoefficient, currentAttempt), maximumInterval) @@ -77,29 +54,30 @@ type ( maximumAttempts int } - // TwoPhaseRetryPolicy implements a policy that first use one policy to get next delay, - // and once expired use the second policy for the following retry. - // It can achieve fast retries in first phase then slowly retires in second phase. - TwoPhaseRetryPolicy struct { - firstPolicy RetryPolicy - secondPolicy RetryPolicy + // ErrorDependentRetryPolicy is a policy that computes the next delay time based on the error returned by the + // operation. 
The delay time to use for a particular error is determined by the delayForError function. + ErrorDependentRetryPolicy struct { + maximumAttempts int + jitterPct float64 + delayForError func(err error) time.Duration } - disabledRetryPolicyImpl struct{} + ConstantDelayRetryPolicy struct { + maximumAttempts int + jitterPct float64 + delay time.Duration + } - systemClock struct{} + disabledRetryPolicyImpl struct{} retrierImpl struct { policy RetryPolicy - clock Clock + timeSource clock.TimeSource currentAttempt int startTime time.Time } ) -// SystemClock implements Clock interface that uses time.Now().UTC(). -var SystemClock = systemClock{} - // NewExponentialRetryPolicy returns an instance of ExponentialRetryPolicy using the provided initialInterval func NewExponentialRetryPolicy(initialInterval time.Duration) *ExponentialRetryPolicy { p := &ExponentialRetryPolicy{ @@ -114,11 +92,11 @@ func NewExponentialRetryPolicy(initialInterval time.Duration) *ExponentialRetryP } // NewRetrier is used for creating a new instance of Retrier -func NewRetrier(policy RetryPolicy, clock Clock) Retrier { +func NewRetrier(policy RetryPolicy, timeSource clock.TimeSource) Retrier { return &retrierImpl{ policy: policy, - clock: clock, - startTime: clock.Now(), + timeSource: timeSource, + startTime: timeSource.Now(), currentAttempt: 1, } } @@ -160,7 +138,7 @@ func (p *ExponentialRetryPolicy) WithMaximumAttempts(maximumAttempts int) *Expon } // ComputeNextDelay returns the next delay interval. 
This is used by Retrier to delay calling the operation again -func (p *ExponentialRetryPolicy) ComputeNextDelay(elapsedTime time.Duration, numAttempts int) time.Duration { +func (p *ExponentialRetryPolicy) ComputeNextDelay(elapsedTime time.Duration, numAttempts int, _ error) time.Duration { // Check to see if we ran out of maximum number of attempts // NOTE: if maxAttempts is X, return done when numAttempts == X, otherwise there will be attempt X+1 if p.maximumAttempts != noMaximumAttempts && numAttempts >= p.maximumAttempts { @@ -192,44 +170,35 @@ func (p *ExponentialRetryPolicy) ComputeNextDelay(elapsedTime time.Duration, num return done } + nextInterval = p.addJitter(nextInterval) + + return time.Duration(nextInterval) +} + +func (p *ExponentialRetryPolicy) addJitter(nextInterval float64) float64 { // add jitter to avoid global synchronization jitterPortion := int(0.2 * nextInterval) // Prevent overflow if jitterPortion < 1 { jitterPortion = 1 } - nextInterval = nextInterval*0.8 + float64(rand.Intn(jitterPortion)) - - return time.Duration(nextInterval) -} - -// ComputeNextDelay returns the next delay interval. 
-func (tp *TwoPhaseRetryPolicy) ComputeNextDelay(elapsedTime time.Duration, numAttempts int) time.Duration { - nextInterval := tp.firstPolicy.ComputeNextDelay(elapsedTime, numAttempts) - if nextInterval == done { - nextInterval = tp.secondPolicy.ComputeNextDelay(elapsedTime, numAttempts-defaultFirstPhaseMaximumAttempts) - } + nextInterval = nextInterval*0.8 + float64(getJitterRand().Intn(jitterPortion)) return nextInterval } -func (r *disabledRetryPolicyImpl) ComputeNextDelay(_ time.Duration, _ int) time.Duration { +func (r *disabledRetryPolicyImpl) ComputeNextDelay(_ time.Duration, _ int, _ error) time.Duration { return done } -// Now returns the current time using the system clock -func (t systemClock) Now() time.Time { - return time.Now().UTC() -} - // Reset will set the Retrier into initial state func (r *retrierImpl) Reset() { - r.startTime = r.clock.Now() + r.startTime = r.timeSource.Now() r.currentAttempt = 1 } // NextBackOff returns the next delay interval. This is used by Retry to delay calling the operation again -func (r *retrierImpl) NextBackOff() time.Duration { - nextInterval := r.policy.ComputeNextDelay(r.getElapsedTime(), r.currentAttempt) +func (r *retrierImpl) NextBackOff(err error) time.Duration { + nextInterval := r.policy.ComputeNextDelay(r.getElapsedTime(), r.currentAttempt, err) // Now increment the current attempt r.currentAttempt++ @@ -237,5 +206,115 @@ func (r *retrierImpl) NextBackOff() time.Duration { } func (r *retrierImpl) getElapsedTime() time.Duration { - return r.clock.Now().Sub(r.startTime) + return r.timeSource.Now().Sub(r.startTime) +} + +var _ RetryPolicy = (*ErrorDependentRetryPolicy)(nil) + +func NewErrorDependentRetryPolicy(delayForError func(err error) time.Duration) *ErrorDependentRetryPolicy { + return &ErrorDependentRetryPolicy{ + maximumAttempts: defaultMaximumAttempts, + delayForError: delayForError, + jitterPct: defaultJitterPct, + } +} + +func (p *ErrorDependentRetryPolicy) WithMaximumAttempts(maximumAttempts int) 
*ErrorDependentRetryPolicy { + p.maximumAttempts = maximumAttempts + return p +} + +func (p *ErrorDependentRetryPolicy) WithJitter(jitterPct float64) *ErrorDependentRetryPolicy { + p.jitterPct = jitterPct + return p +} + +func (p *ErrorDependentRetryPolicy) ComputeNextDelay(_ time.Duration, attempt int, err error) time.Duration { + if p.maximumAttempts != noMaximumAttempts && attempt >= p.maximumAttempts { + return done + } + + return addJitter(p.delayForError(err), p.jitterPct) +} + +var _ RetryPolicy = (*ConstantDelayRetryPolicy)(nil) + +func NewConstantDelayRetryPolicy(delay time.Duration) *ConstantDelayRetryPolicy { + return &ConstantDelayRetryPolicy{ + maximumAttempts: defaultMaximumAttempts, + jitterPct: defaultJitterPct, + delay: delay, + } +} + +func (p *ConstantDelayRetryPolicy) WithMaximumAttempts(maximumAttempts int) *ConstantDelayRetryPolicy { + p.maximumAttempts = maximumAttempts + return p +} + +func (p *ConstantDelayRetryPolicy) WithJitter(jitterPct float64) *ConstantDelayRetryPolicy { + p.jitterPct = jitterPct + return p +} + +func (p *ConstantDelayRetryPolicy) ComputeNextDelay(_ time.Duration, attempt int, _ error) time.Duration { + if p.maximumAttempts != noMaximumAttempts && attempt >= p.maximumAttempts { + return done + } + + return addJitter(p.delay, p.jitterPct) +} + +func addJitter(duration time.Duration, jitterPct float64) time.Duration { + return duration * time.Duration(1+jitterPct*rand.Float64()) +} + +func getJitterRand() *rand.Rand { + if r := jitterRand.Load(); r != nil { + return r + } + r := rand.New(NewRetryLockedSource()) + + if !jitterRand.CompareAndSwap(nil, r) { + // Two different goroutines called some top-level + // function at the same time. While the results in + // that case are unpredictable, if we just use r here, + // and we are using a seed, we will most likely return + // the same value for both calls. That doesn't seem ideal. + // Just use the first one to get in. 
+ return jitterRand.Load() + } + + return r +} + +// We want to wrap our rng source with mutex, because the one in math/rand is used by other clients, +// so all of them are contending for the same mutex. +// Proper solution will be to use standard thread safe Rng source, but until Go 2 it seems it will not happen. +// See the following discussions for details +// https://github.com/golang/go/issues/24121 <- main +// https://github.com/stripe/veneur/pull/466 -< make rng source faster +// https://github.com/golang/go/issues/25057 +// https://github.com/golang/go/issues/21393 + +type RetryLockedSource struct { + lk sync.Mutex + s rand.Source +} + +func (r *RetryLockedSource) Int63() int64 { + r.lk.Lock() + defer r.lk.Unlock() + return r.s.Int63() +} + +func (r *RetryLockedSource) Seed(seed int64) { + panic("internal error: call to RetryLockedSource.Seed") +} + +func NewRetryLockedSource() *RetryLockedSource { + return &RetryLockedSource{ + lk: sync.Mutex{}, + s: rand.NewSource(time.Now().UnixNano()), + } } diff --git a/common/backoff/retrypolicy_test.go b/common/backoff/retrypolicy_test.go index ddd96fdea83..78ee024a282 100644 --- a/common/backoff/retrypolicy_test.go +++ b/common/backoff/retrypolicy_test.go @@ -1,30 +1,7 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package backoff import ( + "errors" "fmt" "math/rand" "testing" @@ -32,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "go.temporal.io/server/common/clock" ) type ( @@ -39,17 +17,14 @@ type ( *require.Assertions // override suite.Suite.Assertions with require.Assertions; this means that s.NotNil(nil) will stop the test, not merely log an error suite.Suite } - - TestClock struct { - currentTime time.Time - } ) // ExampleExponentialRetryPolicy_WithMaximumInterval demonstrates example delays with a backoff coefficient of 2 and a // maximum interval of 10 seconds. Keep in mind that there is a random jitter in these times, so they are not exactly // what you'd expect. func ExampleExponentialRetryPolicy_WithMaximumInterval() { - rand.Seed(42) + jitterRand.Store(rand.New(rand.NewSource(42))) + p1 := NewExponentialRetryPolicy(time.Second). WithBackoffCoefficient(2.0). WithMaximumInterval(0). 
@@ -60,9 +35,9 @@ func ExampleExponentialRetryPolicy_WithMaximumInterval() { p2 = p2.WithMaximumInterval(time.Second * 10) var e1, e2 time.Duration fmt.Printf("%-10s| %15s| %15s\n", "Attempt", "Delay", "Capped Delay") - for attempts := 0; attempts < 10; attempts++ { - d1 := p1.ComputeNextDelay(e1, attempts) - d2 := p2.ComputeNextDelay(e2, attempts) + for attempts := range 10 { + d1 := p1.ComputeNextDelay(e1, attempts, nil) + d2 := p2.ComputeNextDelay(e2, attempts, nil) e1 += d1 e2 += d2 _, _ = fmt.Printf( @@ -106,7 +81,7 @@ func (s *RetryPolicySuite) TestExponentialBackoff() { r, _ := createRetrier(policy) for _, expected := range expectedResult { min, max := getNextBackoffRange(expected) - next := r.NextBackOff() + next := r.NextBackOff(nil) s.True(next >= min, "NextBackoff too low") s.True(next < max, "NextBackoff too high") } @@ -120,11 +95,11 @@ func (s *RetryPolicySuite) TestNumberOfAttempts() { r, _ := createRetrier(policy) var next time.Duration for i := 0; i < maxAttempts-1; i++ { - next = r.NextBackOff() + next = r.NextBackOff(nil) s.NotEqual(done, next) } - s.Equal(done, r.NextBackOff()) + s.Equal(done, r.NextBackOff(nil)) } // Test to make sure relative maximum interval for each retry is honoured @@ -140,7 +115,7 @@ func (s *RetryPolicySuite) TestMaximumInterval() { r, _ := createRetrier(policy) for _, expected := range expectedResult { min, max := getNextBackoffRange(expected) - next := r.NextBackOff() + next := r.NextBackOff(nil) s.True(next >= min, "NextBackoff too low") s.True(next < max, "NextBackoff too high") } @@ -152,8 +127,8 @@ func (s *RetryPolicySuite) TestBackoffCoefficient() { r, _ := createRetrier(policy) min, max := getNextBackoffRange(2 * time.Second) - for i := 0; i < 10; i++ { - next := r.NextBackOff() + for range 10 { + next := r.NextBackOff(nil) s.True(next >= min, "NextBackoff too low") s.True(next < max, "NextBackoff too high") } @@ -163,9 +138,9 @@ func (s *RetryPolicySuite) TestExpirationInterval() { policy := createPolicy(2 * 
time.Second). WithExpirationInterval(5 * time.Minute) - r, clock := createRetrier(policy) - clock.moveClock(6 * time.Minute) - next := r.NextBackOff() + r, ts := createRetrier(policy) + ts.Advance(6 * time.Minute) + next := r.NextBackOff(nil) s.Equal(done, next) } @@ -174,15 +149,15 @@ func (s *RetryPolicySuite) TestExpirationOverflow() { policy := createPolicy(2 * time.Second). WithExpirationInterval(5 * time.Second) - r, clock := createRetrier(policy) - next := r.NextBackOff() + r, ts := createRetrier(policy) + next := r.NextBackOff(nil) min, max := getNextBackoffRange(2 * time.Second) s.True(next >= min, "NextBackoff too low") s.True(next < max, "NextBackoff too high") - clock.moveClock(2 * time.Second) + ts.Advance(2 * time.Second) - next = r.NextBackOff() + next = r.NextBackOff(nil) min, max = getNextBackoffRange(3 * time.Second) s.True(next >= min, "NextBackoff too low") s.True(next < max, "NextBackoff too high") @@ -193,7 +168,7 @@ func (s *RetryPolicySuite) TestDefaultPublishRetryPolicy() { WithExpirationInterval(time.Minute). WithMaximumInterval(10 * time.Second) - r, clock := createRetrier(policy) + r, ts := createRetrier(policy) expectedResult := []time.Duration{ 50 * time.Millisecond, 100 * time.Millisecond, @@ -212,14 +187,14 @@ func (s *RetryPolicySuite) TestDefaultPublishRetryPolicy() { } for _, expected := range expectedResult { - next := r.NextBackOff() + next := r.NextBackOff(nil) if expected == done { s.Equal(done, next, "backoff not done yet!!!") } else { min, max := getNextBackoffRange(expected) s.True(next >= min, "NextBackoff too low: actual: %v, min: %v", next, min) s.True(next < max, "NextBackoff too high: actual: %v, max: %v", next, max) - clock.moveClock(expected) + ts.Advance(expected) } } } @@ -229,31 +204,105 @@ func (s *RetryPolicySuite) TestNoMaxAttempts() { WithExpirationInterval(time.Minute). 
WithMaximumInterval(10 * time.Second) - r, clock := createRetrier(policy) - for i := 0; i < 100; i++ { - next := r.NextBackOff() + r, ts := createRetrier(policy) + for range 100 { + next := r.NextBackOff(nil) s.True(next > 0 || next == done, "Unexpected value for next retry duration: %v", next) - clock.moveClock(next) + ts.Advance(next) } } func (s *RetryPolicySuite) TestUnbounded() { policy := createPolicy(50 * time.Millisecond) - r, clock := createRetrier(policy) - for i := 0; i < 100; i++ { - next := r.NextBackOff() + r, ts := createRetrier(policy) + for range 100 { + next := r.NextBackOff(nil) s.True(next > 0 || next == done, "Unexpected value for next retry duration: %v", next) - clock.moveClock(next) + ts.Advance(next) } } -func (c *TestClock) Now() time.Time { - return c.currentTime +// Validate that ErrorDependentRetryPolicy returns the expected delay for a given error, with and without jitter +func (s *RetryPolicySuite) TestErrorDependentPolicy() { + var twoSecondError = fmt.Errorf("two seconds") + var threeSecondError = fmt.Errorf("two seconds") + + delayForError := func(err error) time.Duration { + switch { + case errors.Is(err, twoSecondError): + return 2 * time.Second + case errors.Is(err, threeSecondError): + return 3 * time.Second + default: + return 1 * time.Second + } + } + + policy := NewErrorDependentRetryPolicy(delayForError).WithMaximumAttempts(4) + retrier, ts := createRetrier(policy) + + delay := retrier.NextBackOff(fmt.Errorf("other error")) + s.Equal(1*time.Second, delay) + ts.Advance(delay) + + delay = retrier.NextBackOff(twoSecondError) + s.Equal(2*time.Second, delay) + ts.Advance(delay) + + delay = retrier.NextBackOff(threeSecondError) + s.Equal(3*time.Second, delay) + ts.Advance(delay) + + delay = retrier.NextBackOff(threeSecondError) + s.Equal(done, delay) + + // test with jitter + policy = NewErrorDependentRetryPolicy(delayForError).WithMaximumAttempts(4).WithJitter(0.1) + retrier, _ = createRetrier(policy) + + delay = 
retrier.NextBackOff(fmt.Errorf("other error")) + s.True(delay >= 1*time.Second) + s.True(delay < 1500*time.Millisecond) +} + +func (s *RetryPolicySuite) TestConstantDelayPolicy() { + policy := NewConstantDelayRetryPolicy(2 * time.Second).WithMaximumAttempts(4) + retrier, ts := createRetrier(policy) + + delay := retrier.NextBackOff(nil) + s.Equal(2*time.Second, delay) + ts.Advance(delay) + + delay = retrier.NextBackOff(nil) + s.Equal(2*time.Second, delay) + ts.Advance(delay) + + delay = retrier.NextBackOff(nil) + s.Equal(2*time.Second, delay) + ts.Advance(delay) + + delay = retrier.NextBackOff(nil) + s.Equal(done, delay) + + // test with jitter + policy = NewConstantDelayRetryPolicy(2 * time.Second).WithMaximumAttempts(4).WithJitter(0.1) + retrier, _ = createRetrier(policy) + + delay = retrier.NextBackOff(nil) + s.True(delay >= 2*time.Second) + s.True(delay < 2200*time.Millisecond) } -func (c *TestClock) moveClock(duration time.Duration) { - c.currentTime = c.currentTime.Add(duration) +// Validate jitter computation +func (s *RetryPolicySuite) TestAddJitter() { + for range 10 { + delay := 1 * time.Second + jitter := 0.5 + jitteredDelay := addJitter(delay, jitter) + s.True(jitteredDelay >= 1*time.Second) + s.True(jitteredDelay < 1500*time.Millisecond) + } } func createPolicy(initialInterval time.Duration) *ExponentialRetryPolicy { @@ -266,9 +315,10 @@ func createPolicy(initialInterval time.Duration) *ExponentialRetryPolicy { return policy } -func createRetrier(policy RetryPolicy) (Retrier, *TestClock) { - clock := &TestClock{currentTime: time.Time{}} - return NewRetrier(policy, clock), clock +func createRetrier(policy RetryPolicy) (Retrier, *clock.EventTimeSource) { + ts := clock.NewEventTimeSource() + ts.Update(time.Time{}) + return NewRetrier(policy, ts), ts } func getNextBackoffRange(duration time.Duration) (time.Duration, time.Duration) { diff --git a/common/build/build.go b/common/build/build.go index b875f11084c..d989d55a960 100644 --- a/common/build/build.go 
+++ b/common/build/build.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package build import ( diff --git a/common/cache/cache.go b/common/cache/cache.go index 0893446f36e..757171dcba9 100644 --- a/common/cache/cache.go +++ b/common/cache/cache.go @@ -1,33 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package cache import ( "time" "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/dynamicconfig" ) // A Cache is a generalized interface to a cache. See cache.LRU for a specific @@ -35,20 +12,20 @@ import ( type Cache interface { // Get retrieves an element based on a key, returning nil if the element // does not exist - Get(key interface{}) interface{} + Get(key any) any // Put adds an element to the cache, returning the previous element - Put(key interface{}, value interface{}) interface{} + Put(key any, value any) any // PutIfNotExist puts a value associated with a given key if it does not exist - PutIfNotExist(key interface{}, value interface{}) (interface{}, error) + PutIfNotExist(key any, value any) (any, error) // Delete deletes an element in the cache - Delete(key interface{}) + Delete(key any) // Release decrements the ref count of a pinned element. 
If the ref count // drops to 0, the element can be evicted from the cache. - Release(key interface{}) + Release(key any) // Iterator returns the iterator of the cache Iterator() Iterator @@ -58,7 +35,14 @@ type Cache interface { Size() int } -// Options control the behavior of the cache +type StoppableCache interface { + Cache + + // Stop halts any background processing, and should be called when the cache will no longer be used. + Stop() +} + +// Options control the behavior of the cache. type Options struct { // TTL controls the time-to-live for a given cache entry. Cache entries that // are older than the TTL will not be returned. @@ -69,9 +53,16 @@ type Options struct { // TimeSource is an optional clock to use for time-skipping and testing. If this is nil, a real clock will be used. TimeSource clock.TimeSource + + OnPut func(val any) + + OnEvict func(val any) + + // BackgroundEvict configures background scanning for expired entries. + BackgroundEvict func() dynamicconfig.CacheBackgroundEvictSettings } -// SimpleOptions provides options that can be used to configure SimpleCache +// SimpleOptions provides options that can be used to configure SimpleCache. type SimpleOptions struct { // RemovedFunc is an optional function called when an element // is scheduled for deletion @@ -82,9 +73,9 @@ type SimpleOptions struct { // scheduled for removal from the Cache. If f is a function with the // appropriate signature and i is the interface{} scheduled for // deletion, Cache calls go f(i) -type RemovedFunc func(interface{}) +type RemovedFunc func(any) -// Iterator represents the interface for cache iterators +// Iterator represents the interface for cache iterators. type Iterator interface { // Close closes the iterator // and releases any allocated resources @@ -95,12 +86,12 @@ type Iterator interface { Next() Entry } -// Entry represents a key-value entry within the map +// Entry represents a key-value entry within the map. 
type Entry interface { // Key represents the key - Key() interface{} + Key() any // Value represents the value - Value() interface{} + Value() any // CreateTime represents the time when the entry is created CreateTime() time.Time } diff --git a/common/cache/lru.go b/common/cache/lru.go index c3a8ac5601e..c44353e4e12 100644 --- a/common/cache/lru.go +++ b/common/cache/lru.go @@ -1,46 +1,26 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package cache import ( "container/list" + "context" "sync" "time" enumspb "go.temporal.io/api/enums/v1" "go.temporal.io/api/serviceerror" "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/goro" "go.temporal.io/server/common/metrics" ) var ( // ErrCacheFull is returned if Put fails due to cache being filled with pinned elements - ErrCacheFull = serviceerror.NewResourceExhausted( - enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED, - "cache capacity is fully occupied with pinned elements", - ) + ErrCacheFull = &serviceerror.ResourceExhausted{ + Cause: enumspb.RESOURCE_EXHAUSTED_CAUSE_SYSTEM_OVERLOADED, + Scope: enumspb.RESOURCE_EXHAUSTED_SCOPE_SYSTEM, + Message: "cache capacity is fully occupied with pinned elements", + } // ErrCacheItemTooLarge is returned if Put fails due to item size being larger than max cache capacity ErrCacheItemTooLarge = serviceerror.NewInternal("cache item size is larger than max cache capacity") ) @@ -50,16 +30,20 @@ const emptyEntrySize = 0 // lru is a concurrent fixed size cache that evicts elements in lru order type ( lru struct { - mut sync.Mutex - byAccess *list.List - byKey map[interface{}]*list.Element - maxSize int - currSize int - pinnedSize int - ttl time.Duration - pin bool - timeSource clock.TimeSource - metricsHandler metrics.Handler + mut sync.Mutex + byAccess *list.List + byKey map[any]*list.Element + maxSize int + currSize int + pinnedSize int + onPut func(val any) + onEvict func(val any) + ttl time.Duration + pin bool + timeSource clock.TimeSource + metricsHandler metrics.Handler + backgroundEvict dynamicconfig.TypedPropertyFn[dynamicconfig.CacheBackgroundEvictSettings] + loops goro.Group } iteratorImpl struct { @@ -69,9 +53,9 @@ type ( } entryImpl struct { - key interface{} + key any createTime time.Time - value interface{} + value any refCount int size int } @@ -133,11 +117,11 @@ func (c *lru) Iterator() Iterator { return iterator } -func (entry 
*entryImpl) Key() interface{} { +func (entry *entryImpl) Key() any { return entry.key } -func (entry *entryImpl) Value() interface{} { +func (entry *entryImpl) Value() any { return entry.value } @@ -150,10 +134,26 @@ func (entry *entryImpl) CreateTime() time.Time { } // New creates a new cache with the given options -func New(maxSize int, opts *Options, handler metrics.Handler) Cache { +func New(maxSize int, opts *Options) StoppableCache { + return NewWithMetrics(maxSize, opts, metrics.NoopMetricsHandler) +} + +// NewWithMetrics creates a new cache that will emit capacity and ttl metrics. +// handler should be tagged with metrics.CacheTypeTag. +func NewWithMetrics(maxSize int, opts *Options, handler metrics.Handler) StoppableCache { if opts == nil { opts = &Options{} } + + backgroundEvict := opts.BackgroundEvict + if backgroundEvict == nil { + backgroundEvict = func() dynamicconfig.CacheBackgroundEvictSettings { + return dynamicconfig.CacheBackgroundEvictSettings{ + Enabled: false, + } + } + } + timeSource := opts.TimeSource if timeSource == nil { timeSource = clock.NewRealTimeSource() @@ -161,26 +161,33 @@ func New(maxSize int, opts *Options, handler metrics.Handler) Cache { metrics.CacheSize.With(handler).Record(float64(maxSize)) metrics.CacheTtl.With(handler).Record(opts.TTL) - return &lru{ - byAccess: list.New(), - byKey: make(map[interface{}]*list.Element), - ttl: opts.TTL, - maxSize: maxSize, - currSize: 0, - pin: opts.Pin, - timeSource: timeSource, - metricsHandler: handler, - } + c := &lru{ + byAccess: list.New(), + byKey: make(map[any]*list.Element), + ttl: opts.TTL, + maxSize: maxSize, + currSize: 0, + pin: opts.Pin, + onPut: opts.OnPut, + onEvict: opts.OnEvict, + timeSource: timeSource, + metricsHandler: handler, + backgroundEvict: backgroundEvict, + } + if c.backgroundEvict().Enabled { + c.loops.Go(c.bgEvictLoop) + } + return c } // NewLRU creates a new LRU cache of the given size, setting initial capacity // to the max size -func NewLRU(maxSize int, 
handler metrics.Handler) Cache { - return New(maxSize, nil, handler) +func NewLRU(maxSize int, handler metrics.Handler) StoppableCache { + return New(maxSize, nil) } // Get retrieves the value stored under the given key -func (c *lru) Get(key interface{}) interface{} { +func (c *lru) Get(key any) any { if c.maxSize == 0 { // return nil } @@ -194,21 +201,21 @@ func (c *lru) Get(key interface{}) interface{} { entry := element.Value.(*entryImpl) - metrics.CacheEntryAgeOnGet.With(c.metricsHandler).Record(c.timeSource.Now().UTC().Sub(entry.createTime)) - if c.isEntryExpired(entry, c.timeSource.Now().UTC()) { // Entry has expired c.deleteInternal(element) return nil } + metrics.CacheEntryAgeOnGet.With(c.metricsHandler).Record(c.timeSource.Now().UTC().Sub(entry.createTime)) + c.updateEntryRefCount(entry) c.byAccess.MoveToFront(element) return entry.value } // Put puts a new value associated with a given key, returning the existing value (if present) -func (c *lru) Put(key interface{}, value interface{}) interface{} { +func (c *lru) Put(key any, value any) any { if c.pin { panic("Cannot use Put API in Pin mode. Use Delete and PutIfNotExist if necessary") } @@ -217,7 +224,7 @@ func (c *lru) Put(key interface{}, value interface{}) interface{} { } // PutIfNotExist puts a value associated with a given key if it does not exist -func (c *lru) PutIfNotExist(key interface{}, value interface{}) (interface{}, error) { +func (c *lru) PutIfNotExist(key any, value any) (any, error) { existing, err := c.putInternal(key, value, false) if err != nil { return nil, err @@ -232,7 +239,7 @@ func (c *lru) PutIfNotExist(key interface{}, value interface{}) (interface{}, er } // Delete deletes a key, value pair associated with a key -func (c *lru) Delete(key interface{}) { +func (c *lru) Delete(key any) { if c.maxSize == 0 { return } @@ -246,7 +253,7 @@ func (c *lru) Delete(key interface{}) { } // Release decrements the ref count of a pinned element. 
-func (c *lru) Release(key interface{}) { +func (c *lru) Release(key any) { if c.maxSize == 0 || !c.pin { return } @@ -286,7 +293,7 @@ func (c *lru) Size() int { // Put puts a new value associated with a given key, returning the existing value (if present) // allowUpdate flag is used to control overwrite behavior if the value exists. -func (c *lru) putInternal(key interface{}, value interface{}, allowUpdate bool) (interface{}, error) { +func (c *lru) putInternal(key any, value any, allowUpdate bool) (any, error) { if c.maxSize == 0 { return nil, nil } @@ -302,8 +309,9 @@ func (c *lru) putInternal(key interface{}, value interface{}, allowUpdate bool) // If the entry exists, check if it has expired or update the value if elt != nil { existingEntry := elt.Value.(*entryImpl) - if !c.isEntryExpired(existingEntry, time.Now().UTC()) { + if !c.isEntryExpired(existingEntry, c.timeSource.Now().UTC()) { existingVal := existingEntry.value + if allowUpdate { newCacheSize := c.calculateNewCacheSize(newEntrySize, existingEntry.Size()) if newCacheSize > c.maxSize { @@ -323,6 +331,10 @@ func (c *lru) putInternal(key interface{}, value interface{}, allowUpdate bool) c.currSize = newCacheSize metrics.CacheUsage.With(c.metricsHandler).Record(float64(c.currSize)) c.updateEntryTTL(existingEntry) + + if c.onPut != nil { + c.onPut(value) + } } c.updateEntryRefCount(existingEntry) @@ -347,13 +359,17 @@ func (c *lru) putInternal(key interface{}, value interface{}, allowUpdate bool) value: value, size: newEntrySize, } - c.updateEntryTTL(entry) c.updateEntryRefCount(entry) element := c.byAccess.PushFront(entry) c.byKey[key] = element c.currSize = newCacheSize metrics.CacheUsage.With(c.metricsHandler).Record(float64(c.currSize)) + + if c.onPut != nil { + c.onPut(value) + } + return nil, nil } @@ -367,14 +383,18 @@ func (c *lru) deleteInternal(element *list.Element) { metrics.CacheUsage.With(c.metricsHandler).Record(float64(c.currSize)) 
metrics.CacheEntryAgeOnEviction.With(c.metricsHandler).Record(c.timeSource.Now().UTC().Sub(entry.createTime)) delete(c.byKey, entry.key) + + if c.onEvict != nil { + c.onEvict(entry.value) + } } -// tryEvictUntilSizeUnderLimit tries to evict entries until c.currSize is less than c.maxSize. +// tryEvictUntilCacheSizeUnderLimit tries to evict entries until c.currSize is less than c.maxSize. func (c *lru) tryEvictUntilCacheSizeUnderLimit() { c.tryEvictUntilEnoughSpaceWithSkipEntry(0, nil) } -// tryEvictUntilEnoughSpace try to evict entries until there is enough space for the new entry without +// tryEvictUntilEnoughSpaceWithSkipEntry try to evict entries until there is enough space for the new entry without // evicting the existing entry. the existing entry is skipped because it is being updated. func (c *lru) tryEvictUntilEnoughSpaceWithSkipEntry(newEntrySize int, existingEntry *entryImpl) { element := c.byAccess.Back() @@ -424,3 +444,54 @@ func (c *lru) updateEntryRefCount(entry *entryImpl) { } } } + +func (c *lru) Stop() { + c.loops.Cancel() +} + +func (c *lru) bgEvictLoop(ctx context.Context) error { + ch, t := c.timeSource.NewTimer(c.backgroundEvict().LoopInterval) + for { + select { + case <-ch: + settings := c.backgroundEvict() + if settings.Enabled { + c.bgEvict(settings) + } + t.Reset(settings.LoopInterval) + case <-ctx.Done(): + return ctx.Err() + } + } +} + +func (c *lru) bgEvict(settings dynamicconfig.CacheBackgroundEvictSettings) { + now := c.timeSource.Now().UTC() + + // Limit each iteration to scanning MaxEntryPerCall entries, to avoid holding the cache lock for too long. 
+ evictToMax := func() (again bool) { + c.mut.Lock() + defer c.mut.Unlock() + + element := c.byAccess.Back() + if settings.MaxEntryPerCall <= 0 { + return false + } + for n := 0; n < settings.MaxEntryPerCall; n++ { + if element == nil { + return false + } + elementPrev := element.Prev() + entry := element.Value.(*entryImpl) // nolint:revive + if !c.isEntryExpired(entry, now) { + return false + } + c.deleteInternal(element) + element = elementPrev + } + return element != nil + } + + for evictToMax() { + } +} diff --git a/common/cache/lru_test.go b/common/cache/lru_test.go index 9e6bcbf6153..bb73bc38ab4 100644 --- a/common/cache/lru_test.go +++ b/common/cache/lru_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package cache import ( @@ -32,7 +8,9 @@ import ( "github.com/google/uuid" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/metrics/metricstest" ) @@ -42,7 +20,6 @@ type ( dummyString string dummyInt int } - testEntryWithCacheSize struct { cacheSize int } @@ -57,7 +34,7 @@ func TestLRU(t *testing.T) { metricsHandler := metricstest.NewCaptureHandler() capture := metricsHandler.StartCapture() - cache := NewLRU(4, metricsHandler) + cache := NewWithMetrics(4, nil, metricsHandler) cache.Put("A", "Foo") assert.Equal(t, "Foo", cache.Get("A")) @@ -142,7 +119,7 @@ func TestLRUWithTTL(t *testing.T) { timeSource := clock.NewEventTimeSource() metricsHandler := metricstest.NewCaptureHandler() capture := metricsHandler.StartCapture() - cache := New(5, + cache := NewWithMetrics(5, &Options{ TTL: time.Millisecond * 100, TimeSource: timeSource, @@ -162,8 +139,7 @@ func TestLRUWithTTL(t *testing.T) { assert.Equal(t, 2, len(snapshot[metrics.CacheUsage.Name()])) assert.Equal(t, float64(0), snapshot[metrics.CacheUsage.Name()][1].Value) assert.Equal(t, 0, cache.Size()) - assert.Equal(t, 2, len(snapshot[metrics.CacheEntryAgeOnGet.Name()])) - assert.Equal(t, time.Millisecond*300, snapshot[metrics.CacheEntryAgeOnGet.Name()][1].Value) + assert.Equal(t, 1, len(snapshot[metrics.CacheEntryAgeOnGet.Name()])) assert.Equal(t, time.Millisecond*300, snapshot[metrics.CacheEntryAgeOnEviction.Name()][0].Value) } @@ -185,7 +161,7 @@ func TestLRUCacheConcurrentAccess(t *testing.T) { start := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 20; i++ { + for range 20 { wg.Add(2) // concurrent get and put @@ -194,7 +170,7 @@ func TestLRUCacheConcurrentAccess(t *testing.T) { <-start - for j := 0; j < 1000; j++ { + for range 1000 { cache.Get("A") cache.Put("A", "fooo") } @@ -206,7 +182,7 @@ func 
TestLRUCacheConcurrentAccess(t *testing.T) { <-start - for j := 0; j < 50; j++ { + for range 50 { it := cache.Iterator() for it.HasNext() { _ = it.Next() @@ -229,7 +205,6 @@ func TestTTL(t *testing.T) { TTL: time.Millisecond * 50, TimeSource: timeSource, }, - metrics.NoopMetricsHandler, ) cache.Put("A", t) @@ -243,7 +218,7 @@ func TestTTLWithPin(t *testing.T) { timeSource := clock.NewEventTimeSource() metricsHandler := metricstest.NewCaptureHandler() - cache := New(5, + cache := NewWithMetrics(5, &Options{ TTL: time.Millisecond * 50, Pin: true, @@ -287,7 +262,6 @@ func TestMaxSizeWithPin_MidItem(t *testing.T) { Pin: true, TimeSource: timeSource, }, - metrics.NoopMetricsHandler, ) _, err := cache.PutIfNotExist("A", t) @@ -336,7 +310,6 @@ func TestMaxSizeWithPin_LastItem(t *testing.T) { Pin: true, TimeSource: timeSource, }, - metrics.NoopMetricsHandler, ) _, err := cache.PutIfNotExist("A", t) @@ -464,7 +437,7 @@ func TestCache_ItemHasCacheSizeDefined(t *testing.T) { startWG.Wait() assert.True(t, cache.Size() < maxTotalBytes) }() - for i := 0; i < numPuts; i++ { + for range numPuts { go func() { defer endWG.Done() @@ -627,7 +600,7 @@ func TestCache_PutIfNotExistWithNewKeys_Pin(t *testing.T) { t.Parallel() maxTotalBytes := 10 - cache := New(maxTotalBytes, &Options{Pin: true}, metrics.NoopMetricsHandler) + cache := New(maxTotalBytes, &Options{Pin: true}) val, err := cache.PutIfNotExist(uuid.New(), &testEntryWithCacheSize{15}) assert.Equal(t, ErrCacheItemTooLarge, err) @@ -654,7 +627,7 @@ func TestCache_PutIfNotExistWithSameKeys_Pin(t *testing.T) { t.Parallel() maxTotalBytes := 10 - cache := New(maxTotalBytes, &Options{Pin: true}, metrics.NoopMetricsHandler) + cache := New(maxTotalBytes, &Options{Pin: true}) key := uuid.New() val, err := cache.PutIfNotExist(key, &testEntryWithCacheSize{15}) @@ -683,7 +656,6 @@ func TestCache_ItemSizeChangeBeforeRelease(t *testing.T) { Pin: true, TimeSource: nil, }, - metrics.NoopMetricsHandler, ) entry1 := &testEntryWithCacheSize{ @@ 
-725,3 +697,155 @@ func TestCache_ItemSizeChangeBeforeRelease(t *testing.T) { // Cache should have evicted entry1 to bring cache size under max limit. assert.Equal(t, 2, cache.Size()) } + +func TestCache_InvokeLifecycleCallbacks(t *testing.T) { + t.Parallel() + + var onPut, onEvict int + ttl := time.Millisecond * 50 + timeSource := clock.NewEventTimeSource() + cache := New(5, + &Options{ + TTL: ttl, + TimeSource: timeSource, + OnPut: func(val any) { + require.Equal(t, val, "value") + onPut++ + }, + OnEvict: func(val any) { + require.Equal(t, val, "value") + onEvict++ + }, + }, + ) + + cache.Put("key", "value") + cache.Put("key", "value") + require.Equal(t, 2, onPut, "expected OnPut callback to be invoked twice") + + _, _ = cache.PutIfNotExist("key", "value") + require.Equal(t, 2, onPut, "expected OnPut callback to *not* be invoked again") + require.Equal(t, 0, onEvict, "expected OnEvict callback to be *not* be invoked") + + cache.Delete("key") + require.Equal(t, 1, onEvict, "expected OnEvict callback to be invoked") + + cache.Put("key", "value") + timeSource.Advance(2 * ttl) + assert.Nil(t, cache.Get("key")) + require.Equal(t, 2, onEvict, "expected OnEvict callback to be invoked") +} + +func TestCache_UnusedExpiry(t *testing.T) { + t.Parallel() + r := require.New(t) + + ttl := 10 * time.Minute + loopInterval := 1 * time.Minute + timeSource := clock.NewEventTimeSource() + + cache := New(5, + &Options{ + TTL: ttl, + TimeSource: timeSource, + BackgroundEvict: func() dynamicconfig.CacheBackgroundEvictSettings { + return dynamicconfig.CacheBackgroundEvictSettings{ + Enabled: true, + LoopInterval: loopInterval, + MaxEntryPerCall: 1, + } + }, + }, + ) + + cache.Put(1, 1) + r.Equal(1, cache.Size()) + + r.Eventually(func() bool { + timeSource.Advance(loopInterval) + return cache.Size() == 0 + }, 2*time.Second, 100*time.Millisecond) + + cache.Put(2, 2) + timeSource.Advance(ttl / 2) + cache.Put(3, 3) + r.Equal(2, cache.Size()) + + r.Eventually(func() bool { + 
timeSource.Advance(loopInterval) + return cache.Size() == 1 && cache.Get(2) == nil && cache.Get(3) == 3 + }, 2*time.Second, 100*time.Millisecond) + + r.Eventually(func() bool { + timeSource.Advance(loopInterval) + return cache.Size() == 0 && cache.Get(2) == nil && cache.Get(3) == nil + }, 2*time.Second, 100*time.Millisecond) + + // Stop the background goroutine, confirm no active expiration. + cache.Put(4, 4) + cache.Stop() + l, ok := cache.(*lru) + r.True(ok) + c := make(chan struct{}) + go func() { + l.loops.Wait() + close(c) + }() + r.Eventually(func() bool { + select { + case <-c: + return true + default: + return false + } + }, 2*time.Second, 100*time.Millisecond) + timeSource.Advance(ttl + 1*time.Second) + // The cache should still have entry 4, + r.Equal(1, cache.Size()) + // but this Get call will check the (hard) ttl & expire it. + r.Equal(nil, cache.Get(4)) +} + +func TestCache_UnusedExpiryPin(t *testing.T) { + t.Parallel() + r := require.New(t) + + ttl := 10 * time.Minute + loopInterval := 1 * time.Minute + timeSource := clock.NewEventTimeSource() + + cache := New(5, + &Options{ + TTL: ttl, + Pin: true, + TimeSource: timeSource, + BackgroundEvict: func() dynamicconfig.CacheBackgroundEvictSettings { + return dynamicconfig.CacheBackgroundEvictSettings{ + Enabled: true, + LoopInterval: loopInterval, + MaxEntryPerCall: 1, + } + }, + }, + ) + + _, err := cache.PutIfNotExist(1, 1) + r.NoError(err) + timeSource.Advance(ttl / 2) + cache.Release(1) + _, err = cache.PutIfNotExist(2, 2) + r.NoError(err) + r.Equal(2, cache.Size()) + + r.Eventually(func() bool { + timeSource.Advance(loopInterval) + return cache.Size() == 1 && cache.Get(1) == nil + }, 1*time.Second, 100*time.Millisecond) + + cache.Release(2) + + r.Eventually(func() bool { + timeSource.Advance(loopInterval) + return cache.Size() == 0 + }, 1*time.Second, 100*time.Millisecond) +} diff --git a/common/cache/simple.go b/common/cache/simple.go index dba8cc29c29..e08cbf1a71c 100644 --- 
a/common/cache/simple.go +++ b/common/cache/simple.go @@ -1,25 +1,3 @@ -// The MIT License (MIT) -// -// Copyright (c) 2017-2020 Uber Technologies Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- package cache import ( @@ -36,7 +14,7 @@ var ( type ( simple struct { sync.RWMutex - accessMap map[interface{}]*list.Element + accessMap map[any]*list.Element iterateList *list.List rmFunc RemovedFunc } @@ -47,8 +25,8 @@ type ( } simpleEntry struct { - key interface{} - value interface{} + key any + value any } ) @@ -68,6 +46,7 @@ func (it *simpleItr) Next() Entry { panic("Simple cache iterator Next called when there is no next item") } + // nolint:revive entry := it.nextItem.Value.(*simpleEntry) it.nextItem = it.nextItem.Next() // make a copy of the entry so there will be no concurrent access to this entry @@ -78,11 +57,11 @@ func (it *simpleItr) Next() Entry { return entry } -func (e *simpleEntry) Key() interface{} { +func (e *simpleEntry) Key() any { return e.key } -func (e *simpleEntry) Value() interface{} { +func (e *simpleEntry) Value() any { return e.value } @@ -102,13 +81,13 @@ func NewSimple(opts *SimpleOptions) Cache { } return &simple{ iterateList: list.New(), - accessMap: make(map[interface{}]*list.Element), + accessMap: make(map[any]*list.Element), rmFunc: opts.RemovedFunc, } } // Get retrieves the value stored under the given key -func (c *simple) Get(key interface{}) interface{} { +func (c *simple) Get(key any) any { c.RLock() defer c.RUnlock() @@ -120,7 +99,7 @@ func (c *simple) Get(key interface{}) interface{} { } // Put puts a new value associated with a given key, returning the existing value (if present). 
-func (c *simple) Put(key interface{}, value interface{}) interface{} { +func (c *simple) Put(key any, value any) any { c.Lock() defer c.Unlock() existing := c.putInternal(key, value, true) @@ -128,7 +107,7 @@ func (c *simple) Put(key interface{}, value interface{}) interface{} { } // PutIfNotExist puts a value associated with a given key if it does not exist -func (c *simple) PutIfNotExist(key interface{}, value interface{}) (interface{}, error) { +func (c *simple) PutIfNotExist(key any, value any) (any, error) { c.Lock() defer c.Unlock() existing := c.putInternal(key, value, false) @@ -140,7 +119,7 @@ func (c *simple) PutIfNotExist(key interface{}, value interface{}) (interface{}, } // Delete deletes a key, value pair associated with a key -func (c *simple) Delete(key interface{}) { +func (c *simple) Delete(key any) { c.Lock() defer c.Unlock() @@ -148,6 +127,7 @@ func (c *simple) Delete(key interface{}) { if element == nil { return } + // nolint:revive entry := c.iterateList.Remove(element).(*simpleEntry) if c.rmFunc != nil { go c.rmFunc(entry.value) @@ -156,7 +136,7 @@ func (c *simple) Delete(key interface{}) { } // Release does nothing for simple cache -func (c *simple) Release(_ interface{}) {} +func (c *simple) Release(_ any) {} // Size returns the number of entries currently in the cache func (c *simple) Size() int { @@ -175,9 +155,10 @@ func (c *simple) Iterator() Iterator { return iterator } -func (c *simple) putInternal(key interface{}, value interface{}, allowUpdate bool) interface{} { +func (c *simple) putInternal(key any, value any, allowUpdate bool) any { elt := c.accessMap[key] if elt != nil { + // nolint:revive entry := elt.Value.(*simpleEntry) existing := entry.value if allowUpdate { diff --git a/common/cache/simple_test.go b/common/cache/simple_test.go index d8fc61fe8ef..7c89e3bf8da 100644 --- a/common/cache/simple_test.go +++ b/common/cache/simple_test.go @@ -1,25 +1,3 @@ -// The MIT License (MIT) -// -// Copyright (c) 2017-2020 Uber Technologies 
Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- package cache import ( @@ -95,7 +73,7 @@ func TestSimpleCacheConcurrentAccess(t *testing.T) { start := make(chan struct{}) var wg sync.WaitGroup - for i := 0; i < 20; i++ { + for range 20 { wg.Add(2) // concurrent get and put @@ -104,7 +82,7 @@ func TestSimpleCacheConcurrentAccess(t *testing.T) { <-start - for j := 0; j < 1000; j++ { + for range 1000 { cache.Get("A") cache.Put("A", "fooo") } @@ -116,7 +94,7 @@ func TestSimpleCacheConcurrentAccess(t *testing.T) { <-start - for j := 0; j < 50; j++ { + for range 50 { it := cache.Iterator() for it.HasNext() { _ = it.Next() @@ -133,7 +111,7 @@ func TestSimpleCacheConcurrentAccess(t *testing.T) { func TestSimpleRemoveFunc(t *testing.T) { ch := make(chan bool) cache := NewSimple(&SimpleOptions{ - RemovedFunc: func(i interface{}) { + RemovedFunc: func(i any) { _, ok := i.(*testing.T) assert.True(t, ok) ch <- true @@ -172,6 +150,7 @@ func TestSimpleIterator(t *testing.T) { it := cache.Iterator() for it.HasNext() { entry := it.Next() + // nolint:revive actual[entry.Key().(string)] = entry.Value().(string) } it.Close() @@ -180,6 +159,7 @@ func TestSimpleIterator(t *testing.T) { it = cache.Iterator() for i := 0; i < len(expected); i++ { entry := it.Next() + // nolint:revive actual[entry.Key().(string)] = entry.Value().(string) } it.Close() diff --git a/common/cache/size_getter.go b/common/cache/size_getter.go index a00e838250b..2d49fd186d9 100644 --- a/common/cache/size_getter.go +++ b/common/cache/size_getter.go @@ -1,28 +1,4 @@ -// The MIT License -// -// Copyright (c) 2023 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination size_getter_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination size_getter_mock.go package cache @@ -37,7 +13,7 @@ type ( } ) -func getSize(value interface{}) int { +func getSize(value any) int { if v, ok := value.(SizeGetter); ok { return v.CacheSize() } diff --git a/common/cache/size_getter_mock.go b/common/cache/size_getter_mock.go index bcbc710e13f..23ed458fe10 100644 --- a/common/cache/size_getter_mock.go +++ b/common/cache/size_getter_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: size_getter.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package cache -source size_getter.go -destination size_getter_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: size_getter.go // Package cache is a generated GoMock package. package cache @@ -31,13 +12,14 @@ package cache import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" + gomock "go.uber.org/mock/gomock" ) // MockSizeGetter is a mock of SizeGetter interface. type MockSizeGetter struct { ctrl *gomock.Controller recorder *MockSizeGetterMockRecorder + isgomock struct{} } // MockSizeGetterMockRecorder is the mock recorder for MockSizeGetter. 
diff --git a/common/channel/shutdown_once.go b/common/channel/shutdown_once.go index 2cbbdcf59f2..81d32ee8d7a 100644 --- a/common/channel/shutdown_once.go +++ b/common/channel/shutdown_once.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package channel import ( diff --git a/common/checksum/crc.go b/common/checksum/crc.go index f4942796253..7fd1d23c37d 100644 --- a/common/checksum/crc.go +++ b/common/checksum/crc.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package checksum import ( diff --git a/common/checksum/crc_test.go b/common/checksum/crc_test.go index 402971ce6ee..0955ebcaa3e 100644 --- a/common/checksum/crc_test.go +++ b/common/checksum/crc_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package checksum import ( @@ -30,13 +6,12 @@ import ( "testing" "time" - "github.com/pborman/uuid" + "github.com/google/uuid" "github.com/stretchr/testify/assert" commonpb "go.temporal.io/api/common/v1" workflowpb "go.temporal.io/api/workflow/v1" - "google.golang.org/protobuf/types/known/timestamppb" - "go.temporal.io/server/common" + "google.golang.org/protobuf/types/known/timestamppb" ) func TestCRC32OverProto(t *testing.T) { @@ -46,8 +21,8 @@ func TestCRC32OverProto(t *testing.T) { // different set of serialized bytes obj := &workflowpb.WorkflowExecutionInfo{ Execution: &commonpb.WorkflowExecution{ - WorkflowId: uuid.New(), - RunId: uuid.New(), + WorkflowId: uuid.NewString(), + RunId: uuid.NewString(), }, StartTime: timestamppb.New(time.Now().UTC()), HistoryLength: 550, @@ -61,11 +36,11 @@ func TestCRC32OverProto(t *testing.T) { doneWG := sync.WaitGroup{} doneWG.Add(parallism) - for i := 0; i < parallism; i++ { + for range parallism { go func() { defer doneWG.Done() <-startC - for count := 0; count < loopCount; count++ { + for range loopCount { csum, err := GenerateCRC32(obj, 1) if err != nil { return diff --git a/common/circuitbreaker/circuitbreaker.go b/common/circuitbreaker/circuitbreaker.go new file mode 100644 index 00000000000..51a7f86a3cf --- /dev/null +++ b/common/circuitbreaker/circuitbreaker.go @@ -0,0 +1,87 @@ +package circuitbreaker + +import ( + "sync" + "sync/atomic" + + "github.com/sony/gobreaker" + "go.temporal.io/server/common/dynamicconfig" +) + +type ( + TwoStepCircuitBreaker interface { + Name() string + State() gobreaker.State + Counts() gobreaker.Counts + Allow() (done func(success bool), err error) + } + + // TwoStepCircuitBreakerWithDynamicSettings is a wrapper of gobreaker.TwoStepCircuitBreaker + // that calls the settingsFn everytime the Allow function is called and replaces the circuit + // breaker if there is a change in the settings object. Note that in this case, the previous + // state of the circuit breaker is lost. 
+ TwoStepCircuitBreakerWithDynamicSettings struct { + name string + readyToTrip func(counts gobreaker.Counts) bool + onStateChange func(name string, from gobreaker.State, to gobreaker.State) + + cb atomic.Pointer[gobreaker.TwoStepCircuitBreaker] + cbLock sync.Mutex + settings dynamicconfig.CircuitBreakerSettings + } + + Settings struct { + // For the following options, check gobreaker docs for details. + Name string + ReadyToTrip func(counts gobreaker.Counts) bool + OnStateChange func(name string, from gobreaker.State, to gobreaker.State) + } +) + +var _ TwoStepCircuitBreaker = (*TwoStepCircuitBreakerWithDynamicSettings)(nil) + +// Caller must call UpdateSettings once before using this object. +func NewTwoStepCircuitBreakerWithDynamicSettings( + settings Settings, +) *TwoStepCircuitBreakerWithDynamicSettings { + return &TwoStepCircuitBreakerWithDynamicSettings{ + name: settings.Name, + readyToTrip: settings.ReadyToTrip, + onStateChange: settings.OnStateChange, + } +} + +func (c *TwoStepCircuitBreakerWithDynamicSettings) UpdateSettings( + ds dynamicconfig.CircuitBreakerSettings, +) { + c.cbLock.Lock() + defer c.cbLock.Unlock() + if c.cb.Load() != nil && ds == c.settings { + return // no change + } + c.settings = ds + c.cb.Store(gobreaker.NewTwoStepCircuitBreaker(gobreaker.Settings{ + Name: c.name, + MaxRequests: uint32(ds.MaxRequests), + Interval: ds.Interval, + Timeout: ds.Timeout, + ReadyToTrip: c.readyToTrip, + OnStateChange: c.onStateChange, + })) +} + +func (c *TwoStepCircuitBreakerWithDynamicSettings) Name() string { + return c.cb.Load().Name() +} + +func (c *TwoStepCircuitBreakerWithDynamicSettings) State() gobreaker.State { + return c.cb.Load().State() +} + +func (c *TwoStepCircuitBreakerWithDynamicSettings) Counts() gobreaker.Counts { + return c.cb.Load().Counts() +} + +func (c *TwoStepCircuitBreakerWithDynamicSettings) Allow() (done func(success bool), err error) { + return c.cb.Load().Allow() +} diff --git a/common/circuitbreaker/circuitbreaker_test.go 
b/common/circuitbreaker/circuitbreaker_test.go new file mode 100644 index 00000000000..47144e8a6d3 --- /dev/null +++ b/common/circuitbreaker/circuitbreaker_test.go @@ -0,0 +1,53 @@ +package circuitbreaker + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "go.temporal.io/server/common/dynamicconfig" +) + +type TSCBWithDynamicSettingsTestSuite struct { + suite.Suite +} + +func TestTSCBWithDynamicSettings(t *testing.T) { + suite.Run(t, &TSCBWithDynamicSettingsTestSuite{}) +} + +func TestBasic(t *testing.T) { + s := assert.New(t) + + name := "test-tscb" + tscb := NewTwoStepCircuitBreakerWithDynamicSettings(Settings{Name: name}) + tscb.UpdateSettings(dynamicconfig.CircuitBreakerSettings{}) + s.Equal(name, tscb.Name()) + + doneFn, err := tscb.Allow() + s.NoError(err) + doneFn(true) +} + +func TestDynamicSettings(t *testing.T) { + s := assert.New(t) + + tscb := NewTwoStepCircuitBreakerWithDynamicSettings(Settings{}) + tscb.UpdateSettings(dynamicconfig.CircuitBreakerSettings{}) + cb1 := tscb.cb.Load() + + // should not change + tscb.UpdateSettings(dynamicconfig.CircuitBreakerSettings{}) + cb2 := tscb.cb.Load() + s.Equal(cb2, cb1) + + // should change + tscb.UpdateSettings(dynamicconfig.CircuitBreakerSettings{ + MaxRequests: 2, + Interval: 3600 * time.Second, + Timeout: 30 * time.Second, + }) + cb3 := tscb.cb.Load() + s.NotEqual(cb3, cb2) +} diff --git a/common/client_cache.go b/common/client_cache.go index e78bdc5794a..a4e32329f5f 100644 --- a/common/client_cache.go +++ b/common/client_cache.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package common import ( @@ -31,24 +7,25 @@ import ( type ( // ClientCache store initialized clients ClientCache interface { - GetClientForKey(key string) (interface{}, error) - GetClientForClientKey(clientKey string) (interface{}, error) - GetAllClients() ([]interface{}, error) + Lookup(key string, index int) (string, error) // pass through to keyResolver + GetClientForKey(key string, index int) (any, error) + GetClientForClientKey(clientKey string) (any, error) + GetAllClients() ([]any, error) } keyResolver interface { - Lookup(key string) (string, error) + Lookup(key string, index int) (string, error) GetAllAddresses() ([]string, error) } - clientProvider func(string) (interface{}, error) + clientProvider func(string) (any, error) clientCacheImpl struct { keyResolver keyResolver clientProvider clientProvider cacheLock sync.RWMutex - clients map[string]interface{} + clients map[string]any } ) @@ -62,20 +39,23 @@ func NewClientCache( keyResolver: keyResolver, clientProvider: clientProvider, - clients: make(map[string]interface{}), + clients: make(map[string]any), } } -func (c *clientCacheImpl) GetClientForKey(key string) (interface{}, error) { - clientKey, err := c.keyResolver.Lookup(key) +func (c *clientCacheImpl) Lookup(key string, index int) (string, error) { + return c.keyResolver.Lookup(key, index) +} + +func (c *clientCacheImpl) GetClientForKey(key string, index int) (any, error) { + clientKey, err := c.Lookup(key, index) if err != nil { return nil, err } - return c.GetClientForClientKey(clientKey) } -func (c *clientCacheImpl) GetClientForClientKey(clientKey string) (interface{}, error) { +func (c *clientCacheImpl) GetClientForClientKey(clientKey string) (any, error) { c.cacheLock.RLock() client, ok := c.clients[clientKey] c.cacheLock.RUnlock() @@ -99,8 +79,8 @@ func (c *clientCacheImpl) GetClientForClientKey(clientKey string) (interface{}, return client, nil } -func (c *clientCacheImpl) GetAllClients() ([]interface{}, error) { - var result []interface{} 
+func (c *clientCacheImpl) GetAllClients() ([]any, error) { + var result []any allAddresses, err := c.keyResolver.GetAllAddresses() if err != nil { return nil, err diff --git a/common/clock/context.go b/common/clock/context.go index 1aab00fbc26..668c7058187 100644 --- a/common/clock/context.go +++ b/common/clock/context.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package clock import ( @@ -65,7 +41,10 @@ func (ctx *ctxWithDeadline) deadlineExceeded() { func (ctx *ctxWithDeadline) cancel() { ctx.once.Do(func() { - ctx.timer.Stop() + // We'd like to call ctx.timer.Stop() here, but we can't: the time source may call + // deadlineExceeded while holding its lock, which acquires the once mutex. 
Here we have + // the once mutex and want to cancel a timer, which would create a potential lock + // cycle. So just leave the timer as a no-op. ctx.err = context.Canceled close(ctx.done) }) diff --git a/common/clock/context_test.go b/common/clock/context_test.go index 6358120550f..22a334041b6 100644 --- a/common/clock/context_test.go +++ b/common/clock/context_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package clock_test import ( diff --git a/common/clock/event_time_source.go b/common/clock/event_time_source.go index 7df233bc349..a8c25fe3d82 100644 --- a/common/clock/event_time_source.go +++ b/common/clock/event_time_source.go @@ -1,32 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package clock import ( "sync" "time" + + "go.temporal.io/server/common/util" ) type ( @@ -37,6 +15,7 @@ type ( mu sync.RWMutex now time.Time timers []*fakeTimer + async bool } // fakeTimer is a fake implementation of [Timer]. @@ -51,9 +30,13 @@ type ( done bool // index of the timer in the parent timeSource index int + // channel on which the current time is sent when a timer fires + c chan time.Time } ) +var _ TimeSource = (*EventTimeSource)(nil) + // NewEventTimeSource returns a EventTimeSource with the current time set to Unix zero: 1970-01-01 00:00:00 +0000 UTC. func NewEventTimeSource() *EventTimeSource { return &EventTimeSource{ @@ -61,6 +44,15 @@ func NewEventTimeSource() *EventTimeSource { } } +// Some clients depend on the fact that the runtime's timers do _not_ run synchronously. 
+// If UseAsyncTimers(true) is called, then EventTimeSource will behave that way also. +func (ts *EventTimeSource) UseAsyncTimers(async bool) { + ts.mu.Lock() + defer ts.mu.Unlock() + + ts.async = async +} + // Now return the current time. func (ts *EventTimeSource) Now() time.Time { ts.mu.RLock() @@ -69,24 +61,46 @@ func (ts *EventTimeSource) Now() time.Time { return ts.now } +func (ts *EventTimeSource) Since(t time.Time) time.Duration { + return ts.Now().Sub(t) +} + // AfterFunc return a timer that will fire after the specified duration. It is important to note that the timeSource is // locked while the callback is called. This means that you must be cautious about calling any other mutating methods on // the timeSource from within the callback. Doing so will probably result in a deadlock. To avoid this, you may want to // wrap all such calls in a goroutine. If the duration is non-positive, the callback will fire immediately before // AfterFunc returns. func (ts *EventTimeSource) AfterFunc(d time.Duration, f func()) Timer { - ts.mu.Lock() - defer ts.mu.Unlock() - if d < 0 { d = 0 } - t := &fakeTimer{timeSource: ts, deadline: ts.now.Add(d), callback: f} + timer := &fakeTimer{timeSource: ts, deadline: ts.Now().Add(d), callback: f} + ts.addTimer(timer) + return timer +} + +// NewTimer creates a Timer that will send the current time on a channel after at least +// duration d. It returns the channel and the Timer. 
+func (ts *EventTimeSource) NewTimer(d time.Duration) (<-chan time.Time, Timer) { + c := make(chan time.Time, 1) + // we can't call ts.Now() from the callback so just calculate what it should be + target := ts.Now().Add(d) + timer := &fakeTimer{ + timeSource: ts, + deadline: target, + callback: func() { c <- target }, + c: c, + } + ts.addTimer(timer) + return c, timer +} + +func (ts *EventTimeSource) addTimer(t *fakeTimer) { + ts.mu.Lock() + defer ts.mu.Unlock() t.index = len(ts.timers) ts.timers = append(ts.timers, t) ts.fireTimers() - - return t } // Update the fake current time. It returns the timeSource so that you can chain calls like this: @@ -109,6 +123,37 @@ func (ts *EventTimeSource) Advance(d time.Duration) { ts.fireTimers() } +// AdvanceNext advances to the next timer. +func (ts *EventTimeSource) AdvanceNext() { + ts.mu.Lock() + defer ts.mu.Unlock() + + if len(ts.timers) == 0 { + return + } + // just do linear search, this is efficient enough for now + tmin := ts.timers[0].deadline + for _, t := range ts.timers[1:] { + tmin = util.MinTime(tmin, t.deadline) + } + ts.now = tmin + ts.fireTimers() +} + +// NumTimers returns the number of outstanding timers. +func (ts *EventTimeSource) NumTimers() int { + ts.mu.Lock() + defer ts.mu.Unlock() + + return len(ts.timers) +} + +// Sleep is a convenience function for waiting on a new timer. +func (ts *EventTimeSource) Sleep(d time.Duration) { + t, _ := ts.NewTimer(d) + <-t +} + // fireTimers fires all timers that are ready. 
func (ts *EventTimeSource) fireTimers() { n := 0 @@ -118,7 +163,11 @@ func (ts *EventTimeSource) fireTimers() { t.index = n n++ } else { - t.callback() + if ts.async { + go t.callback() + } else { + t.callback() + } t.done = true } } @@ -140,6 +189,10 @@ func (t *fakeTimer) Reset(d time.Duration) bool { t.done = false t.index = len(t.timeSource.timers) t.timeSource.timers = append(t.timeSource.timers, t) + // Only reset the callback if this timer was created via NewTimer + if t.c != nil { + t.callback = func() { t.c <- t.deadline } + } } t.timeSource.fireTimers() return wasActive diff --git a/common/clock/event_time_source_test.go b/common/clock/event_time_source_test.go index 2f911f785cf..78814494872 100644 --- a/common/clock/event_time_source_test.go +++ b/common/clock/event_time_source_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package clock_test import ( @@ -83,6 +59,25 @@ func ExampleEventTimeSource() { // time source advanced } +func TestEventTimeSource_Since(t *testing.T) { + t.Parallel() + + // Create a new fake time source. + source := clock.NewEventTimeSource() + + // No delta expected yet + start := source.Now() + assert.Equal(t, time.Duration(0), source.Since(start)) + + // Advance by one + source.Advance(1 * time.Second) + assert.Equal(t, 1*time.Second, source.Since(start)) + + // Advance back to start + source.Advance(-1 * time.Second) + assert.Equal(t, time.Duration(0), source.Since(start)) +} + func TestEventTimeSource_AfterFunc(t *testing.T) { t.Parallel() @@ -203,3 +198,42 @@ func TestEventTimeSource_Update(t *testing.T) { ev1.AssertFiredOnce("Timer should fire after deadline") ev2.AssertFiredOnce("Timer should fire after deadline") } + +func TestEventTimeSource_NewTimerWithChannelAndReset(t *testing.T) { + t.Parallel() + + source := clock.NewEventTimeSource() + + ch, timer := source.NewTimer(time.Second) + expectedFireTime := source.Now().Add(time.Second) + + select { + case <-ch: + t.Error("shouldn't fire yet") + default: + } + + source.Advance(2 * time.Second) + + // Since the timer duration was 1s, it should send the time at which the timer fired (which was 1s ago) on the channel + select { + case result := <-ch: + assert.Equal(t, expectedFireTime, result) + default: + t.Error("should have fired") + } + + // Reset the timer so that it fires in 1 second + timer.Reset(time.Second) + expectedFireTime = source.Now().Add(time.Second) + + source.Advance(2 * time.Second) + + // Check that the timer sends the time at which it fired on the channel + select { + case result := <-ch: + assert.Equal(t, 
expectedFireTime, result) + default: + t.Error("should have fired") + } +} diff --git a/common/clock/hybrid_logical_clock/hybrid_logical_clock.go b/common/clock/hybrid_logical_clock/hybrid_logical_clock.go index 947b230e997..528b7a24308 100644 --- a/common/clock/hybrid_logical_clock/hybrid_logical_clock.go +++ b/common/clock/hybrid_logical_clock/hybrid_logical_clock.go @@ -1,37 +1,14 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package hybrid_logical_clock import ( "time" - clockpb "go.temporal.io/server/api/clock/v1" + clockspb "go.temporal.io/server/api/clock/v1" commonclock "go.temporal.io/server/common/clock" + "google.golang.org/protobuf/types/known/timestamppb" ) -type Clock = clockpb.HybridLogicalClock +type Clock = clockspb.HybridLogicalClock // Next generates the next clock timestamp given the current clock. // HybridLogicalClock requires the previous clock to ensure that time doesn't move backwards and the next clock is @@ -120,3 +97,8 @@ func UTC(c *Clock) time.Time { func Since(c *Clock) time.Duration { return time.Since(UTC(c)) } + +// ProtoTimestamp returns timestamppb.New(UTC(c)) +func ProtoTimestamp(c *Clock) *timestamppb.Timestamp { + return timestamppb.New(UTC(c)) +} diff --git a/common/clock/hybrid_logical_clock/hybrid_logical_clock_test.go b/common/clock/hybrid_logical_clock/hybrid_logical_clock_test.go index b8ac01e4cdc..39226978587 100644 --- a/common/clock/hybrid_logical_clock/hybrid_logical_clock_test.go +++ b/common/clock/hybrid_logical_clock/hybrid_logical_clock_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package hybrid_logical_clock import ( diff --git a/common/clock/time_skipping_time_source.go b/common/clock/time_skipping_time_source.go new file mode 100644 index 00000000000..e11872c107c --- /dev/null +++ b/common/clock/time_skipping_time_source.go @@ -0,0 +1,48 @@ +package clock + +import "time" + +var _ TimeSource = (*TimeSkippingTimeSourceWrapper)(nil) + +// TimeSkippingTimeSourceWrapper wraps a base TimeSource and adds an offset +// returned by getOffset to Now() and Since(). The offset is read lazily on +// every call so callers can back it with live state (e.g. a field on +// MutableState) without re-wrapping when that state changes. +// +// AfterFunc and NewTimer delegate to the base TimeSource and are not +// affected by the offset. +type TimeSkippingTimeSourceWrapper struct { + base TimeSource + getOffset func() time.Duration +} + +// WrapTimeSourceWithTimeSkipping returns a TimeSource that adds getOffset() to +// the base's Now()/Since() on every call. If getOffset is nil, the wrapper behaves +// as a pass-through to base. 
+func WrapTimeSourceWithTimeSkipping(base TimeSource, getOffset func() time.Duration) TimeSource { + return &TimeSkippingTimeSourceWrapper{base: base, getOffset: getOffset} +} + +func (ts *TimeSkippingTimeSourceWrapper) Now() time.Time { + t := ts.base.Now() + if ts.getOffset != nil { + t = t.Add(ts.getOffset()) + } + return t +} + +func (ts *TimeSkippingTimeSourceWrapper) Since(t time.Time) time.Duration { + return ts.Now().Sub(t) +} + +// AfterFunc delegates to the base TimeSource and does not apply the offset. +// TODO@time-skipping: examine if there is any need to skip time for this method. +func (ts *TimeSkippingTimeSourceWrapper) AfterFunc(d time.Duration, f func()) Timer { + return ts.base.AfterFunc(d, f) +} + +// NewTimer delegates to the base TimeSource and does not apply the offset. +// TODO@time-skipping: examine if there is any need to skip time for this method. +func (ts *TimeSkippingTimeSourceWrapper) NewTimer(d time.Duration) (<-chan time.Time, Timer) { + return ts.base.NewTimer(d) +} diff --git a/common/clock/time_skipping_time_source_test.go b/common/clock/time_skipping_time_source_test.go new file mode 100644 index 00000000000..b8fd005e636 --- /dev/null +++ b/common/clock/time_skipping_time_source_test.go @@ -0,0 +1,92 @@ +package clock_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "go.temporal.io/server/common/clock" +) + +func TestTimeSkippingTimeSource_Now_NilGetter(t *testing.T) { + t.Parallel() + + base := clock.NewEventTimeSource() + base.Update(time.Unix(100, 0)) + ts := clock.WrapTimeSourceWithTimeSkipping(base, nil) + + require.Equal(t, time.Unix(100, 0), ts.Now()) +} + +func TestTimeSkippingTimeSource_Now_WithZeroOffset(t *testing.T) { + t.Parallel() + + base := clock.NewEventTimeSource() + base.Update(time.Unix(100, 0)) + ts := clock.WrapTimeSourceWithTimeSkipping(base, func() time.Duration { return 0 }) + + require.Equal(t, time.Unix(100, 0), ts.Now()) +} + +func 
TestTimeSkippingTimeSource_Now_ReadsOffsetLazily(t *testing.T) { + t.Parallel() + + base := clock.NewEventTimeSource() + base.Update(time.Unix(100, 0)) + offset := 10 * time.Second + ts := clock.WrapTimeSourceWithTimeSkipping(base, func() time.Duration { return offset }) + + require.Equal(t, time.Unix(110, 0), ts.Now()) + + offset = 120 * time.Second + require.Equal(t, time.Unix(220, 0), ts.Now()) +} + +func TestTimeSkippingTimeSource_Since_IncludesOffset(t *testing.T) { + t.Parallel() + + base := clock.NewEventTimeSource() + base.Update(time.Unix(100, 0)) + ts := clock.WrapTimeSourceWithTimeSkipping(base, func() time.Duration { return 50 * time.Second }) + + // Since delegates to Now(), which includes the offset: (100+50) - 90 = 60s. + require.Equal(t, 60*time.Second, ts.Since(time.Unix(90, 0))) +} + +func TestTimeSkippingTimeSource_AfterFunc_DelegatesToBase(t *testing.T) { + t.Parallel() + + base := clock.NewEventTimeSource() + ts := clock.WrapTimeSourceWithTimeSkipping(base, func() time.Duration { return 50 * time.Second }) + + fired := false + ts.AfterFunc(time.Second, func() { fired = true }) + + require.False(t, fired) + base.Advance(time.Second) + require.True(t, fired) +} + +func TestTimeSkippingTimeSource_NewTimer_DelegatesToBase(t *testing.T) { + t.Parallel() + + base := clock.NewEventTimeSource() + ts := clock.WrapTimeSourceWithTimeSkipping(base, func() time.Duration { return 50 * time.Second }) + + ch, _ := ts.NewTimer(time.Second) + + select { + case <-ch: + t.Fatal("timer should not fire before deadline") + default: + } + + base.Advance(time.Second) + + select { + case <-ch: + // fired as expected + default: + t.Fatal("timer should have fired after deadline") + } +} diff --git a/common/clock/time_source.go b/common/clock/time_source.go index c5af7c8e70f..74a4c1e5a9b 100644 --- a/common/clock/time_source.go +++ b/common/clock/time_source.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - // Package clock provides extensions to the [time] package. package clock @@ -33,7 +9,9 @@ type ( // TimeSource is an interface to make it easier to test code that uses time. TimeSource interface { Now() time.Time + Since(t time.Time) time.Duration AfterFunc(d time.Duration, f func()) Timer + NewTimer(d time.Duration) (<-chan time.Time, Timer) } // Timer is a timer returned by TimeSource.AfterFunc. Unlike the timers returned by [time.NewTimer] or time.Ticker, // this timer does not have a channel. That is because the callback already reacts to the timer firing. @@ -49,6 +27,8 @@ type ( RealTimeSource struct{} ) +var _ TimeSource = (*RealTimeSource)(nil) + // NewRealTimeSource returns a timeSource that uses the real wall timeSource time. 
func NewRealTimeSource() RealTimeSource { return RealTimeSource{} @@ -59,7 +39,18 @@ func (ts RealTimeSource) Now() time.Time { return time.Now().UTC() } +// Since returns the time elapsed since t +func (ts RealTimeSource) Since(t time.Time) time.Duration { + return time.Since(t) +} + // AfterFunc is a pass-through to time.AfterFunc. func (ts RealTimeSource) AfterFunc(d time.Duration, f func()) Timer { return time.AfterFunc(d, f) } + +// NewTimer is a pass-through to time.NewTimer. +func (ts RealTimeSource) NewTimer(d time.Duration) (<-chan time.Time, Timer) { + t := time.NewTimer(d) + return t.C, t +} diff --git a/common/clock/time_source_test.go b/common/clock/time_source_test.go index 9f9fb08a1f1..0327f8dcc5d 100644 --- a/common/clock/time_source_test.go +++ b/common/clock/time_source_test.go @@ -1,31 +1,8 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package clock_test import ( "testing" + "time" "github.com/stretchr/testify/assert" "go.temporal.io/server/common/clock" @@ -39,6 +16,21 @@ func TestNewRealClock_Now(t *testing.T) { assert.Equal(t, "UTC", location.String()) } +func TestNewRealClock_Since(t *testing.T) { + t.Parallel() + + source := clock.NewRealTimeSource() + start := source.Now() + assert.Eventually( + t, + func() bool { + return source.Since(start) >= 5*time.Millisecond + }, + time.Second, + time.Millisecond, + ) +} + func TestNewRealClock_AfterFunc(t *testing.T) { t.Parallel() @@ -51,3 +43,20 @@ func TestNewRealClock_AfterFunc(t *testing.T) { <-ch assert.False(t, timer.Stop()) } + +func TestNewRealClock_NewTimer(t *testing.T) { + t.Parallel() + + source := clock.NewRealTimeSource() + ch, timer := source.NewTimer(0) + <-ch + assert.False(t, timer.Stop()) +} + +func TestNewRealClock_NewTimer_Stop(t *testing.T) { + t.Parallel() + + source := clock.NewRealTimeSource() + _, timer := source.NewTimer(time.Second) + assert.True(t, timer.Stop()) +} diff --git a/common/cluster/clustertest/test_metadata.go b/common/cluster/clustertest/test_metadata.go new file mode 100644 index 00000000000..9e1c9cb5c9e --- /dev/null +++ b/common/cluster/clustertest/test_metadata.go @@ -0,0 +1,22 @@ +package clustertest + +import ( + "go.temporal.io/server/common/cluster" + "go.temporal.io/server/common/log" +) + +// NewMetadataForTest returns a new [cluster.Metadata] instance for testing. 
+func NewMetadataForTest( + config *cluster.Config, +) cluster.Metadata { + return cluster.NewMetadata( + config.EnableGlobalNamespace, + config.FailoverVersionIncrement, + config.MasterClusterName, + config.CurrentClusterName, + config.ClusterInformation, + nil, + nil, + log.NewNoopLogger(), + ) +} diff --git a/common/cluster/frontend_http_client.go b/common/cluster/frontend_http_client.go new file mode 100644 index 00000000000..f808a1375f5 --- /dev/null +++ b/common/cluster/frontend_http_client.go @@ -0,0 +1,109 @@ +package cluster + +import ( + "crypto/tls" + "fmt" + "net" + "net/http" + "time" + + "go.temporal.io/api/serviceerror" + "go.temporal.io/server/common" + "go.temporal.io/server/common/collection" +) + +type tlsConfigProvider interface { + GetRemoteClusterClientConfig(hostname string) (*tls.Config, error) +} + +type FrontendHTTPClientCache struct { + metadata Metadata + tlsProvider tlsConfigProvider + clients *collection.FallibleOnceMap[string, *common.FrontendHTTPClient] +} + +func NewFrontendHTTPClientCache( + metadata Metadata, + tlsProvider tlsConfigProvider, +) *FrontendHTTPClientCache { + cache := &FrontendHTTPClientCache{ + metadata: metadata, + tlsProvider: tlsProvider, + } + cache.clients = collection.NewFallibleOnceMap(cache.newClientForCluster) + metadata.RegisterMetadataChangeCallback(cache, cache.evictionCallback) + return cache +} + +// Get returns a cached HttpClient if available, or constructs a new one for the given cluster name. 
+func (c *FrontendHTTPClientCache) Get(targetClusterName string) (*common.FrontendHTTPClient, error) { + return c.clients.Get(targetClusterName) +} + +func (c *FrontendHTTPClientCache) newClientForCluster(targetClusterName string) (*common.FrontendHTTPClient, error) { + targetInfo, ok := c.metadata.GetAllClusterInfo()[targetClusterName] + if !ok { + return nil, serviceerror.NewNotFoundf("could not find cluster metadata for cluster %s", targetClusterName) + } + + if targetInfo.HTTPAddress == "" { + return nil, serviceerror.NewInternalf("HTTPAddress not configured for cluster: %s", targetClusterName) + } + host, _, err := net.SplitHostPort(targetInfo.HTTPAddress) + if err != nil { + return nil, fmt.Errorf("%w: %w", serviceerror.NewInternal("invalid frontend address"), err) + } + + // dialer and transport field values copied from http.DefaultTransport. + dialer := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + } + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: dialer.DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + + urlScheme := "http" + if c.tlsProvider != nil { + tlsClientConfig, err := c.tlsProvider.GetRemoteClusterClientConfig(host) + if err != nil { + return nil, err + } + if tlsClientConfig != nil { + transport.TLSClientConfig = tlsClientConfig + urlScheme = "https" + } + } + + return &common.FrontendHTTPClient{ + Address: targetInfo.HTTPAddress, + Scheme: urlScheme, + Client: http.Client{Transport: transport}, + }, nil +} + +// evictionCallback is invoked by cluster.Metadata when cluster information changes. +// It invalidates clients which are either no longer present or have had their HTTP address changed. +// It is assumed that TLS information has not changed for clusters that are unmodified. 
+func (c *FrontendHTTPClientCache) evictionCallback(oldClusterMetadata map[string]*ClusterInformation, newClusterMetadata map[string]*ClusterInformation) { + for oldClusterName, oldClusterInfo := range oldClusterMetadata { + if oldClusterName == c.metadata.GetCurrentClusterName() || oldClusterInfo == nil { + continue + } + + newClusterInfo, exists := newClusterMetadata[oldClusterName] + if !exists || newClusterInfo == nil || oldClusterInfo.HTTPAddress != newClusterInfo.HTTPAddress { + // Cluster was removed or had its HTTP address changed, so invalidate the cached client for that cluster. + client, ok := c.clients.Pop(oldClusterName) + if ok { + client.CloseIdleConnections() + } + } + } +} diff --git a/common/cluster/fx.go b/common/cluster/fx.go index b9ba7de80e8..f1b49388ad1 100644 --- a/common/cluster/fx.go +++ b/common/cluster/fx.go @@ -1,42 +1,17 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package cluster import ( "context" + "go.temporal.io/server/common/pingable" "go.uber.org/fx" - - "go.temporal.io/server/common" ) var MetadataLifetimeHooksModule = fx.Options( fx.Provide(NewMetadataFromConfig), fx.Invoke(MetadataLifetimeHooks), fx.Provide(fx.Annotate( - func(p Metadata) common.Pingable { return p }, + func(p Metadata) pingable.Pingable { return p }, fx.ResultTags(`group:"deadlockDetectorRoots"`), )), ) diff --git a/common/cluster/metadata.go b/common/cluster/metadata.go index 4b5f1c4c5f4..1ffacb0399a 100644 --- a/common/cluster/metadata.go +++ b/common/cluster/metadata.go @@ -1,51 +1,27 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies Inc. All rights reservem. -// -// Copyright (c) 2021 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination metadata_mock.go +//go:generate mockgen -package $GOPACKAGE -source $GOFILE -destination metadata_mock.go package cluster import ( "context" "fmt" + "maps" "math" "strconv" "sync" "sync/atomic" "time" - "golang.org/x/exp/maps" - "go.temporal.io/server/common" "go.temporal.io/server/common/collection" "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/goro" "go.temporal.io/server/common/headers" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/persistence" - "go.temporal.io/server/internal/goro" + "go.temporal.io/server/common/pingable" ) const ( @@ -56,8 +32,9 @@ const ( ) type ( + // Metadata provides information about the current cluster and other registered remote clusters. Metadata interface { - common.Pingable + pingable.Pingable // IsGlobalNamespaceEnabled whether the global namespace is enabled, // this attr should be discarded when cross DC is made public @@ -91,29 +68,34 @@ type ( // Config contains the all cluster which participated in cross DC Config struct { EnableGlobalNamespace bool `yaml:"enableGlobalNamespace"` - // FailoverVersionIncrement is the increment of each cluster version when failover happens + // FailoverVersionIncrement is the increment of each cluster version when failover happens. FailoverVersionIncrement int64 `yaml:"failoverVersionIncrement"` // MasterClusterName is the master cluster name, only the master cluster can register / update namespace - // all clusters can do namespace failover + // all clusters can do namespace failover. 
MasterClusterName string `yaml:"masterClusterName"` - // CurrentClusterName is the name of the current cluster + // CurrentClusterName is the name of the current cluster. CurrentClusterName string `yaml:"currentClusterName"` - // ClusterInformation contains all cluster names to corresponding information about that cluster + // ClusterInformation is a map from cluster name to corresponding information for each registered cluster. ClusterInformation map[string]ClusterInformation `yaml:"clusterInformation"` - // Tag contains customized tag about the current cluster + // Tags contains customized tags for the current cluster. Tags map[string]string `yaml:"tags"` } - // ClusterInformation contains the information about each cluster which participated in cross DC + // ClusterInformation contains information for a single cluster. ClusterInformation struct { Enabled bool `yaml:"enabled"` InitialFailoverVersion int64 `yaml:"initialFailoverVersion"` - // Address indicate the remote service address(Host:Port). Host can be DNS name. + // RPCAddress indicate the remote service address(Host:Port). Host can be DNS name. RPCAddress string `yaml:"rpcAddress"` - // Cluster ID allows to explicitly set the ID of the cluster. Optional. + // HTTPAddress indicates the address of the [go.temporal.io/server/service/frontend.HTTPAPIServer]. + // E.g. "localhost:7243". + HTTPAddress string `yaml:"httpAddress"` + // ClusterID allows to explicitly set the ID of the cluster. Optional. ClusterID string `yaml:"-"` ShardCount int32 `yaml:"-"` // Ignore this field when loading config. Tags map[string]string `yaml:"-"` // Ignore this field. Use cluster.Config.Tags for customized tags. + // ReplicationEnabled controls whether replication streams are active. 
+ ReplicationEnabled bool `yaml:"-"` // private field to track cluster information updates version int64 } @@ -215,26 +197,11 @@ func NewMetadataFromConfig( config.CurrentClusterName, config.ClusterInformation, clusterMetadataStore, - dynamicCollection.GetDurationProperty(dynamicconfig.ClusterMetadataRefreshInterval, refreshInterval), + dynamicconfig.ClusterMetadataRefreshInterval.Get(dynamicCollection), logger, ) } -func NewMetadataForTest( - config *Config, -) Metadata { - return NewMetadata( - config.EnableGlobalNamespace, - config.FailoverVersionIncrement, - config.MasterClusterName, - config.CurrentClusterName, - config.ClusterInformation, - nil, - nil, - log.NewNoopLogger(), - ) -} - func (m *metadataImpl) Start() { if !atomic.CompareAndSwapInt32(&m.status, common.DaemonStatusInitialized, common.DaemonStatusStarted) { return @@ -243,7 +210,7 @@ func (m *metadataImpl) Start() { // TODO: specify a timeout for the context ctx := headers.SetCallerInfo( context.TODO(), - headers.SystemBackgroundCallerInfo, + headers.SystemBackgroundHighCallerInfo, ) err := m.refreshClusterMetadata(ctx) if err != nil { @@ -261,15 +228,15 @@ func (m *metadataImpl) Stop() { <-m.refresher.Done() } -func (m *metadataImpl) GetPingChecks() []common.PingCheck { - return []common.PingCheck{ +func (m *metadataImpl) GetPingChecks() []pingable.Check { + return []pingable.Check{ { Name: "cluster metadata lock", // we don't do any persistence ops under clusterLock, use a short timeout Timeout: 10 * time.Second, - Ping: func() []common.Pingable { + Ping: func() []pingable.Pingable { m.clusterLock.Lock() - //lint:ignore SA2001 just checking if we can acquire the lock + // nolint:staticcheck m.clusterLock.Unlock() return nil }, @@ -280,9 +247,9 @@ func (m *metadataImpl) GetPingChecks() []common.PingCheck { // listeners get called under clusterCallbackLock, they may do some more work, but // not persistence ops. 
Timeout: 10 * time.Second, - Ping: func() []common.Pingable { + Ping: func() []pingable.Pingable { m.clusterCallbackLock.Lock() - //lint:ignore SA2001 just checking if we can acquire the lock + // nolint:staticcheck m.clusterCallbackLock.Unlock() return nil }, @@ -409,13 +376,7 @@ func (m *metadataImpl) RegisterMetadataChangeCallback(callbackId any, cb Callbac m.clusterLock.RLock() for clusterName, clusterInfo := range m.clusterInfo { oldEntries[clusterName] = nil - newEntries[clusterName] = &ClusterInformation{ - Enabled: clusterInfo.Enabled, - InitialFailoverVersion: clusterInfo.InitialFailoverVersion, - RPCAddress: clusterInfo.RPCAddress, - ShardCount: clusterInfo.ShardCount, - version: clusterInfo.version, - } + newEntries[clusterName] = ShallowCopyClusterInformation(&clusterInfo) } m.clusterLock.RUnlock() cb(oldEntries, newEntries) @@ -466,39 +427,21 @@ func (m *metadataImpl) refreshClusterMetadata(ctx context.Context) error { if !ok { // handle new cluster registry oldEntries[clusterName] = nil - newEntries[clusterName] = &ClusterInformation{ - Enabled: newClusterInfo.Enabled, - InitialFailoverVersion: newClusterInfo.InitialFailoverVersion, - RPCAddress: newClusterInfo.RPCAddress, - ShardCount: newClusterInfo.ShardCount, - Tags: newClusterInfo.Tags, - version: newClusterInfo.version, - } + newEntries[clusterName] = ShallowCopyClusterInformation(newClusterInfo) } else if newClusterInfo.version > oldClusterInfo.version { if newClusterInfo.Enabled == oldClusterInfo.Enabled && + newClusterInfo.ReplicationEnabled == oldClusterInfo.ReplicationEnabled && newClusterInfo.RPCAddress == oldClusterInfo.RPCAddress && + newClusterInfo.HTTPAddress == oldClusterInfo.HTTPAddress && newClusterInfo.InitialFailoverVersion == oldClusterInfo.InitialFailoverVersion && + newClusterInfo.ClusterID == oldClusterInfo.ClusterID && maps.Equal(newClusterInfo.Tags, oldClusterInfo.Tags) { // key cluster info does not change continue } // handle updated cluster registry - 
oldEntries[clusterName] = &ClusterInformation{ - Enabled: oldClusterInfo.Enabled, - InitialFailoverVersion: oldClusterInfo.InitialFailoverVersion, - RPCAddress: oldClusterInfo.RPCAddress, - ShardCount: oldClusterInfo.ShardCount, - Tags: oldClusterInfo.Tags, - version: oldClusterInfo.version, - } - newEntries[clusterName] = &ClusterInformation{ - Enabled: newClusterInfo.Enabled, - InitialFailoverVersion: newClusterInfo.InitialFailoverVersion, - RPCAddress: newClusterInfo.RPCAddress, - ShardCount: newClusterInfo.ShardCount, - Tags: newClusterInfo.Tags, - version: newClusterInfo.version, - } + oldEntries[clusterName] = ShallowCopyClusterInformation(&oldClusterInfo) + newEntries[clusterName] = ShallowCopyClusterInformation(newClusterInfo) } } for clusterName, oldClusterInfo := range clusterInfoMap { @@ -559,6 +502,7 @@ func updateVersionToClusterName(clusterInfo map[string]ClusterInformation, failo if info.Enabled && info.RPCAddress == "" { panic(fmt.Sprintf("Cluster %v: RPCAddress is empty", clusterName)) } + // It's ok if info.HTTPAddress is empty } return versionToClusterName } @@ -567,12 +511,29 @@ func (m *metadataImpl) listAllClusterMetadataFromDB( ctx context.Context, ) (map[string]*ClusterInformation, error) { result := make(map[string]*ClusterInformation) - if m.clusterMetadataStore == nil { + metadataStore := m.clusterMetadataStore + if metadataStore == nil { return result, nil } - paginationFn := func(paginationToken []byte) ([]interface{}, []byte, error) { - resp, err := m.clusterMetadataStore.ListClusterMetadata( + iterator := GetAllClustersIter(ctx, metadataStore) + for iterator.HasNext() { + item, err := iterator.Next() + if err != nil { + return nil, err + } + result[item.GetClusterName()] = ClusterInformationFromDB(item) + } + return result, nil +} + +// GetAllClustersIter returns an iterator that can be used to iterate over all clusters in the metadata store. 
+func GetAllClustersIter( + ctx context.Context, + metadataStore persistence.ClusterMetadataManager, +) collection.Iterator[*persistence.GetClusterMetadataResponse] { + paginationFn := func(paginationToken []byte) ([]*persistence.GetClusterMetadataResponse, []byte, error) { + resp, err := metadataStore.ListClusterMetadata( ctx, &persistence.ListClusterMetadataRequest{ PageSize: defaultClusterMetadataPageSize, @@ -582,28 +543,42 @@ func (m *metadataImpl) listAllClusterMetadataFromDB( if err != nil { return nil, nil, err } - var paginateItems []interface{} - for _, clusterInfo := range resp.ClusterMetadata { - paginateItems = append(paginateItems, clusterInfo) - } - return paginateItems, resp.NextPageToken, nil + return resp.ClusterMetadata, resp.NextPageToken, nil } iterator := collection.NewPagingIterator(paginationFn) - for iterator.HasNext() { - item, err := iterator.Next() - if err != nil { - return nil, err - } - getClusterResp := item.(*persistence.GetClusterMetadataResponse) - result[getClusterResp.GetClusterName()] = &ClusterInformation{ - Enabled: getClusterResp.GetIsConnectionEnabled(), - InitialFailoverVersion: getClusterResp.GetInitialFailoverVersion(), - RPCAddress: getClusterResp.GetClusterAddress(), - ShardCount: getClusterResp.GetHistoryShardCount(), - Tags: getClusterResp.GetTags(), - version: getClusterResp.Version, - } + return iterator +} + +func ClusterInformationFromDB(getClusterResp *persistence.GetClusterMetadataResponse) *ClusterInformation { + return &ClusterInformation{ + Enabled: getClusterResp.GetIsConnectionEnabled(), + InitialFailoverVersion: getClusterResp.GetInitialFailoverVersion(), + RPCAddress: getClusterResp.GetClusterAddress(), + HTTPAddress: getClusterResp.GetHttpAddress(), + ClusterID: getClusterResp.GetClusterId(), + ShardCount: getClusterResp.GetHistoryShardCount(), + Tags: getClusterResp.GetTags(), + ReplicationEnabled: getClusterResp.GetIsReplicationEnabled(), + version: getClusterResp.Version, } - return result, nil +} + 
+// ShallowCopyClusterInformation returns a shallow copy of the given ClusterInformation. The [ClusterInformation.Tags] +// field is not deep-copied, so you must be careful when modifying it. +func ShallowCopyClusterInformation(information *ClusterInformation) *ClusterInformation { + tmp := *information + return &tmp +} + +// IsReplicationEnabledForCluster checks if replication is enabled for a cluster, considering the feature flag. +// When enableSeparateReplicationFlag is false, it falls back to only checking the Enabled flag. +// This is a shared helper function used across history service components. +func IsReplicationEnabledForCluster(clusterInfo ClusterInformation, enableSeparateReplicationFlag bool) bool { + if enableSeparateReplicationFlag { + // New behavior: check both Enabled (for connectivity) and ReplicationEnabled (for replication streams) + return clusterInfo.Enabled && clusterInfo.ReplicationEnabled + } + // Old behavior: only check Enabled flag + return clusterInfo.Enabled } diff --git a/common/cluster/metadata_mock.go b/common/cluster/metadata_mock.go index f727dd1b2d8..c0db8ff6ebd 100644 --- a/common/cluster/metadata_mock.go +++ b/common/cluster/metadata_mock.go @@ -1,29 +1,10 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. +// Code generated by MockGen. DO NOT EDIT. 
+// Source: metadata.go // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Generated by this command: // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// mockgen -package cluster -source metadata.go -destination metadata_mock.go // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -// Code generated by MockGen. DO NOT EDIT. -// Source: metadata.go // Package cluster is a generated GoMock package. package cluster @@ -31,14 +12,15 @@ package cluster import ( reflect "reflect" - gomock "github.com/golang/mock/gomock" - common "go.temporal.io/server/common" + pingable "go.temporal.io/server/common/pingable" + gomock "go.uber.org/mock/gomock" ) // MockMetadata is a mock of Metadata interface. type MockMetadata struct { ctrl *gomock.Controller recorder *MockMetadataMockRecorder + isgomock struct{} } // MockMetadataMockRecorder is the mock recorder for MockMetadata. 
@@ -67,7 +49,7 @@ func (m *MockMetadata) ClusterNameForFailoverVersion(isGlobalNamespace bool, fai } // ClusterNameForFailoverVersion indicates an expected call of ClusterNameForFailoverVersion. -func (mr *MockMetadataMockRecorder) ClusterNameForFailoverVersion(isGlobalNamespace, failoverVersion interface{}) *gomock.Call { +func (mr *MockMetadataMockRecorder) ClusterNameForFailoverVersion(isGlobalNamespace, failoverVersion any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterNameForFailoverVersion", reflect.TypeOf((*MockMetadata)(nil).ClusterNameForFailoverVersion), isGlobalNamespace, failoverVersion) } @@ -151,16 +133,16 @@ func (m *MockMetadata) GetNextFailoverVersion(arg0 string, arg1 int64) int64 { } // GetNextFailoverVersion indicates an expected call of GetNextFailoverVersion. -func (mr *MockMetadataMockRecorder) GetNextFailoverVersion(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockMetadataMockRecorder) GetNextFailoverVersion(arg0, arg1 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextFailoverVersion", reflect.TypeOf((*MockMetadata)(nil).GetNextFailoverVersion), arg0, arg1) } // GetPingChecks mocks base method. -func (m *MockMetadata) GetPingChecks() []common.PingCheck { +func (m *MockMetadata) GetPingChecks() []pingable.Check { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetPingChecks") - ret0, _ := ret[0].([]common.PingCheck) + ret0, _ := ret[0].([]pingable.Check) return ret0 } @@ -207,7 +189,7 @@ func (m *MockMetadata) IsVersionFromSameCluster(version1, version2 int64) bool { } // IsVersionFromSameCluster indicates an expected call of IsVersionFromSameCluster. 
-func (mr *MockMetadataMockRecorder) IsVersionFromSameCluster(version1, version2 interface{}) *gomock.Call { +func (mr *MockMetadataMockRecorder) IsVersionFromSameCluster(version1, version2 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsVersionFromSameCluster", reflect.TypeOf((*MockMetadata)(nil).IsVersionFromSameCluster), version1, version2) } @@ -219,7 +201,7 @@ func (m *MockMetadata) RegisterMetadataChangeCallback(callbackId any, cb Callbac } // RegisterMetadataChangeCallback indicates an expected call of RegisterMetadataChangeCallback. -func (mr *MockMetadataMockRecorder) RegisterMetadataChangeCallback(callbackId, cb interface{}) *gomock.Call { +func (mr *MockMetadataMockRecorder) RegisterMetadataChangeCallback(callbackId, cb any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterMetadataChangeCallback", reflect.TypeOf((*MockMetadata)(nil).RegisterMetadataChangeCallback), callbackId, cb) } @@ -255,7 +237,7 @@ func (m *MockMetadata) UnRegisterMetadataChangeCallback(callbackId any) { } // UnRegisterMetadataChangeCallback indicates an expected call of UnRegisterMetadataChangeCallback. -func (mr *MockMetadataMockRecorder) UnRegisterMetadataChangeCallback(callbackId interface{}) *gomock.Call { +func (mr *MockMetadataMockRecorder) UnRegisterMetadataChangeCallback(callbackId any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnRegisterMetadataChangeCallback", reflect.TypeOf((*MockMetadata)(nil).UnRegisterMetadataChangeCallback), callbackId) } diff --git a/common/cluster/metadata_test.go b/common/cluster/metadata_test.go index 35349bc79e9..909857dd3ca 100644 --- a/common/cluster/metadata_test.go +++ b/common/cluster/metadata_test.go @@ -1,26 +1,3 @@ -// The MIT License -// -// Copyright (c) 2021 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2021 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
package cluster import ( @@ -28,16 +5,14 @@ import ( "testing" "time" - "go.temporal.io/server/common/dynamicconfig" - - "github.com/golang/mock/gomock" - "github.com/pborman/uuid" + "github.com/google/uuid" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - persistencespb "go.temporal.io/server/api/persistence/v1" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" "go.temporal.io/server/common/persistence" + "go.uber.org/mock/gomock" ) type ( @@ -76,29 +51,29 @@ func (s *metadataSuite) SetupTest() { s.isGlobalNamespaceEnabled = true s.failoverVersionIncrement = 100 - s.clusterName = uuid.New() - s.secondClusterName = uuid.New() - s.thirdClusterName = uuid.New() + s.clusterName = uuid.NewString() + s.secondClusterName = uuid.NewString() + s.thirdClusterName = uuid.NewString() clusterInfo := map[string]ClusterInformation{ s.clusterName: { Enabled: true, InitialFailoverVersion: int64(1), - RPCAddress: uuid.New(), + RPCAddress: uuid.NewString(), ShardCount: 1, version: 1, }, s.secondClusterName: { Enabled: true, InitialFailoverVersion: int64(4), - RPCAddress: uuid.New(), + RPCAddress: uuid.NewString(), ShardCount: 2, version: 1, }, s.thirdClusterName: { Enabled: true, InitialFailoverVersion: int64(5), - RPCAddress: uuid.New(), + RPCAddress: uuid.NewString(), ShardCount: 1, version: 1, }, @@ -160,7 +135,7 @@ func (s *metadataSuite) Test_RegisterMetadataChangeCallback() { } func (s *metadataSuite) Test_RefreshClusterMetadata_Success() { - id := uuid.New() + id := uuid.NewString() s.metadata.clusterChangeCallback[id] = func(oldClusterMetadata map[string]*ClusterInformation, newClusterMetadata map[string]*ClusterInformation) { oldMetadata, ok := oldClusterMetadata[id] s.True(ok) @@ -194,7 +169,8 @@ func (s *metadataSuite) Test_RefreshClusterMetadata_Success() { IsConnectionEnabled: true, InitialFailoverVersion: 1, HistoryShardCount: 1, - ClusterAddress: uuid.New(), + ClusterAddress: uuid.NewString(), + HttpAddress: 
uuid.NewString(), }, Version: 1, }, @@ -205,7 +181,8 @@ func (s *metadataSuite) Test_RefreshClusterMetadata_Success() { IsConnectionEnabled: true, InitialFailoverVersion: 1, HistoryShardCount: 1, - ClusterAddress: uuid.New(), + ClusterAddress: uuid.NewString(), + HttpAddress: uuid.NewString(), Tags: map[string]string{"test": "test"}, }, Version: 2, @@ -217,7 +194,8 @@ func (s *metadataSuite) Test_RefreshClusterMetadata_Success() { IsConnectionEnabled: true, InitialFailoverVersion: 2, HistoryShardCount: 2, - ClusterAddress: uuid.New(), + ClusterAddress: uuid.NewString(), + HttpAddress: uuid.NewString(), Tags: map[string]string{"test": "test"}, }, Version: 2, @@ -233,7 +211,7 @@ func (s *metadataSuite) Test_RefreshClusterMetadata_Success() { func (s *metadataSuite) Test_ListAllClusterMetadataFromDB_Success() { nextPageSizeToken := []byte{1} - newClusterName := uuid.New() + newClusterName := uuid.NewString() s.mockClusterMetadataStore.EXPECT().ListClusterMetadata(gomock.Any(), &persistence.ListClusterMetadataRequest{ PageSize: defaultClusterMetadataPageSize, NextPageToken: nil, @@ -246,7 +224,8 @@ func (s *metadataSuite) Test_ListAllClusterMetadataFromDB_Success() { IsConnectionEnabled: true, InitialFailoverVersion: 1, HistoryShardCount: 1, - ClusterAddress: uuid.New(), + ClusterAddress: uuid.NewString(), + HttpAddress: uuid.NewString(), }, Version: 1, }, @@ -265,7 +244,8 @@ func (s *metadataSuite) Test_ListAllClusterMetadataFromDB_Success() { IsConnectionEnabled: true, InitialFailoverVersion: 2, HistoryShardCount: 2, - ClusterAddress: uuid.New(), + ClusterAddress: uuid.NewString(), + HttpAddress: uuid.NewString(), }, Version: 2, }, diff --git a/common/cluster/metadata_test_config.go b/common/cluster/metadata_test_config.go index b77b0c6553e..3f440ddd247 100644 --- a/common/cluster/metadata_test_config.go +++ b/common/cluster/metadata_test_config.go @@ -1,29 +1,9 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package cluster +import ( + "github.com/google/uuid" +) + const ( // TestCurrentClusterInitialFailoverVersion is initial failover version for current cluster TestCurrentClusterInitialFailoverVersion = int64(1) @@ -39,6 +19,10 @@ const ( TestCurrentClusterFrontendAddress = "127.0.0.1:7134" // TestAlternativeClusterFrontendAddress is the ip port address of alternative cluster TestAlternativeClusterFrontendAddress = "127.0.0.1:8134" + // TestCurrentClusterFrontendHTTPAddress is the ip port HTTP address of current cluster. Currently, gRPC port+10. See tests/onebox.go:FrontendHTTPAddress + TestCurrentClusterFrontendHTTPAddress = "127.0.0.1:7144" + // TestAlternativeClusterFrontendHTTPAddress is the ip port HTTP address of the alternative cluster. Currently, gRPC port+10. 
See tests/onebox.go:FrontendHTTPAddress + TestAlternativeClusterFrontendHTTPAddress = "127.0.0.1:8144" ) var ( @@ -50,13 +34,17 @@ var ( Enabled: true, InitialFailoverVersion: TestCurrentClusterInitialFailoverVersion, RPCAddress: TestCurrentClusterFrontendAddress, + HTTPAddress: TestCurrentClusterFrontendHTTPAddress, ShardCount: 8, + ClusterID: uuid.NewString(), }, TestAlternativeClusterName: { Enabled: true, InitialFailoverVersion: TestAlternativeClusterInitialFailoverVersion, RPCAddress: TestAlternativeClusterFrontendAddress, + HTTPAddress: TestAlternativeClusterFrontendHTTPAddress, ShardCount: 4, + ClusterID: uuid.NewString(), }, } @@ -68,6 +56,8 @@ var ( Enabled: true, InitialFailoverVersion: TestCurrentClusterInitialFailoverVersion, RPCAddress: TestCurrentClusterFrontendAddress, + HTTPAddress: TestCurrentClusterFrontendHTTPAddress, + ClusterID: uuid.NewString(), }, } ) diff --git a/common/codec/gob/gob.go b/common/codec/gob/gob.go deleted file mode 100644 index 85f2cf38f11..00000000000 --- a/common/codec/gob/gob.go +++ /dev/null @@ -1,76 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package gob - -import ( - "bytes" - "encoding/gob" - "errors" - "fmt" - "reflect" -) - -var errEmptyArgument = errors.New("length of input argument is 0") - -// Encoder is wrapper of gob encoder/decoder -type Encoder struct{} - -// NewGobEncoder create new Encoder -func NewGobEncoder() *Encoder { - return &Encoder{} -} - -// Encode one or more objects to binary -func (gobEncoder *Encoder) Encode(value ...interface{}) ([]byte, error) { - if len(value) == 0 { - return nil, errEmptyArgument - } - - var buf bytes.Buffer - enc := gob.NewEncoder(&buf) - for i, obj := range value { - if err := enc.Encode(obj); err != nil { - return nil, fmt.Errorf( - "unable to encode argument: %d, %v, with gob error: %v", i, reflect.TypeOf(obj), err) - } - } - return buf.Bytes(), nil -} - -// Decode binary to one or more objects -func (gobEncoder *Encoder) Decode(input []byte, valuePtr ...interface{}) error { - if len(valuePtr) == 0 { - return errEmptyArgument - } - - dec := gob.NewDecoder(bytes.NewBuffer(input)) - for i, obj := range valuePtr { - if err := dec.Decode(obj); err != nil { - return fmt.Errorf( - "unable to decode argument: %d, %v, with gob error: %v", i, reflect.TypeOf(obj), err) - } - } - return nil -} diff --git a/common/codec/gob/gob_test.go b/common/codec/gob/gob_test.go deleted file mode 100644 index 17bea768def..00000000000 --- a/common/codec/gob/gob_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package gob - -import ( - "testing" - "time" - - "github.com/pborman/uuid" - "github.com/stretchr/testify/require" -) - -type testStruct struct { - Namespace string - WorkflowID string - RunID string - StartTime time.Time -} - -func TestGobEncoder(t *testing.T) { - encoder := NewGobEncoder() - - namespace := "test-namespace" - wid := uuid.New() - rid := uuid.New() - startTime := time.Now().UTC() - - // test encode and decode 1 object - msg := &testStruct{ - Namespace: namespace, - WorkflowID: wid, - RunID: rid, - StartTime: startTime, - } - payload, err := encoder.Encode(msg) - require.NoError(t, err) - var decoded *testStruct - err = encoder.Decode(payload, &decoded) - require.NoError(t, err) - require.Equal(t, msg, decoded) - - // test encode and decode 2 objects - msg2 := "test-string" - payload, err = encoder.Encode(msg2, msg) - require.NoError(t, err) - var decoded2 string - err = encoder.Decode(payload, &decoded2, &decoded) - require.NoError(t, err) - require.Equal(t, msg, decoded) - require.Equal(t, msg2, decoded2) - - // test encode and decode 0 object - _, err = encoder.Encode() - require.Error(t, err) - err = encoder.Decode(payload) - require.Error(t, err) -} diff --git a/common/codec/jsonpb.go b/common/codec/jsonpb.go index 6915b229ec2..79e04552c08 100644 --- a/common/codec/jsonpb.go +++ b/common/codec/jsonpb.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package codec import ( @@ -118,7 +94,7 @@ func (e *JSONPBEncoder) encodeSlice( ) ([]byte, error) { var buf bytes.Buffer buf.WriteString("[") - for i := 0; i < len; i++ { + for i := range len { pb := item(i) bs, err := e.marshaler.Marshal(pb) if err != nil { @@ -126,12 +102,11 @@ func (e *JSONPBEncoder) encodeSlice( } buf.Write(bs) - if i == len-1 { - buf.WriteString("]") - } else { + if i < len-1 { buf.WriteString(",") } } + buf.WriteString("]") return buf.Bytes(), nil } diff --git a/common/codec/jsonpb_test.go b/common/codec/jsonpb_test.go index 7496cfcc471..925f85bef5f 100644 --- a/common/codec/jsonpb_test.go +++ b/common/codec/jsonpb_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package codec import ( @@ -30,13 +6,11 @@ import ( "time" "github.com/stretchr/testify/suite" - "google.golang.org/protobuf/types/known/timestamppb" - commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" historypb "go.temporal.io/api/history/v1" - "go.temporal.io/server/common/testing/protoassert" + "google.golang.org/protobuf/types/known/timestamppb" ) type ( @@ -81,14 +55,14 @@ func (s *jsonpbEncoderSuite) SetupSuite() { func (s *jsonpbEncoderSuite) TestEncode() { json, err := s.encoder.Encode(history) - s.Nil(err) + s.Require().NoError(err) s.JSONEq(encodedHistory, string(json)) } func (s *jsonpbEncoderSuite) TestDecode() { var val historypb.History err := s.encoder.Decode([]byte(encodedHistory), &val) - s.Nil(err) + s.Require().NoError(err) protoassert.ProtoEqual(s.T(), &val, history) } @@ -99,10 +73,18 @@ func (s *jsonpbEncoderSuite) TestEncodeHistories() { histories = append(histories, history) json, err := s.encoder.EncodeHistories(histories) - s.Nil(err) + s.Require().NoError(err) s.JSONEq(fmt.Sprintf("[%[1]s,%[1]s,%[1]s]", encodedHistory), string(json)) } +func (s *jsonpbEncoderSuite) TestEncodeEmptyHistories() { + var histories []*historypb.History + + json, err := s.encoder.EncodeHistories(histories) + s.Require().NoError(err) + s.JSONEq("[]", string(json)) +} + func (s *jsonpbEncoderSuite) TestDecodeHistories() { historyJSON := fmt.Sprintf("[%[1]s,%[1]s,%[1]s]", encodedHistory) @@ -113,7 +95,7 @@ func (s *jsonpbEncoderSuite) TestDecodeHistories() { decodedHistories, err := s.encoder.DecodeHistories([]byte(historyJSON)) - s.Nil(err) + s.Require().NoError(err) protoassert.ProtoSliceEqual(s.T(), histories, decodedHistories) } @@ -127,6 +109,6 @@ func (s *jsonpbEncoderSuite) TestDecodeOldHistories() { decodedHistories, err := s.encoder.DecodeHistories([]byte(historyJSON)) - s.Nil(err) + s.Require().NoError(err) protoassert.ProtoSliceEqual(s.T(), historyEvents, decodedHistories) } diff --git a/common/collection/concurrent_tx_map.go 
b/common/collection/concurrent_tx_map.go index 7fbaadc8af7..2cc7324f8a2 100644 --- a/common/collection/concurrent_tx_map.go +++ b/common/collection/concurrent_tx_map.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package collection import ( @@ -60,7 +36,7 @@ type ( // of thread safe map mapShard struct { sync.RWMutex - items map[interface{}]interface{} + items map[any]any } ) @@ -86,10 +62,10 @@ func NewShardedConcurrentTxMap(initialCap int, hashfn HashFunc) ConcurrentTxMap } // Get returns the value corresponding to the key, if it exist -func (cmap *ShardedConcurrentTxMap) Get(key interface{}) (interface{}, bool) { +func (cmap *ShardedConcurrentTxMap) Get(key any) (any, bool) { shard := cmap.getShard(key) var ok bool - var value interface{} + var value any shard.RLock() if shard.items != nil { value, ok = shard.items[key] @@ -99,13 +75,13 @@ func (cmap *ShardedConcurrentTxMap) Get(key interface{}) (interface{}, bool) { } // Contains returns true if the key exist and false otherwise -func (cmap *ShardedConcurrentTxMap) Contains(key interface{}) bool { +func (cmap *ShardedConcurrentTxMap) Contains(key any) bool { _, ok := cmap.Get(key) return ok } // Put records the given key value mapping. Overwrites previous values -func (cmap *ShardedConcurrentTxMap) Put(key interface{}, value interface{}) { +func (cmap *ShardedConcurrentTxMap) Put(key any, value any) { shard := cmap.getShard(key) shard.Lock() cmap.lazyInitShard(shard) @@ -119,7 +95,7 @@ func (cmap *ShardedConcurrentTxMap) Put(key interface{}, value interface{}) { // PutIfNotExist records the mapping, if there is no mapping for this key already // Returns true if the mapping was recorded, false otherwise -func (cmap *ShardedConcurrentTxMap) PutIfNotExist(key interface{}, value interface{}) bool { +func (cmap *ShardedConcurrentTxMap) PutIfNotExist(key any, value any) bool { shard := cmap.getShard(key) var ok bool shard.Lock() @@ -134,7 +110,7 @@ func (cmap *ShardedConcurrentTxMap) PutIfNotExist(key interface{}, value interfa } // Remove deletes the given key from the map -func (cmap *ShardedConcurrentTxMap) Remove(key interface{}) { +func (cmap *ShardedConcurrentTxMap) Remove(key any) { shard := cmap.getShard(key) 
shard.Lock() cmap.lazyInitShard(shard) @@ -148,9 +124,9 @@ func (cmap *ShardedConcurrentTxMap) Remove(key interface{}) { // GetAndDo returns the value corresponding to the key, and apply fn to key value before return value // return (value, value exist or not, error when evaluation fn) -func (cmap *ShardedConcurrentTxMap) GetAndDo(key interface{}, fn ActionFunc) (interface{}, bool, error) { +func (cmap *ShardedConcurrentTxMap) GetAndDo(key any, fn ActionFunc) (any, bool, error) { shard := cmap.getShard(key) - var value interface{} + var value any var ok bool var err error shard.Lock() @@ -166,7 +142,7 @@ func (cmap *ShardedConcurrentTxMap) GetAndDo(key interface{}, fn ActionFunc) (in // PutOrDo put the key value in the map, if key does not exists, otherwise, call fn with existing key and value // return (value, fn evaluated or not, error when evaluation fn) -func (cmap *ShardedConcurrentTxMap) PutOrDo(key interface{}, value interface{}, fn ActionFunc) (interface{}, bool, error) { +func (cmap *ShardedConcurrentTxMap) PutOrDo(key any, value any, fn ActionFunc) (any, bool, error) { shard := cmap.getShard(key) var err error shard.Lock() @@ -184,7 +160,7 @@ func (cmap *ShardedConcurrentTxMap) PutOrDo(key interface{}, value interface{}, } // RemoveIf deletes the given key from the map if fn return true -func (cmap *ShardedConcurrentTxMap) RemoveIf(key interface{}, fn PredicateFunc) bool { +func (cmap *ShardedConcurrentTxMap) RemoveIf(key any, fn PredicateFunc) bool { shard := cmap.getShard(key) var removed bool shard.Lock() @@ -220,7 +196,7 @@ func (cmap *ShardedConcurrentTxMap) Iter() MapIterator { iterator.stopCh = make(chan struct{}) go func(iterator *mapIteratorImpl) { - for i := 0; i < nShards; i++ { + for i := range nShards { cmap.shards[i].RLock() for k, v := range cmap.shards[i].items { entry := &MapEntry{Key: k, Value: v} @@ -245,13 +221,13 @@ func (cmap *ShardedConcurrentTxMap) Len() int { return int(atomic.LoadInt32(&cmap.size)) } -func (cmap 
*ShardedConcurrentTxMap) getShard(key interface{}) *mapShard { +func (cmap *ShardedConcurrentTxMap) getShard(key any) *mapShard { shardIdx := cmap.hashfn(key) % nShards return &cmap.shards[shardIdx] } func (cmap *ShardedConcurrentTxMap) lazyInitShard(shard *mapShard) { if shard.items == nil { - shard.items = make(map[interface{}]interface{}, cmap.initialCap) + shard.items = make(map[any]any, cmap.initialCap) } } diff --git a/common/collection/concurrent_tx_map_test.go b/common/collection/concurrent_tx_map_test.go index d99ba44e648..026003ea2f4 100644 --- a/common/collection/concurrent_tx_map_test.go +++ b/common/collection/concurrent_tx_map_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package collection import ( @@ -31,7 +7,7 @@ import ( "sync/atomic" "testing" - "github.com/pborman/uuid" + "github.com/google/uuid" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" ) @@ -79,11 +55,11 @@ func (s *ConcurrentTxMapSuite) TestLen() { func (s *ConcurrentTxMapSuite) TestGetAndDo() { testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) - key := uuid.New() + key := uuid.NewString() var value intType fnApplied := false - interf, ok, err := testMap.GetAndDo(key, func(key interface{}, value interface{}) error { + interf, ok, err := testMap.GetAndDo(key, func(key any, value any) error { fnApplied = true return nil }) @@ -94,7 +70,7 @@ func (s *ConcurrentTxMapSuite) TestGetAndDo() { value = intType(1) testMap.Put(key, &value) - interf, ok, err = testMap.GetAndDo(key, func(key interface{}, value interface{}) error { + interf, ok, err = testMap.GetAndDo(key, func(key any, value any) error { fnApplied = true intValue := value.(*intType) *intValue++ @@ -110,12 +86,12 @@ func (s *ConcurrentTxMapSuite) TestGetAndDo() { func (s *ConcurrentTxMapSuite) TestPutOrDo() { testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) - key := uuid.New() + key := uuid.NewString() var value intType fnApplied := false value = intType(1) - interf, ok, err := testMap.PutOrDo(key, &value, func(key interface{}, value interface{}) error { + interf, ok, err := testMap.PutOrDo(key, &value, func(key any, value any) error { fnApplied = true return errors.New("some err") }) @@ -126,7 +102,7 @@ func (s *ConcurrentTxMapSuite) TestPutOrDo() { s.False(fnApplied, "PutOrDo should not apply function when key not exixts") anotherValue := intType(111) - interf, ok, err = testMap.PutOrDo(key, &anotherValue, func(key interface{}, value interface{}) error { + interf, ok, err = testMap.PutOrDo(key, &anotherValue, func(key any, value any) error { fnApplied = true intValue := value.(*intType) *intValue++ @@ -141,18 +117,18 @@ func (s *ConcurrentTxMapSuite) TestPutOrDo() { func (s 
*ConcurrentTxMapSuite) TestRemoveIf() { testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) - key := uuid.New() + key := uuid.NewString() value := intType(1) testMap.Put(key, &value) - removed := testMap.RemoveIf(key, func(key interface{}, value interface{}) bool { + removed := testMap.RemoveIf(key, func(key any, value any) bool { intValue := value.(*intType) return *intValue == intType(2) }) s.Equal(1, testMap.Len(), "TestRemoveIf should only entry if condition is met") s.False(removed, "TestRemoveIf should return false if key is not deleted") - removed = testMap.RemoveIf(key, func(key interface{}, value interface{}) bool { + removed = testMap.RemoveIf(key, func(key any, value any) bool { intValue := value.(*intType) return *intValue == intType(1) }) @@ -165,8 +141,8 @@ func (s *ConcurrentTxMapSuite) TestGetAfterPut() { countMap := make(map[string]int) testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) - for i := 0; i < 1024; i++ { - key := uuid.New() + for range 1024 { + key := uuid.NewString() countMap[key] = 0 testMap.Put(key, boolType(true)) } @@ -199,7 +175,7 @@ func (s *ConcurrentTxMapSuite) TestGetAfterPut() { func (s *ConcurrentTxMapSuite) TestPutIfNotExist() { testMap := NewShardedConcurrentTxMap(1, UUIDHashCode) - key := uuid.New() + key := uuid.NewString() ok := testMap.PutIfNotExist(key, boolType(true)) s.True(ok, "PutIfNotExist failed to insert item") ok = testMap.PutIfNotExist(key, boolType(true)) @@ -209,8 +185,8 @@ func (s *ConcurrentTxMapSuite) TestPutIfNotExist() { func (s *ConcurrentTxMapSuite) TestMapConcurrency() { nKeys := 1024 keys := make([]string, nKeys) - for i := 0; i < nKeys; i++ { - keys[i] = uuid.New() + for i := range nKeys { + keys[i] = uuid.NewString() } var total int32 @@ -220,13 +196,13 @@ func (s *ConcurrentTxMapSuite) TestMapConcurrency() { startWG.Add(1) - for i := 0; i < 10; i++ { + for range 10 { doneWG.Add(1) go func() { startWG.Wait() - for n := 0; n < nKeys; n++ { + for n := range nKeys { val := intType(rand.Int()) 
if testMap.PutIfNotExist(keys[n], val) { atomic.AddInt32(&total, int32(val)) @@ -244,7 +220,7 @@ func (s *ConcurrentTxMapSuite) TestMapConcurrency() { s.Equal(nKeys, testMap.Len(), "Wrong concurrent map size") var gotTotal int32 - for i := 0; i < nKeys; i++ { + for i := range nKeys { v, ok := testMap.Get(keys[i]) s.True(ok, "Get failed to find previously inserted key") intVal := v.(intType) diff --git a/common/collection/indexedtakelist.go b/common/collection/indexedtakelist.go index 772ebb093f3..14cb54f9c72 100644 --- a/common/collection/indexedtakelist.go +++ b/common/collection/indexedtakelist.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package collection // IndexedTakeList holds a set of values that can only be observed by being diff --git a/common/collection/indexedtakelist_test.go b/common/collection/indexedtakelist_test.go index 3ba6b463b25..3e4f46da49d 100644 --- a/common/collection/indexedtakelist_test.go +++ b/common/collection/indexedtakelist_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection_test import ( diff --git a/common/collection/interface.go b/common/collection/interface.go index 5ec550f9302..f07b90e2e19 100644 --- a/common/collection/interface.go +++ b/common/collection/interface.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package collection type ( @@ -40,37 +16,37 @@ type ( } // HashFunc represents a hash function for string - HashFunc func(interface{}) uint32 + HashFunc func(any) uint32 // ActionFunc take a key and value, do calculation and return err - ActionFunc func(key interface{}, value interface{}) error + ActionFunc func(key any, value any) error // PredicateFunc take a key and value, do calculation and return boolean - PredicateFunc func(key interface{}, value interface{}) bool + PredicateFunc func(key any, value any) bool // ConcurrentTxMap is a generic interface for any implementation of a dictionary // or a key value lookup table that is thread safe, and providing functionality // to modify key / value pair inside within a transaction ConcurrentTxMap interface { // Get returns the value for the given key - Get(key interface{}) (interface{}, bool) + Get(key any) (any, bool) // Contains returns true if the key exist and false otherwise - Contains(key interface{}) bool + Contains(key any) bool // Put records the mapping from given key to value - Put(key interface{}, value interface{}) + Put(key any, value any) // PutIfNotExist records the key value mapping only // if the mapping does not already exist - PutIfNotExist(key interface{}, value interface{}) bool + PutIfNotExist(key any, value any) bool // Remove deletes the key from the map - Remove(key interface{}) + Remove(key any) // GetAndDo returns the value corresponding to the key, and apply fn to key value before return value // return (value, value exist or not, error when evaluation fn) - GetAndDo(key interface{}, fn ActionFunc) (interface{}, bool, error) + GetAndDo(key any, fn ActionFunc) (any, bool, error) // PutOrDo put the key value in the map, if key does not exists, otherwise, call fn with existing key and value // return (value, fn evaluated or not, error when evaluation fn) - PutOrDo(key interface{}, value interface{}, fn ActionFunc) (interface{}, bool, error) + PutOrDo(key any, value any, fn ActionFunc) 
(any, bool, error) // RemoveIf deletes the given key from the map if fn return true // return whether the key is removed or not - RemoveIf(key interface{}, fn PredicateFunc) bool + RemoveIf(key any, fn PredicateFunc) bool // Iter returns an iterator to the map Iter() MapIterator // Len returns the number of items in the map @@ -90,9 +66,9 @@ type ( // MapEntry represents a key-value entry within the map MapEntry struct { // Key represents the key - Key interface{} + Key any // Value represents the value - Value interface{} + Value any } ) diff --git a/common/collection/iterator.go b/common/collection/iterator.go index 91c666ffe79..6709a635fd3 100644 --- a/common/collection/iterator.go +++ b/common/collection/iterator.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package collection type ( diff --git a/common/collection/oncemap.go b/common/collection/oncemap.go index 407942cdb90..cb6ac74e08d 100644 --- a/common/collection/oncemap.go +++ b/common/collection/oncemap.go @@ -1,26 +1,3 @@ -// The MIT License - -// -// Copyright (c) 2024 Temporal Technologies Inc. All rights reserved. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection import "sync" @@ -42,7 +19,41 @@ func NewOnceMap[K comparable, T any](construct func(K) T) *OnceMap[K, T] { } } -func (p *OnceMap[K, T]) Get(key K) T { +func (m *OnceMap[K, T]) Get(key K) T { + m.mu.RLock() + value, ok := m.inner[key] + m.mu.RUnlock() + if !ok { + m.mu.Lock() + defer m.mu.Unlock() + if value, ok = m.inner[key]; !ok { + value = m.construct(key) + m.inner[key] = value + } + } + + return value +} + +// FallibleOnceMap is a concurrent map which lazily constructs its values. 
Map values are initialized on-the-fly, using +// a provided construction function only when a key is accessed for the first time. +// If the construct function returns an error, the value is not cached. +type FallibleOnceMap[K comparable, T any] struct { + mu sync.RWMutex + inner map[K]T + construct func(K) (T, error) +} + +// NewFallibleOnceMap creates a [FallibleOnceMap] from a given construct function. +// construct should be kept light as it is called while holding a lock on the entire map. +func NewFallibleOnceMap[K comparable, T any](construct func(K) (T, error)) *FallibleOnceMap[K, T] { + return &FallibleOnceMap[K, T]{ + construct: construct, + inner: make(map[K]T, 0), + } +} + +func (p *FallibleOnceMap[K, T]) Get(key K) (T, error) { p.mu.RLock() value, ok := p.inner[key] p.mu.RUnlock() @@ -50,10 +61,24 @@ func (p *OnceMap[K, T]) Get(key K) T { p.mu.Lock() defer p.mu.Unlock() if value, ok = p.inner[key]; !ok { - value = p.construct(key) + var err error + value, err = p.construct(key) + if err != nil { + return value, err + } p.inner[key] = value } } - return value + return value, nil +} + +func (p *FallibleOnceMap[K, T]) Pop(key K) (T, bool) { + p.mu.Lock() + defer p.mu.Unlock() + val, ok := p.inner[key] + if ok { + delete(p.inner, key) + } + return val, ok } diff --git a/common/collection/oncemap_test.go b/common/collection/oncemap_test.go index 8593186153b..73b08db37a5 100644 --- a/common/collection/oncemap_test.go +++ b/common/collection/oncemap_test.go @@ -1,26 +1,3 @@ -// The MIT License - -// -// Copyright (c) 2024 Temporal Technologies Inc. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection import ( diff --git a/common/collection/paging_iterator.go b/common/collection/paging_iterator.go index fd3730fa21a..5fa1683718d 100644 --- a/common/collection/paging_iterator.go +++ b/common/collection/paging_iterator.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection type ( diff --git a/common/collection/paging_iterator_test.go b/common/collection/paging_iterator_test.go index d7c8df90572..e788af230f0 100644 --- a/common/collection/paging_iterator_test.go +++ b/common/collection/paging_iterator_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package collection import ( @@ -106,7 +82,7 @@ func (s *pagingIteratorSuite) TestIteration_NoErr() { func (s *pagingIteratorSuite) TestIteration_Err_Beginging() { phase := 0 - ite := NewPagingIterator(func(token []byte) ([]interface{}, []byte, error) { + ite := NewPagingIterator(func(token []byte) ([]any, []byte, error) { switch phase { case 0: defer func() { phase++ }() @@ -126,13 +102,13 @@ func (s *pagingIteratorSuite) TestIteration_Err_Beginging() { func (s *pagingIteratorSuite) TestIteration_Err_NotBegining() { phase := 0 - outputs := [][]interface{}{ + outputs := [][]any{ {1, 2, 3, 4, 5}, } tokens := [][]byte{ []byte("some random token 1"), } - pagingFn := func(token []byte) ([]interface{}, []byte, error) { + pagingFn := func(token []byte) ([]any, []byte, error) { switch phase { case 0: s.Equal(0, len(token)) diff --git a/common/collection/priority_queue.go b/common/collection/priority_queue.go index c80b9a87b2c..f499bbcf813 100644 --- a/common/collection/priority_queue.go +++ b/common/collection/priority_queue.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection import ( @@ -102,12 +78,12 @@ func (pq *priorityQueueImpl[T]) Swap(i, j int) { } // Push push an item to priority queue, used by go internal heap implementation -func (pq *priorityQueueImpl[T]) Push(item interface{}) { +func (pq *priorityQueueImpl[T]) Push(item any) { pq.items = append(pq.items, item.(T)) } // Pop pop an item from priority queue, used by go internal heap implementation -func (pq *priorityQueueImpl[T]) Pop() interface{} { +func (pq *priorityQueueImpl[T]) Pop() any { pqItem := pq.items[pq.Len()-1] pq.items = pq.items[0 : pq.Len()-1] return pqItem diff --git a/common/collection/priority_queue_test.go b/common/collection/priority_queue_test.go index 789ab752cb2..87757263e78 100644 --- a/common/collection/priority_queue_test.go +++ b/common/collection/priority_queue_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection import ( @@ -118,11 +94,11 @@ func (s *PriorityQueueSuite) TestInsertAndPop() { } func (s *PriorityQueueSuite) TestRandomNumber() { - for round := 0; round < 1000; round++ { + for range 1000 { expected := []int{} result := []int{} - for i := 0; i < 1000; i++ { + for range 1000 { num := rand.Int() s.pq.Add(&testPriorityQueueItem{num}) expected = append(expected, num) diff --git a/common/collection/sorted_set_manager.go b/common/collection/sorted_set_manager.go index ee6de2e7b61..b9be069cee7 100644 --- a/common/collection/sorted_set_manager.go +++ b/common/collection/sorted_set_manager.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2024 Temporal Technologies Inc. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection import "slices" diff --git a/common/collection/sorted_set_manager_test.go b/common/collection/sorted_set_manager_test.go index 49036f6d8c8..a808c3fc5de 100644 --- a/common/collection/sorted_set_manager_test.go +++ b/common/collection/sorted_set_manager_test.go @@ -1,25 +1,3 @@ -// The MIT License -// -// Copyright (c) 2024 Temporal Technologies Inc. All rights reserved. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection_test import ( diff --git a/common/collection/sync_map.go b/common/collection/sync_map.go new file mode 100644 index 00000000000..900e6d9779f --- /dev/null +++ b/common/collection/sync_map.go @@ -0,0 +1,77 @@ +package collection + +import ( + "maps" + "sync" +) + +// SyncMap implements a simple mutex-wrapped map. SyncMap is copyable like a normal map[K]V. +type SyncMap[K comparable, V any] struct { + // Use a pointer to RWMutex instead of embedding so that the contents of this struct itself + // are immutable and copyable, and copies refer to the same RWMutex and map. + *sync.RWMutex + // For the same reason, contents (the pointer) should not be changed. 
+ contents map[K]V +} + +func NewSyncMap[K comparable, V any]() SyncMap[K, V] { + return SyncMap[K, V]{ + RWMutex: &sync.RWMutex{}, + contents: make(map[K]V), + } +} + +func (m *SyncMap[K, V]) Get(key K) (value V, ok bool) { + m.RLock() + defer m.RUnlock() + value, ok = m.contents[key] + return +} + +func (m *SyncMap[K, V]) GetOrSet(key K, value V) (v V, exist bool) { + m.RLock() + currentValue, ok := m.contents[key] + m.RUnlock() + if ok { + return currentValue, ok + } + + m.Lock() + defer m.Unlock() + currentValue, ok = m.contents[key] + if ok { + return currentValue, ok + } + m.contents[key] = value + return value, false +} + +func (m *SyncMap[K, V]) Set(key K, value V) { + m.Lock() + defer m.Unlock() + m.contents[key] = value +} + +func (m *SyncMap[K, V]) Delete(key K) { + m.Lock() + defer m.Unlock() + delete(m.contents, key) +} + +func (m *SyncMap[K, V]) Pop(key K) (value V, ok bool) { + m.Lock() + defer m.Unlock() + value, ok = m.contents[key] + if ok { + delete(m.contents, key) + } + return value, ok +} + +func (m *SyncMap[K, V]) PopAll() map[K]V { + m.Lock() + defer m.Unlock() + contents := maps.Clone(m.contents) + clear(m.contents) + return contents +} diff --git a/common/collection/sync_map_test.go b/common/collection/sync_map_test.go new file mode 100644 index 00000000000..87521542f79 --- /dev/null +++ b/common/collection/sync_map_test.go @@ -0,0 +1,145 @@ +package collection_test + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "go.temporal.io/server/common/collection" +) + +// This isn't exhaustive but serves as a basic stress test to ensure our implementation is correct +func TestMap_MultiThreaded(t *testing.T) { + m := collection.NewSyncMap[int, int]() + var wg sync.WaitGroup + barrier := make(chan struct{}) + wg.Add(5) + go func() { + defer wg.Done() + <-barrier + for i := range 1000 { + m.Set(i, i) + } + }() + go func() { + <-barrier + defer wg.Done() + for i := range 1000 { + m.Get(i) + } + }() + go func() { +
<-barrier + defer wg.Done() + for i := range 1000 { + m.GetOrSet(i, i) + } + }() + go func() { + <-barrier + defer wg.Done() + for i := range 1000 { + m.Pop(i) + } + }() + go func() { + <-barrier + defer wg.Done() + for range 1000 { + m.PopAll() + } + }() + close(barrier) + wg.Wait() +} + +func TestMap_Get(t *testing.T) { + m := collection.NewSyncMap[int, int]() + m.Set(1, 1) + v, ok := m.Get(1) + if !ok { + t.Error("Expected true, got false") + } + if v != 1 { + t.Errorf("Expected 1, got %v", v) + } +} + +func TestMap_GetOrSet(t *testing.T) { + m := collection.NewSyncMap[int, int]() + m.Set(1, 1) + v, ok := m.GetOrSet(1, 2) + assert.True(t, ok, "expected exist key") + assert.Equal(t, 1, v, "expected the existing value") + + v, ok = m.GetOrSet(2, 2) + assert.False(t, ok, "expected non exist key") + assert.Equal(t, 2, v, "expected the new set value") + + v, ok = m.Get(2) + assert.True(t, ok, "expected exist key") + assert.Equal(t, 2, v, "expected the existing value") +} + +func TestMap_Delete(t *testing.T) { + m := collection.NewSyncMap[int, int]() + m.Set(1, 1) + m.Set(2, 1) + m.Delete(1) + _, ok := m.Get(1) + if ok { + t.Error("Expected false, got true") + } + v, ok := m.Get(2) + if !ok { + t.Error("Expected true, got false") + } + if v != 1 { + t.Errorf("Expected 1, got %v", v) + } +} + +func TestMap_Pop_ReturnsFalseWhenKeyDoesNotExist(t *testing.T) { + m := collection.NewSyncMap[int, int]() + _, ok := m.Pop(1) + if ok { + t.Error("Expected false, got true") + } +} + +func TestMap_Pop_ReturnsTrueWhenKeyExists(t *testing.T) { + m := collection.NewSyncMap[int, int]() + m.Set(1, 1) + v, ok := m.Pop(1) + if !ok { + t.Error("Expected true, got false") + } + if v != 1 { + t.Errorf("Expected 1, got %v", v) + } +} + +func TestMap_PopAll(t *testing.T) { + m := collection.NewSyncMap[int, int]() + values := m.PopAll() + assert.Equal(t, 0, len(values)) + + m.Set(1, 1) + m.Set(2, 2) + m.Set(3, 3) + m.Set(4, 4) + m.Pop(4) + + mCopy := m + + values = m.PopAll() + 
assert.Equal(t, 3, len(values)) + sum := 0 + for _, v := range values { + sum += v + } + assert.Equal(t, 6, sum) + + _, ok := mCopy.Get(3) + assert.False(t, ok, "SyncMap is not correctly copyable") +} diff --git a/common/collection/syncmap.go b/common/collection/syncmap.go deleted file mode 100644 index f7c9799de04..00000000000 --- a/common/collection/syncmap.go +++ /dev/null @@ -1,70 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package collection - -import "sync" - -// SyncMap implements a simple mutex-wrapped map. We've had bugs where we took the wrong lock -// when reimplementing this pattern, so it's worth having a single canonical implementation. 
-type SyncMap[K comparable, V any] struct { - *sync.RWMutex - contents map[K]V -} - -func NewSyncMap[K comparable, V any]() SyncMap[K, V] { - return SyncMap[K, V]{ - RWMutex: &sync.RWMutex{}, - contents: make(map[K]V), - } -} - -func (m *SyncMap[K, V]) Get(key K) (value V, ok bool) { - m.RLock() - defer m.RUnlock() - value, ok = m.contents[key] - return -} - -func (m *SyncMap[K, V]) Set(key K, value V) { - m.Lock() - defer m.Unlock() - m.contents[key] = value -} - -func (m *SyncMap[K, V]) Delete(key K) { - m.Lock() - defer m.Unlock() - delete(m.contents, key) -} - -func (m *SyncMap[K, V]) Pop(key K) (value V, ok bool) { - m.Lock() - defer m.Unlock() - value, ok = m.contents[key] - if ok { - delete(m.contents, key) - } - return value, ok -} diff --git a/common/collection/syncmap_test.go b/common/collection/syncmap_test.go deleted file mode 100644 index 55cbe7a4c2e..00000000000 --- a/common/collection/syncmap_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package collection_test - -import ( - "sync" - "testing" - - "go.temporal.io/server/common/collection" -) - -// This isn't exhaustive but serves as a basic stress test to ensure our implementation is collection -func TestMap_MultiThreaded(t *testing.T) { - m := collection.NewSyncMap[int, int]() - var wg sync.WaitGroup - barrier := make(chan struct{}) - wg.Add(3) - go func() { - defer wg.Done() - <-barrier - for i := 0; i < 1000; i++ { - m.Set(i, i) - } - }() - go func() { - <-barrier - defer wg.Done() - for i := 0; i < 1000; i++ { - m.Get(i) - } - }() - go func() { - <-barrier - defer wg.Done() - for i := 0; i < 1000; i++ { - m.Pop(i) - } - }() - close(barrier) - wg.Wait() -} - -func TestMap_Get(t *testing.T) { - m := collection.NewSyncMap[int, int]() - m.Set(1, 1) - v, ok := m.Get(1) - if !ok { - t.Error("Expected true, got false") - } - if v != 1 { - t.Errorf("Expected 1, got %v", v) - } -} - -func TestMap_Delete(t *testing.T) { - m := collection.NewSyncMap[int, int]() - m.Set(1, 1) - m.Set(2, 1) - m.Delete(1) - _, ok := m.Get(1) - if ok { - t.Error("Expected false, got true") - } - v, ok := m.Get(2) - if !ok { - t.Error("Expected true, got false") - } - if v != 1 { - t.Errorf("Expected 1, got %v", v) - } -} - -func TestMap_Pop_ReturnsFalseWhenKeyDoesNotExist(t *testing.T) { - m := collection.NewSyncMap[int, int]() - _, ok := m.Pop(1) - if ok { - t.Error("Expected false, got true") - } -} - -func TestMap_Pop_ReturnsTrueWhenKeyExists(t *testing.T) { - m := collection.NewSyncMap[int, int]() - m.Set(1, 1) - v, ok := m.Pop(1) - if !ok { - t.Error("Expected true, got false") - } - if v != 1 { - t.Errorf("Expected 1, got %v", v) - } -} diff --git a/common/collection/util.go 
b/common/collection/util.go index 1e1ea2bc57d..d49681d8ea7 100644 --- a/common/collection/util.go +++ b/common/collection/util.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package collection import ( @@ -32,7 +8,7 @@ import ( // UUIDHashCode is a hash function for hashing string uuid // if the uuid is malformed, then the hash function always // returns 0 as the hash value -func UUIDHashCode(input interface{}) uint32 { +func UUIDHashCode(input any) uint32 { key, ok := input.(string) if !ok { return 0 diff --git a/common/config/archival.go b/common/config/archival.go index e980701ecaa..251fa080f81 100644 --- a/common/config/archival.go +++ b/common/config/archival.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. 
All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package config import ( diff --git a/common/config/config.go b/common/config/config.go index e126f8823a9..9dde68338ba 100644 --- a/common/config/config.go +++ b/common/config/config.go @@ -1,37 +1,13 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package config import ( "bytes" "fmt" + "math" "strings" "time" - "gopkg.in/yaml.v3" - + "github.com/jmoiron/sqlx" "go.temporal.io/server/common/auth" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/dynamicconfig" @@ -41,6 +17,12 @@ import ( "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" "go.temporal.io/server/common/primitives" "go.temporal.io/server/common/telemetry" + "google.golang.org/grpc/keepalive" + "gopkg.in/yaml.v3" +) + +const ( + infinity = time.Duration(math.MaxInt64) ) type ( @@ -69,6 +51,8 @@ type ( NamespaceDefaults NamespaceDefaults `yaml:"namespaceDefaults"` // ExporterConfig allows the specification of process-wide OTEL exporters ExporterConfig telemetry.ExportConfig `yaml:"otel"` + // Visibility related config + Visibility Visibility `yaml:"visibility"` } // Service contains the service specific config items @@ -104,8 +88,42 @@ type ( // disabled. This setting only applies to the frontend service. HTTPPort int `yaml:"httpPort"` // HTTPAdditionalForwardedHeaders adds additional headers to the default set - // forwarded from HTTP to gRPC. + // forwarded from HTTP to gRPC. Any value with a trailing * will match the prefix before + // the asterisk (eg. `x-internal-*`) HTTPAdditionalForwardedHeaders []string `yaml:"httpAdditionalForwardedHeaders"` + // KeepAliveServerConfig keep alive configuration for the server + KeepAliveServerConfig KeepAliveServerConfig `yaml:"keepAliveServerConfig"` + // ClientConnectionConfig defines the connection config used by other services + // when they create a gRPC client connection to this service. 
+ ClientConnectionConfig ClientConnectionConfig `yaml:"clientConnectionConfig"` + } + + KeepAliveServerParameters struct { + MaxConnectionIdle *time.Duration `yaml:"maxConnectionIdle"` + MaxConnectionAge *time.Duration `yaml:"maxConnectionAge"` + MaxConnectionAgeGrace *time.Duration `yaml:"maxConnectionAgeGrace"` + Time *time.Duration `yaml:"keepAliveTime"` + Timeout *time.Duration `yaml:"keepAliveTimeout"` + } + + KeepAliveClientParameters struct { + Time *time.Duration `yaml:"keepAliveTime"` + Timeout *time.Duration `yaml:"keepAliveTimeout"` + PermitWithoutStream *bool `yaml:"keepAlivePermitWithoutStream"` + } + + ClientConnectionConfig struct { + KeepAliveClientConfig *KeepAliveClientParameters `yaml:"keepAliveClientParameters"` + } + + KeepAliveServerEnforcementPolicy struct { + MinTime *time.Duration `yaml:"minTime"` + PermitWithoutStream *bool `yaml:"permitWithoutStream"` + } + + KeepAliveServerConfig struct { + KeepAliveServerParameters *KeepAliveServerParameters `yaml:"keepAliveServerParameters"` + KeepAliveEnforcementPolicy *KeepAliveServerEnforcementPolicy `yaml:"keepAliveEnforcementPolicy"` } // Global contains config items that apply process-wide to all services @@ -243,9 +261,6 @@ type ( VisibilityStore string `yaml:"visibilityStore"` // SecondaryVisibilityStore is the name of the secondary datastore to be used for visibility records SecondaryVisibilityStore string `yaml:"secondaryVisibilityStore"` - // DEPRECATED: use VisibilityStore key instead of AdvancedVisibilityStore - // AdvancedVisibilityStore is the name of the datastore to be used for visibility records - AdvancedVisibilityStore string `yaml:"advancedVisibilityStore"` // NumHistoryShards is the desired number of history shards. This config doesn't // belong here, needs refactoring NumHistoryShards int32 `yaml:"numHistoryShards" validate:"nonzero"` @@ -270,15 +285,7 @@ type ( } FaultInjection struct { - // Rate is the probability that we will return an error from any call to any datastore. 
- // The value should be between 0.0 and 1.0. - // The fault injector will inject different errors depending on the data store and method. See the - // implementation for details. - // This field is ignored if Targets is non-empty. - Rate float64 `yaml:"rate"` - // Targets is a mapping of data store name to a targeted fault injection config for that data store. - // If Targets is non-empty, then Rate is ignored. // Here is an example config for targeted fault injection. This config will inject errors into the // UpdateShard method of the ShardStore at a rate of 100%. No other methods will be affected. /* @@ -292,6 +299,7 @@ type ( ShardOwnershipLostError: 1.0 # all UpdateShard calls will fail with ShardOwnershipLostError */ // This will cause the UpdateShard method of the ShardStore to always return ShardOwnershipLostError. + // See config/development-cass-es-fi.yaml for a more detailed example. Targets FaultInjectionTargets `yaml:"targets"` } @@ -346,6 +354,8 @@ type ( User string `yaml:"user"` // Password is the cassandra password used for authentication by gocql client Password string `yaml:"password"` + // AllowedAuthenticators is the optional list of authenticators the gocql client checks before approving the challenge request from the server. + AllowedAuthenticators []string `yaml:"allowedAuthenticators"` // keyspace is the cassandra keyspace Keyspace string `yaml:"keyspace" validate:"nonzero"` // Datacenter is the data center filter arg for cassandra @@ -354,6 +364,10 @@ type ( MaxConns int `yaml:"maxConns"` // ConnectTimeout is a timeout for initial dial to cassandra server (default: 600 milliseconds) ConnectTimeout time.Duration `yaml:"connectTimeout"` + // Timeout is a timeout for reads and, unless otherwise specified, writes. If not specified, ConnectTimeout is used. + Timeout time.Duration `yaml:"timeout"` + // WriteTimeout is a timeout for writing a query. If not specified, Timeout is used. 
+ WriteTimeout time.Duration `yaml:"writeTimeout"` // TLS configuration TLS *auth.TLS `yaml:"tls"` // Consistency configuration (defaults to LOCAL_QUORUM / LOCAL_SERIAL for all stores if this field not set) @@ -386,12 +400,31 @@ type ( SerialConsistency string `yaml:"serialConsistency"` } + // PasswordCommandConfig configures an external command to fetch the datastore password. + // The command's stdout is used as the password. + PasswordCommandConfig struct { + // Command is the path to the executable to run. + Command string `yaml:"command"` + // Args is the list of arguments to pass to the command. + Args []string `yaml:"args"` + // Timeout is the maximum duration to wait for the command to complete. + // Defaults to 30 seconds if unset. + Timeout time.Duration `yaml:"timeout"` + } + // SQL is the configuration for connecting to a SQL backed datastore SQL struct { + // Connect is a function that returns a sql db connection. String based configuration is ignored if this is provided. + Connect func(sqlConfig *SQL) (*sqlx.DB, error) `yaml:"-" json:"-"` // User is the username to be used for the conn User string `yaml:"user"` // Password is the password corresponding to the user name Password string `yaml:"password"` + // PasswordCommand executes an external command and uses its stdout as the password. + // Mutually exclusive with Password. + // If the command returns an expiring token (e.g. cloud IAM), set MaxConnLifetime + // to ensure connections are recycled before the token expires. + PasswordCommand *PasswordCommandConfig `yaml:"passwordCommand"` // PluginName is the name of SQL plugin PluginName string `yaml:"pluginName" validate:"nonzero"` // DatabaseName is the name of SQL database to connect to @@ -420,6 +453,12 @@ type ( CustomDatastoreConfig struct { // Name of the custom datastore Name string `yaml:"name"` + // IndexName represents a unique identifier for the data store. 
+ // The name "IndexName" inherits from the Elasticsearch config and refers to the index name. + // In SQL, it refers to the database name. + // For custom data store, you may pick any name as long as it's unique across custom data + // stores (Elasticsearch index names and SQL database names). + IndexName string `yaml:"indexName"` // Options to be used by AbstractDatastoreFactory implementation Options map[string]any `yaml:"options"` } @@ -462,6 +501,9 @@ type ( Filestore *FilestoreArchiver `yaml:"filestore"` Gstorage *GstorageArchiver `yaml:"gstorage"` S3store *S3Archiver `yaml:"s3store"` + // CustomStores contains the config for all custom history archivers + // The structure is a map of archiver name (scheme) to a map of config key-values + CustomStores map[string]map[string]any `yaml:"customStores"` } // VisibilityArchival contains the config for visibility archival @@ -479,6 +521,9 @@ type ( Filestore *FilestoreArchiver `yaml:"filestore"` S3store *S3Archiver `yaml:"s3store"` Gstorage *GstorageArchiver `yaml:"gstorage"` + // CustomStores contains the config for all custom visibility archivers + // The structure is a map of archiver name (scheme) to a map of config key-values + CustomStores map[string]map[string]any `yaml:"customStores"` } // FilestoreArchiver contain the config for filestore archiver @@ -521,6 +566,9 @@ type ( // HostPort is the host port to connect on. Host can be DNS name. See the above // comment: in many situations you can leave this empty. HostPort string `yaml:"hostPort"` + // HTTPHostPort is the HTTP host port to connect on. Host can be DNS name. See the above + // comment: in many situations you can leave this empty. + HTTPHostPort string `yaml:"httpHostPort"` // Force selection of either the "internode" or "frontend" TLS configs for these // connections (only those two strings are valid). 
ForceTLSConfig string `yaml:"forceTLSConfig"` @@ -556,10 +604,32 @@ type ( URI string `yaml:"URI"` } + + Visibility struct { + // PersistenceCustomSearchAttributes is a set of key-value pairs specifying the number of + // pre-allocated custom search attributes for each type. Pre-allocated custom search attributes + // are named following the convention `<Type><Number>` (eg. Keyword01) and are expected to exist in + // the data store (eg. SQL DB table column Keyword01 must exist). The pre-allocated custom search + // attributes serve as a limit for the number of custom search attributes you can create per + // namespace. + // If any type is not specified, it will pre-allocate the default number of custom search + // attributes for the type defined in the map defaultNumDbCustomSearchAttributes in + // common/searchattribute/sadefs.go. + // Modifying the number of pre-allocated custom search attributes: + // - if you increase a number, it will pre-allocate additional custom search attributes to match + // the desired number; + // - if you decrease a number, it will not delete the existing custom search attributes, ie., it + // is no-op. + // This config only applies to SQL or custom Visibility stores. + PersistenceCustomSearchAttributes map[string]int `yaml:"persistenceCustomSearchAttributes" validate:"persistence_custom_search_attributes"` + } + Authorization struct { // Signing key provider for validating JWT tokens JWTKeyProvider JWTKeyProvider `yaml:"jwtKeyProvider"` PermissionsClaimName string `yaml:"permissionsClaimName"` + // Regular expression to parse permissions claim value. The regex should contain named groups "namespace" and "role", for example + // `^(?P<role>\w+):(?P<namespace>\w+)$` will match `admin:default` and extract `default` as namespace and `admin` as role.
+ PermissionsRegex string `yaml:"permissionsRegex"` // Empty string for noopAuthorizer or "default" for defaultAuthorizer Authorizer string `yaml:"authorizer"` // Empty string for noopClaimMapper or "default" for defaultJWTClaimMapper @@ -568,6 +638,8 @@ type ( AuthHeaderName string `yaml:"authHeaderName"` // Name of extra auth header to pass to ClaimMapper (as `ExtraData`). Defaults to `authorization-extras`. AuthExtraHeaderName string `yaml:"authExtraHeaderName"` + // JWT audience for validating tokens + Audience string `yaml:"audience"` } // @@@SNIPSTART temporal-common-service-config-jwtkeyprovider @@ -580,14 +652,14 @@ type ( ) const ( - ShardStoreName DataStoreName = "ShardStore" - TaskStoreName DataStoreName = "TaskStore" - MetadataStoreName DataStoreName = "MetadataStore" - ExecutionStoreName DataStoreName = "ExecutionStore" - QueueName DataStoreName = "Queue" - QueueV2Name DataStoreName = "QueueV2" - ClusterMDStoreName DataStoreName = "ClusterMDStore" - NexusIncomingServiceStoreName DataStoreName = "NexusIncomingServiceStore" + ShardStoreName DataStoreName = "ShardStore" + TaskStoreName DataStoreName = "TaskStore" + MetadataStoreName DataStoreName = "MetadataStore" + ExecutionStoreName DataStoreName = "ExecutionStore" + QueueName DataStoreName = "Queue" + QueueV2Name DataStoreName = "QueueV2" + ClusterMDStoreName DataStoreName = "ClusterMDStore" + NexusEndpointStoreName DataStoreName = "NexusEndpointStore" ) const ( @@ -607,7 +679,7 @@ func (c *Config) Validate() error { } _, hasIFE := c.Services[string(primitives.InternalFrontendService)] - if hasIFE && (c.PublicClient.HostPort != "" || c.PublicClient.ForceTLSConfig != "") { + if hasIFE && (c.PublicClient.HostPort != "" || c.PublicClient.ForceTLSConfig != "" || c.PublicClient.HTTPHostPort != "") { return fmt.Errorf("when using internal-frontend, publicClient must be empty") } @@ -650,3 +722,135 @@ func (p *JWTKeyProvider) HasSourceURIsConfigured() bool { } return false } + +func (k *KeepAliveServerConfig) 
GetKeepAliveServerParameters() keepalive.ServerParameters { + // the default config is same as grpc default config, same for the below client config and enforcement policy + defaultConfig := keepalive.ServerParameters{ + MaxConnectionIdle: infinity, + MaxConnectionAge: infinity, + MaxConnectionAgeGrace: infinity, + Time: 2 * time.Hour, + Timeout: 20 * time.Second, + } + if k == nil || k.KeepAliveServerParameters == nil { + return defaultConfig + } + kp := k.KeepAliveServerParameters + if kp.MaxConnectionIdle != nil { + defaultConfig.MaxConnectionIdle = *kp.MaxConnectionIdle + } + if kp.MaxConnectionAge != nil { + defaultConfig.MaxConnectionAge = *kp.MaxConnectionAge + } + if kp.MaxConnectionAgeGrace != nil { + defaultConfig.MaxConnectionAgeGrace = *kp.MaxConnectionAgeGrace + } + if kp.Time != nil { + defaultConfig.Time = *kp.Time + } + if kp.Timeout != nil { + defaultConfig.Timeout = *kp.Timeout + } + return defaultConfig +} + +func (c *ClientConnectionConfig) GetKeepAliveClientParameters() keepalive.ClientParameters { + defaultConfig := keepalive.ClientParameters{ + Time: infinity, + Timeout: 20 * time.Second, + PermitWithoutStream: false, + } + + if c == nil || c.KeepAliveClientConfig == nil { + return defaultConfig + } + + if c.KeepAliveClientConfig.Time != nil { + defaultConfig.Time = *c.KeepAliveClientConfig.Time + } + if c.KeepAliveClientConfig.Timeout != nil { + defaultConfig.Timeout = *c.KeepAliveClientConfig.Timeout + } + if c.KeepAliveClientConfig.PermitWithoutStream != nil { + defaultConfig.PermitWithoutStream = *c.KeepAliveClientConfig.PermitWithoutStream + } + + return defaultConfig +} + +func (k *KeepAliveServerConfig) GetKeepAliveEnforcementPolicy() keepalive.EnforcementPolicy { + defaultConfig := keepalive.EnforcementPolicy{ + MinTime: 5 * time.Minute, + PermitWithoutStream: false, + } + + if k == nil || k.KeepAliveEnforcementPolicy == nil { + return defaultConfig + } + + if k.KeepAliveEnforcementPolicy.MinTime != nil { + defaultConfig.MinTime = 
*k.KeepAliveEnforcementPolicy.MinTime + } + if k.KeepAliveEnforcementPolicy.PermitWithoutStream != nil { + defaultConfig.PermitWithoutStream = *k.KeepAliveEnforcementPolicy.PermitWithoutStream + } + + return defaultConfig +} + +func (fi *FaultInjection) WithError(storeName DataStoreName, methodName, errorName string, probability float64) *FaultInjection { + if fi == nil { + return nil + } + m := fi.method(storeName, methodName) + m.Errors[errorName] = probability + fi.Targets.DataStores[storeName].Methods[methodName] = m + return fi +} + +func (fi *FaultInjection) WithMethodSeed(storeName DataStoreName, methodName string, seed int64) *FaultInjection { + if fi == nil { + return nil + } + m := fi.method(storeName, methodName) + m.Seed = seed + fi.Targets.DataStores[storeName].Methods[methodName] = m + return fi +} + +func (fi *FaultInjection) method(storeName DataStoreName, methodName string) FaultInjectionMethodConfig { + if fi.Targets.DataStores == nil { + fi.Targets.DataStores = map[DataStoreName]FaultInjectionDataStoreConfig{} + } + store, ok := fi.Targets.DataStores[storeName] + if !ok { + store = FaultInjectionDataStoreConfig{Methods: map[string]FaultInjectionMethodConfig{}} + } + method, ok := store.Methods[methodName] + if !ok { + method = FaultInjectionMethodConfig{Errors: map[string]float64{}} + } + store.Methods[methodName] = method + fi.Targets.DataStores[storeName] = store + return method +} + +func DefaultFaultInjection() *FaultInjection { + fiCfg := &FaultInjection{} + return fiCfg. + WithError(ExecutionStoreName, "CreateWorkflowExecution", "ResourceExhausted", 0.01). + WithError(ExecutionStoreName, "CreateWorkflowExecution", "Timeout", 0.01). + WithError(ExecutionStoreName, "CreateWorkflowExecution", "ExecuteAndTimeout", 0.01). + WithError(ExecutionStoreName, "UpdateWorkflowExecution", "ResourceExhausted", 0.01). + WithError(ExecutionStoreName, "UpdateWorkflowExecution", "Timeout", 0.01). 
+ WithError(ExecutionStoreName, "UpdateWorkflowExecution", "ExecuteAndTimeout", 0.01). + WithError(ExecutionStoreName, "GetWorkflowExecution", "ResourceExhausted", 0.01). + WithError(ExecutionStoreName, "GetWorkflowExecution", "Timeout", 0.01). + WithError(ExecutionStoreName, "GetCurrentExecution", "ResourceExhausted", 0.01). + WithError(ExecutionStoreName, "GetCurrentExecution", "Timeout", 0.01). + WithError(ExecutionStoreName, "AppendHistoryNodes", "ResourceExhausted", 0.01). + WithError(ExecutionStoreName, "AppendHistoryNodes", "Timeout", 0.01). + WithError(ExecutionStoreName, "AppendHistoryNodes", "ExecuteAndTimeout", 0.01). + WithError(ExecutionStoreName, "ReadHistoryBranch", "ResourceExhausted", 0.01). + WithError(ExecutionStoreName, "ReadHistoryBranch", "Timeout", 0.01) +} diff --git a/common/config/config_template_embedded.yaml b/common/config/config_template_embedded.yaml new file mode 100644 index 00000000000..f7b44358a60 --- /dev/null +++ b/common/config/config_template_embedded.yaml @@ -0,0 +1,394 @@ +# enable-template +log: + stdout: true + level: {{ default "info" (env "LOG_LEVEL") }} + +persistence: + numHistoryShards: {{ default "4" (env "NUM_HISTORY_SHARDS") }} + defaultStore: default + {{- $es := default "false" (env "ENABLE_ES") | lower -}} + {{- if eq $es "true" }} + visibilityStore: es-visibility + {{- else }} + visibilityStore: visibility + {{- end }} + datastores: + {{- $db := default "cassandra" (env "DB") | lower -}} + {{- if eq $db "cassandra" }} + default: + cassandra: + hosts: "{{ default "" (env "CASSANDRA_SEEDS") }}" + keyspace: "{{ default "temporal" (env "KEYSPACE") }}" + user: "{{ default "" (env "CASSANDRA_USER") }}" + password: "{{ default "" (env "CASSANDRA_PASSWORD") }}" + {{- if env "CASSANDRA_ALLOWED_AUTHENTICATORS" }} + allowedAuthenticators: {{ range split (env "CASSANDRA_ALLOWED_AUTHENTICATORS") "," }} + - {{trim .}} + {{- end }} + {{- end }} + port: {{ default "9042" (env "CASSANDRA_PORT") }} + maxConns: {{ default "20" 
(env "CASSANDRA_MAX_CONNS") }} + tls: + enabled: {{ default "false" (env "CASSANDRA_TLS_ENABLED") }} + caFile: {{ default "" (env "CASSANDRA_CA") }} + certFile: {{ default "" (env "CASSANDRA_CERT") }} + keyFile: {{ default "" (env "CASSANDRA_CERT_KEY") }} + caData: {{ default "" (env "CASSANDRA_CA_DATA") }} + certData: {{ default "" (env "CASSANDRA_CERT_DATA") }} + keyData: {{ default "" (env "CASSANDRA_CERT_KEY_DATA") }} + enableHostVerification: {{ default "false" (env "CASSANDRA_HOST_VERIFICATION") }} + serverName: {{ default "" (env "CASSANDRA_HOST_NAME") }} + {{- if env "CASSANDRA_ADDRESS_TRANSLATOR" }} + addressTranslator: + translator: {{ default "" (env "CASSANDRA_ADDRESS_TRANSLATOR") }} + {{- if env "CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS" }} + options: + advertised-hostname: {{ default "" (env "CASSANDRA_ADDRESS_TRANSLATOR_OPTIONS") }} + {{- end }} + {{- end }} + {{- else if eq $db "mysql8" }} + default: + sql: + pluginName: "{{ $db }}" + databaseName: "{{ default "temporal" (env "DBNAME") }}" + connectAddr: "{{ default "" (env "MYSQL_SEEDS") }}:{{ default "3306" (env "DB_PORT") }}" + connectProtocol: "tcp" + user: "{{ default "" (env "MYSQL_USER") }}" + password: "{{ default "" (env "MYSQL_PWD") }}" + {{- if env "MYSQL_TX_ISOLATION_COMPAT" }} + connectAttributes: + tx_isolation: "'READ-COMMITTED'" + {{- end }} + maxConns: {{ default "20" (env "SQL_MAX_CONNS") }} + maxIdleConns: {{ default "20" (env "SQL_MAX_IDLE_CONNS") }} + maxConnLifetime: {{ default "1h" (env "SQL_MAX_CONN_TIME") }} + tls: + enabled: {{ default "false" (env "SQL_TLS_ENABLED") }} + caFile: {{ default "" (env "SQL_CA") }} + certFile: {{ default "" (env "SQL_CERT") }} + keyFile: {{ default "" (env "SQL_CERT_KEY") }} + enableHostVerification: {{ default "false" (env "SQL_HOST_VERIFICATION") }} + serverName: {{ default "" (env "SQL_HOST_NAME") }} + visibility: + sql: + {{ $visibility_seeds_default := default "" (env "MYSQL_SEEDS") }} + {{ $visibility_seeds := default 
$visibility_seeds_default (env "VISIBILITY_MYSQL_SEEDS") }} + {{ $visibility_port_default := default "3306" (env "DB_PORT") }} + {{ $visibility_port := default $visibility_port_default (env "VISIBILITY_DB_PORT") }} + {{ $visibility_user_default := default "" (env "MYSQL_USER") }} + {{ $visibility_user := default $visibility_user_default (env "VISIBILITY_MYSQL_USER") }} + {{ $visibility_pwd_default := default "" (env "MYSQL_PWD") }} + {{ $visibility_pwd := default $visibility_pwd_default (env "VISIBILITY_MYSQL_PWD") }} + pluginName: "{{ $db }}" + databaseName: "{{ default "temporal_visibility" (env "VISIBILITY_DBNAME") }}" + connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" + connectProtocol: "tcp" + user: "{{ $visibility_user }}" + password: "{{ $visibility_pwd }}" + {{- if env "MYSQL_TX_ISOLATION_COMPAT" }} + connectAttributes: + tx_isolation: "'READ-COMMITTED'" + {{- end }} + maxConns: {{ default "10" (env "SQL_VIS_MAX_CONNS") }} + maxIdleConns: {{ default "10" (env "SQL_VIS_MAX_IDLE_CONNS") }} + maxConnLifetime: {{ default "1h" (env "SQL_VIS_MAX_CONN_TIME") }} + tls: + enabled: {{ default "false" (env "SQL_TLS_ENABLED") }} + caFile: {{ default "" (env "SQL_CA") }} + certFile: {{ default "" (env "SQL_CERT") }} + keyFile: {{ default "" (env "SQL_CERT_KEY") }} + enableHostVerification: {{ default "false" (env "SQL_HOST_VERIFICATION") }} + serverName: {{ default "" (env "SQL_HOST_NAME") }} + {{- else if eq $db "postgres12" "postgres12_pgx" }} + default: + sql: + pluginName: "{{ $db }}" + databaseName: "{{ default "temporal" (env "DBNAME") }}" + connectAddr: "{{ default "" (env "POSTGRES_SEEDS") }}:{{ default "5432" (env "DB_PORT") }}" + connectProtocol: "tcp" + user: "{{ default "" (env "POSTGRES_USER") }}" + password: "{{ default "" (env "POSTGRES_PWD") }}" + maxConns: {{ default "20" (env "SQL_MAX_CONNS") }} + maxIdleConns: {{ default "20" (env "SQL_MAX_IDLE_CONNS") }} + maxConnLifetime: {{ default "1h" (env "SQL_MAX_CONN_TIME") }} + tls: + enabled: {{ 
default "false" (env "SQL_TLS_ENABLED") }} + caFile: {{ default "" (env "SQL_CA") }} + certFile: {{ default "" (env "SQL_CERT") }} + keyFile: {{ default "" (env "SQL_CERT_KEY") }} + enableHostVerification: {{ default "false" (env "SQL_HOST_VERIFICATION") }} + serverName: {{ default "" (env "SQL_HOST_NAME") }} + visibility: + sql: + {{ $visibility_seeds_default := default "" (env "POSTGRES_SEEDS") }} + {{ $visibility_seeds := default $visibility_seeds_default (env "VISIBILITY_POSTGRES_SEEDS") }} + {{ $visibility_port_default := default "5432" (env "DB_PORT") }} + {{ $visibility_port := default $visibility_port_default (env "VISIBILITY_DB_PORT") }} + {{ $visibility_user_default := default "" (env "POSTGRES_USER") }} + {{ $visibility_user := default $visibility_user_default (env "VISIBILITY_POSTGRES_USER") }} + {{ $visibility_pwd_default := default "" (env "POSTGRES_PWD") }} + {{ $visibility_pwd := default $visibility_pwd_default (env "VISIBILITY_POSTGRES_PWD") }} + pluginName: "{{ $db }}" + databaseName: "{{ default "temporal_visibility" (env "VISIBILITY_DBNAME") }}" + connectAddr: "{{ $visibility_seeds }}:{{ $visibility_port }}" + connectProtocol: "tcp" + user: "{{ $visibility_user }}" + password: "{{ $visibility_pwd }}" + maxConns: {{ default "10" (env "SQL_VIS_MAX_CONNS") }} + maxIdleConns: {{ default "10" (env "SQL_VIS_MAX_IDLE_CONNS") }} + maxConnLifetime: {{ default "1h" (env "SQL_VIS_MAX_CONN_TIME") }} + tls: + enabled: {{ default "false" (env "SQL_TLS_ENABLED") }} + caFile: {{ default "" (env "SQL_CA") }} + certFile: {{ default "" (env "SQL_CERT") }} + keyFile: {{ default "" (env "SQL_CERT_KEY") }} + enableHostVerification: {{ default "false" (env "SQL_HOST_VERIFICATION") }} + serverName: {{ default "" (env "SQL_HOST_NAME") }} + {{- else if eq $db "sqlite" }} + default: + sql: + pluginName: "{{ $db }}" + databaseName: "{{ default "temporal" (env "DBNAME") }}" + connectAddr: "localhost" + connectProtocol: "tcp" + connectAttributes: + mode: "{{ default "rwc" 
(env "SQLITE_MODE") }}" + cache: "{{ default "private" (env "SQLITE_CACHE") }}" + setup: "{{ default "true" (env "SQLITE_SETUP") }}" + journal_mode: "{{ default "wal" (env "SQLITE_JOURNAL_MODE") }}" + synchronous: "{{ default "2" (env "SQLITE_SYNCHRONOUS") }}" + busy_timeout: "{{ default "10000" (env "SQLITE_BUSY_TIMEOUT") }}" + maxConns: {{ default "1" (env "SQL_MAX_CONNS") }} + maxIdleConns: {{ default "1" (env "SQL_MAX_IDLE_CONNS") }} + visibility: + sql: + pluginName: "{{ $db }}" + databaseName: "{{ default "temporal_visibility" (env "VISIBILITY_DBNAME") }}" + connectAddr: "localhost" + connectProtocol: "tcp" + connectAttributes: + mode: "{{ default "rwc" (env "SQLITE_MODE") }}" + cache: "{{ default "private" (env "SQLITE_CACHE") }}" + setup: "{{ default "true" (env "SQLITE_SETUP") }}" + journal_mode: "{{ default "wal" (env "SQLITE_JOURNAL_MODE") }}" + synchronous: "{{ default "2" (env "SQLITE_SYNCHRONOUS") }}" + busy_timeout: "{{ default "10000" (env "SQLITE_BUSY_TIMEOUT") }}" + maxConns: {{ default "1" (env "SQL_VIS_MAX_CONNS") }} + maxIdleConns: {{ default "1" (env "SQL_VIS_MAX_IDLE_CONNS") }} + {{- end }} + {{- if eq $es "true" }} + es-visibility: + elasticsearch: + version: {{ default "" (env "ES_VERSION") }} + url: + scheme: {{ default "http" (env "ES_SCHEME") }} + host: "{{ default "" (env "ES_SEEDS") }}:{{ default "9200" (env "ES_PORT") }}" + username: "{{ default "" (env "ES_USER") }}" + password: "{{ default "" (env "ES_PWD") }}" + indices: + visibility: "{{ default "temporal_visibility_v1_dev" (env "ES_VIS_INDEX") }}" + {{- $es_sec_vis_index := default "" (env "ES_SEC_VIS_INDEX") -}} + {{- if ne $es_sec_vis_index "" }} + secondary_visibility: "{{ $es_sec_vis_index }}" + {{- end }} + {{- end }} + +global: + membership: + maxJoinDuration: 30s + broadcastAddress: "{{ default "" (env "TEMPORAL_BROADCAST_ADDRESS") }}" + pprof: + port: {{ default "0" (env "PPROF_PORT") }} + tls: + refreshInterval: {{ default "0s" (env "TEMPORAL_TLS_REFRESH_INTERVAL") }} + 
expirationChecks: + warningWindow: {{ default "0s" (env "TEMPORAL_TLS_EXPIRATION_CHECKS_WARNING_WINDOW") }} + errorWindow: {{ default "0s" (env "TEMPORAL_TLS_EXPIRATION_CHECKS_ERROR_WINDOW") }} + checkInterval: {{ default "0s" (env "TEMPORAL_TLS_EXPIRATION_CHECKS_CHECK_INTERVAL") }} + internode: + # This server section configures the TLS certificate that internal temporal + # cluster nodes (history, matching, and internal-frontend) present to other + # clients within the Temporal Cluster. + server: + requireClientAuth: {{ default "false" (env "TEMPORAL_TLS_REQUIRE_CLIENT_AUTH") }} + + certFile: {{ default "" (env "TEMPORAL_TLS_SERVER_CERT") }} + keyFile: {{ default "" (env "TEMPORAL_TLS_SERVER_KEY") }} + {{- if env "TEMPORAL_TLS_SERVER_CA_CERT" }} + clientCaFiles: + - {{ default "" (env "TEMPORAL_TLS_SERVER_CA_CERT") }} + {{- end }} + + certData: {{ default "" (env "TEMPORAL_TLS_SERVER_CERT_DATA") }} + keyData: {{ default "" (env "TEMPORAL_TLS_SERVER_KEY_DATA") }} + {{- if env "TEMPORAL_TLS_SERVER_CA_CERT_DATA" }} + clientCaData: + - {{ default "" (env "TEMPORAL_TLS_SERVER_CA_CERT_DATA") }} + {{- end }} + + # This client section is used to configure the TLS clients within + # the Temporal Cluster that connect to an Internode (history, matching, or + # internal-frontend) + client: + serverName: {{ default "" (env "TEMPORAL_TLS_INTERNODE_SERVER_NAME") }} + disableHostVerification: {{ default "false" (env "TEMPORAL_TLS_INTERNODE_DISABLE_HOST_VERIFICATION")}} + {{- if env "TEMPORAL_TLS_SERVER_CA_CERT" }} + rootCaFiles: + - {{ default "" (env "TEMPORAL_TLS_SERVER_CA_CERT") }} + {{- end }} + {{- if env "TEMPORAL_TLS_SERVER_CA_CERT_DATA" }} + rootCaData: + - {{ default "" (env "TEMPORAL_TLS_SERVER_CA_CERT_DATA") }} + {{- end }} + frontend: + # This server section configures the TLS certificate that the Frontend + # server presents to external clients. 
+ server: + requireClientAuth: {{ default "false" (env "TEMPORAL_TLS_REQUIRE_CLIENT_AUTH") }} + certFile: {{ default "" (env "TEMPORAL_TLS_FRONTEND_CERT") }} + keyFile: {{ default "" (env "TEMPORAL_TLS_FRONTEND_KEY") }} + {{- if env "TEMPORAL_TLS_CLIENT1_CA_CERT" }} + clientCaFiles: + - {{ default "" (env "TEMPORAL_TLS_CLIENT1_CA_CERT") }} + - {{ default "" (env "TEMPORAL_TLS_CLIENT2_CA_CERT") }} + {{- end }} + + certData: {{ default "" (env "TEMPORAL_TLS_FRONTEND_CERT_DATA") }} + keyData: {{ default "" (env "TEMPORAL_TLS_FRONTEND_KEY_DATA") }} + {{- if env "TEMPORAL_TLS_CLIENT1_CA_CERT_DATA" }} + clientCaData: + - {{ default "" (env "TEMPORAL_TLS_CLIENT1_CA_CERT_DATA") }} + - {{ default "" (env "TEMPORAL_TLS_CLIENT2_CA_CERT_DATA") }} + {{- end }} + + # This client section is used to configure the TLS clients within + # the Temporal Cluster (specifically the Worker role) that connect to the Frontend service + client: + serverName: {{ default "" (env "TEMPORAL_TLS_FRONTEND_SERVER_NAME") }} + disableHostVerification: {{ default "false" (env "TEMPORAL_TLS_FRONTEND_DISABLE_HOST_VERIFICATION")}} + {{- if env "TEMPORAL_TLS_SERVER_CA_CERT" }} + rootCaFiles: + - {{ default "" (env "TEMPORAL_TLS_SERVER_CA_CERT") }} + {{- end }} + {{- if env "TEMPORAL_TLS_SERVER_CA_CERT_DATA" }} + rootCaData: + - {{ default "" (env "TEMPORAL_TLS_SERVER_CA_CERT_DATA") }} + {{- end }} + {{- if env "STATSD_ENDPOINT" }} + metrics: + statsd: + hostPort: {{ env "STATSD_ENDPOINT" }} + prefix: "temporal" + {{- else if env "PROMETHEUS_ENDPOINT" }} + metrics: + prometheus: + timerType: {{ default "histogram" (env "PROMETHEUS_TIMER_TYPE") }} + listenAddress: "{{ env "PROMETHEUS_ENDPOINT" }}" + {{- end }} + authorization: + jwtKeyProvider: + keySourceURIs: + {{- if env "TEMPORAL_JWT_KEY_SOURCE1" }} + - {{ default "" (env "TEMPORAL_JWT_KEY_SOURCE1") }} + {{- end }} + {{- if env "TEMPORAL_JWT_KEY_SOURCE2" }} + - {{ default "" (env "TEMPORAL_JWT_KEY_SOURCE2") }} + {{- end }} + refreshInterval: {{ default 
"1m" (env "TEMPORAL_JWT_KEY_REFRESH") }} + permissionsClaimName: {{ default "permissions" (env "TEMPORAL_JWT_PERMISSIONS_CLAIM") }} + permissionsRegex: {{ default "" (env "TEMPORAL_JWT_PERMISSIONS_REGEX") }} + authorizer: {{ default "" (env "TEMPORAL_AUTH_AUTHORIZER") }} + claimMapper: {{ default "" (env "TEMPORAL_AUTH_CLAIM_MAPPER") }} + +{{- $temporalGrpcPort := default "7233" (env "FRONTEND_GRPC_PORT") }} +{{- $temporalHTTPPort := default "7243" (env "FRONTEND_HTTP_PORT") }} +{{- $temporalInternalHTTPPort := default "7246" (env "INTERNAL_FRONTEND_HTTP_PORT") }} +{{- $temporalClusterAddress := default (default "127.0.0.1" (env "BIND_ON_IP")) (env "TEMPORAL_BROADCAST_ADDRESS") }} +services: + frontend: + rpc: + grpcPort: {{ $temporalGrpcPort }} + membershipPort: {{ default "6933" (env "FRONTEND_MEMBERSHIP_PORT") }} + bindOnIP: "{{ default "127.0.0.1" (env "BIND_ON_IP") }}" + httpPort: {{ $temporalHTTPPort }} + + {{- if env "USE_INTERNAL_FRONTEND" }} + internal-frontend: + rpc: + grpcPort: {{ default "7236" (env "INTERNAL_FRONTEND_GRPC_PORT") }} + membershipPort: {{ default "6936" (env "INTERNAL_FRONTEND_MEMBERSHIP_PORT") }} + bindOnIP: "{{ default "127.0.0.1" (env "BIND_ON_IP") }}" + httpPort: {{ $temporalInternalHTTPPort }} + {{- end }} + + matching: + rpc: + grpcPort: {{ default "7235" (env "MATCHING_GRPC_PORT") }} + membershipPort: {{ default "6935" (env "MATCHING_MEMBERSHIP_PORT") }} + bindOnIP: "{{ default "127.0.0.1" (env "BIND_ON_IP") }}" + + history: + rpc: + grpcPort: {{ default "7234" (env "HISTORY_GRPC_PORT") }} + membershipPort: {{ default "6934" (env "HISTORY_MEMBERSHIP_PORT") }} + bindOnIP: "{{ default "127.0.0.1" (env "BIND_ON_IP") }}" + + worker: + rpc: + grpcPort: {{ default "7239" (env "WORKER_GRPC_PORT") }} + membershipPort: {{ default "6939" (env "WORKER_MEMBERSHIP_PORT") }} + bindOnIP: "{{ default "127.0.0.1" (env "BIND_ON_IP") }}" + +clusterMetadata: + enableGlobalNamespace: false + failoverVersionIncrement: 10 + masterClusterName: "active" + 
currentClusterName: "active" + clusterInformation: + active: + enabled: true + initialFailoverVersion: 1 + rpcName: "frontend" + rpcAddress: {{ default (print $temporalClusterAddress ":" $temporalGrpcPort) (env "CLUSTER_RPC_ADDRESS") }} + httpAddress: {{ default (print $temporalClusterAddress ":" $temporalHTTPPort) (env "CLUSTER_HTTP_ADDRESS") }} + +dcRedirectionPolicy: + policy: "noop" + +archival: + history: + state: "enabled" + enableRead: true + provider: + filestore: + fileMode: "0666" + dirMode: "0766" + visibility: + state: "enabled" + enableRead: true + provider: + filestore: + fileMode: "0666" + dirMode: "0766" + +namespaceDefaults: + archival: + history: + state: "disabled" + URI: "file:///tmp/temporal_archival/development" + visibility: + state: "disabled" + URI: "file:///tmp/temporal_vis_archival/development" + +{{- if or (env "USE_INTERNAL_FRONTEND") (and (not (env "TEMPORAL_AUTH_AUTHORIZER")) (not (env "TEMPORAL_AUTH_CLAIM_MAPPER"))) }} +{{/* publicClient is not needed with internal frontend, or if not using authorizer + claim mapper */}} +{{- else }} +{{ $publicIp := default "127.0.0.1" (env "BIND_ON_IP") -}} +{{- $defaultPublicHostPost := (print $publicIp ":" $temporalGrpcPort) -}} +publicClient: + hostPort: "{{ default $defaultPublicHostPost (env "PUBLIC_FRONTEND_ADDRESS") }}" +{{- end }} + +dynamicConfigClient: + filepath: "{{ default "/etc/temporal/config/dynamicconfig/docker.yaml" (env "DYNAMIC_CONFIG_FILE_PATH") }}" + pollInterval: "60s" diff --git a/common/config/config_test.go b/common/config/config_test.go deleted file mode 100644 index 3230fe811cc..00000000000 --- a/common/config/config_test.go +++ /dev/null @@ -1,38 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestToString(t *testing.T) { - var cfg Config - err := Load("", "../../config", "", &cfg) - assert.NoError(t, err) - assert.NotEmpty(t, cfg.String()) -} diff --git a/common/config/fx.go b/common/config/fx.go index a4ca58230be..5c1d374f3af 100644 --- a/common/config/fx.go +++ b/common/config/fx.go @@ -1,33 +1,8 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package config import ( - "go.uber.org/fx" - "go.temporal.io/server/common/primitives" + "go.uber.org/fx" ) // ServicePortMap contains the gRPC ports for our services. diff --git a/common/config/loader.go b/common/config/loader.go index aeefb87a272..9085d60340a 100644 --- a/common/config/loader.go +++ b/common/config/loader.go @@ -1,38 +1,30 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package config import ( + "bufio" + "bytes" + _ "embed" + "errors" "fmt" + "io" stdlog "log" "os" + "path/filepath" + "strings" + "text/template" - "gopkg.in/validator.v2" + "github.com/Masterminds/sprig/v3" "gopkg.in/yaml.v3" ) +//go:embed config_template_embedded.yaml +var embeddedConfigTemplate []byte + +var ( + // ErrConfigFilesNotFound is returned when no config files are found in the specified directory + ErrConfigFilesNotFound = errors.New("no config files found") +) + const ( // EnvKeyRoot the environment variable key for runtime root dir EnvKeyRoot = "TEMPORAL_ROOT" @@ -48,12 +40,16 @@ const ( EnvKeyAvailabilityZoneTypo = "TEMPORAL_AVAILABILTY_ZONE" // EnvKeyAllowNoAuth is the environment variable key for setting no authorizer EnvKeyAllowNoAuth = "TEMPORAL_ALLOW_NO_AUTH" + // EnvKeyConfigFile is the environment variable key for specifying a config file path + EnvKeyConfigFile = "TEMPORAL_SERVER_CONFIG_FILE_PATH" ) const ( - baseFile = "base.yaml" - envDevelopment = "development" - defaultConfigDir = "config" + baseFile = "base.yaml" + envDevelopment = "development" + defaultConfigDir = "config" + enableTemplate = "enable-template" + commentSearchLimit = 1024 ) // Load loads the configuration from a set of @@ -62,85 +58,252 @@ const ( // The loader first fetches the set of files matching // a pre-determined naming convention, then sorts // them by hierarchy order and after that, simply -// loads the files one after another with the -// key/values in the later files overriding the key/values -// in the earlier files +type loadOptions struct { + env string + configDir string + zone string + configFilePath string + useEmbeddedOnly bool +} + +type loadOption func(*loadOptions) + +// WithEnv sets the environment name for configuration loading (e.g., "development", "production"). +// If empty, defaults to "development". 
+func WithEnv(env string) loadOption { + return func(o *loadOptions) { + if env != "" { + o.env = env + } + } +} + +// WithConfigDir sets the directory path where configuration files are located. +// If empty, defaults to "config". +func WithConfigDir(configDir string) loadOption { + return func(o *loadOptions) { + if configDir != "" { + o.configDir = configDir + } + } +} + +// WithZone sets the availability zone for configuration loading. +// This is used to load zone-specific configuration overrides (e.g., "us-east-1a"). +func WithZone(zone string) loadOption { + return func(o *loadOptions) { + if zone != "" { + o.zone = zone + } + } +} + +// WithConfigFile sets a specific configuration file path to load. +// When provided, only this file will be loaded, bypassing the legacy hierarchical loading. +func WithConfigFile(configFilePath string) loadOption { + return func(o *loadOptions) { + if configFilePath != "" { + o.configFilePath = configFilePath + } + } +} + +// WithEmbedded forces the loader to use only the embedded configuration template. +// This loads configuration from environment variables only, using the embedded template. +func WithEmbedded() loadOption { + return func(o *loadOptions) { + o.useEmbeddedOnly = true + } +} + +// Load loads and validates the Temporal server configuration. +// It supports multiple loading strategies based on the provided options: +// - Embedded template with environment variables (WithEmbedded) +// - Single config file (WithConfigFile) +// - Legacy hierarchical config directory (WithConfigDir, WithEnv, WithZone) +// +// Configuration files can be templated using Go template syntax with sprig-compatible +// functions. To enable templating, add "# enable-template" comment in the first 1KB of the file. +// +// Returns the loaded configuration or an error if loading or validation fails. 
+func Load(opts ...loadOption) (*Config, error) { + cfg := &Config{} + options := &loadOptions{} + + for _, opt := range opts { + opt(options) + } + + if err := options.load(cfg); err != nil { + return nil, err + } + return cfg, nil +} + +func (opts *loadOptions) load(config any) error { + + if opts.useEmbeddedOnly { + stdlog.Println("Loading configuration from environment variables only") + return loadAndUnmarshalContent(embeddedConfigTemplate, "config_template_embedded.yaml", config) + } + + if opts.configFilePath != "" { + content, err := readConfigFile(opts.configFilePath) + if err != nil { + return err + } + return loadAndUnmarshalContent(content, filepath.Base(opts.configFilePath), config) + } + return opts.loadLegacy(config) + +} + +// loadLegacy loads configuration data from a set of YAML files +// located in the config directory. // -// The hierarchy is as follows from lowest to highest +// Deprecated: This loader is maintained only for backward compatibility +// and should not be used in new code. // -// base.yaml -// env.yaml -- environment is one of the input params ex-development -// env_az.yaml -- zone is another input param -func Load(env string, configDir string, zone string, config interface{}) error { - if len(env) == 0 { - env = envDevelopment +// The loader first identifies all files matching a predefined +// naming convention, then sorts them according to their hierarchy. +// It then loads the files sequentially, with key/value pairs in +// later files overriding those in earlier ones. 
+// +// The hierarchy, from lowest to highest precedence, is as follows: +// +// base.yaml +// env.yaml -- where "environment" is one of the input parameters (e.g., "development") +// env_az.yaml -- where "zone" is another input parameter + +func (opts *loadOptions) loadLegacy(config any) error { + stdlog.Printf("Loading config; env=%v,zone=%v,configDir=%v\n", opts.env, opts.zone, opts.configDir) + if opts.env == "" { + opts.env = envDevelopment } - if len(configDir) == 0 { - configDir = defaultConfigDir + if opts.configDir == "" { + opts.configDir = defaultConfigDir } - // TODO: remove log dependency. - stdlog.Printf("Loading config; env=%v,zone=%v,configDir=%v\n", env, zone, configDir) + stdlog.Printf("Loading config; env=%v,zone=%v,configDir=%v\n", opts.env, opts.zone, opts.configDir) - files, err := getConfigFiles(env, configDir, zone) + files, err := getConfigFiles(opts.env, opts.configDir, opts.zone) if err != nil { - return err + return fmt.Errorf("failed to get config files: %w", err) } - // TODO: remove log dependency. stdlog.Printf("Loading config files=%v\n", files) for _, f := range files { - // This is tagged nosec because the file names being read are for config files that are not user supplied - // #nosec - data, err := os.ReadFile(f) + data, err := readConfigFile(f) if err != nil { return err } - err = yaml.Unmarshal(data, config) + + processedData, err := processConfigFile(data, filepath.Base(f)) + if err != nil { + return err + } + + err = yaml.Unmarshal(processedData, config) if err != nil { return err } } - return validator.Validate(config) + validate := newValidator() + return validate.Validate(config) +} + +func readConfigFile(path string) ([]byte, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("could not read config file: %s. 
error: %w", path, err) + + } + return data, nil +} + +// processConfigFile processes a config file, rendering it as a template if enabled +func processConfigFile(data []byte, filename string) ([]byte, error) { + // If the config file contains "enable-template" in a comment within the first 1KB, then + // we will treat the file as a template and render it. + templating, err := checkTemplatingEnabled(data) + if err != nil { + return nil, err + } + + if !templating { + return data, nil + } + + stdlog.Printf("Processing config file as template; filename=%v\n", filename) + tpl, err := template.New(filename).Funcs(sprig.FuncMap()).Parse(string(data)) + if err != nil { + return nil, err + } + + var rendered bytes.Buffer + err = tpl.Execute(&rendered, nil) + if err != nil { + return nil, err + } + + return rendered.Bytes(), nil } -// Helper function for loading configuration -func LoadConfig(env string, configDir string, zone string) (*Config, error) { - config := Config{} - err := Load(env, configDir, zone, &config) +func loadAndUnmarshalContent(content []byte, filename string, config any) error { + processed, err := processConfigFile(content, filename) if err != nil { - return nil, fmt.Errorf("config file corrupted: %w", err) + return fmt.Errorf("failed to process config file %s: %w", filename, err) } - return &config, nil + + if err := yaml.Unmarshal(processed, config); err != nil { + return fmt.Errorf("failed to unmarshal config file %s: %w", filename, err) + } + + validate := newValidator() + return validate.Validate(config) +} + +func checkTemplatingEnabled(content []byte) (bool, error) { + scanner := bufio.NewScanner(io.LimitReader(bytes.NewReader(content), commentSearchLimit)) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + if strings.HasPrefix(line, "#") && strings.Contains(line, enableTemplate) { + return true, nil + } + } + + return false, scanner.Err() } // getConfigFiles returns the list of config files to // process in the hierarchy 
order func getConfigFiles(env string, configDir string, zone string) ([]string, error) { + candidates := make([]string, 2, 3) + candidates[0] = filepath.Join(configDir, baseFile) + candidates[1] = filepath.Join(configDir, file(env, "yaml")) - candidates := []string{ - path(configDir, baseFile), - path(configDir, file(env, "yaml")), - } - - if len(zone) > 0 { + if zone != "" { f := file(concat(env, zone), "yaml") - candidates = append(candidates, path(configDir, f)) + candidates = append(candidates, filepath.Join(configDir, f)) } - var result []string + result := make([]string, 0, len(candidates)) for _, c := range candidates { - if _, err := os.Stat(c); err != nil { + _, err := os.Stat(c) + if errors.Is(err, os.ErrNotExist) { continue } + if err != nil { + return nil, fmt.Errorf("error accessing config file %s: %w", c, err) + } result = append(result, c) } - if len(result) == 0 { - return nil, fmt.Errorf("no config files found within %v", configDir) + return nil, fmt.Errorf("%w in directory: %s", ErrConfigFilesNotFound, configDir) } return result, nil @@ -154,6 +317,15 @@ func file(name string, suffix string) string { return name + "." + suffix } -func path(dir string, file string) string { - return dir + "/" + file +func loadEnvMap() map[string]string { + environ := os.Environ() + envMap := make(map[string]string, len(environ)) + + for _, env := range environ { + key, value, found := strings.Cut(env, "=") + if found && key != "" { + envMap[key] = value + } + } + return envMap } diff --git a/common/config/loader_test.go b/common/config/loader_test.go index 61dd4408fe4..ec0fc1c8033 100644 --- a/common/config/loader_test.go +++ b/common/config/loader_test.go @@ -1,136 +1,432 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package config import ( "os" + "path/filepath" + "strings" "testing" "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.temporal.io/server/tests/testutils" ) const fileMode = os.FileMode(0644) -type ( - LoaderSuite struct { - *require.Assertions - suite.Suite - } +func TestLoad(t *testing.T) { + const staticConfig = `log: + level: warn +persistence: + numHistoryShards: 16 + defaultStore: default + datastores: + default: + sql: + pluginName: "postgres12" + databaseName: "temporal" + connectAddr: "localhost:5432" + connectProtocol: "tcp" +services: + frontend: + rpc: + grpcPort: 9233 + bindOnIP: "0.0.0.0" +` - itemsConfig struct { - Item1 string `yaml:"item1"` - Item2 string `yaml:"item2"` - } + const templateConfig = `# enable-template +log: + level: {{ default "info" (env "LOG_LEVEL") }} +persistence: + numHistoryShards: {{ default "4" (env "NUM_HISTORY_SHARDS") }} + defaultStore: default + datastores: + default: + sql: + pluginName: "postgres12" + databaseName: "temporal" + connectAddr: "localhost:5432" + connectProtocol: "tcp" +services: + frontend: + rpc: + grpcPort: {{ default "7233" (env "FRONTEND_GRPC_PORT") }} + bindOnIP: "127.0.0.1" +` - testConfig struct { - Items itemsConfig `yaml:"items"` + const invalidYaml = `log: + level: warn + invalid indentation + bad: yaml +` + + testCases := []struct { + name string + configContent string + loadOptions func(configPath string) []loadOption + setupEnv func(t *testing.T) + expectError bool + errorContains string + validateConfig func(t *testing.T, cfg *Config) + }{ + { + name: "static config without template", + configContent: staticConfig, + loadOptions: func(configPath string) []loadOption { + return []loadOption{WithConfigDir(filepath.Dir(configPath))} + }, + expectError: false, + validateConfig: func(t *testing.T, cfg *Config) { + require.Equal(t, "warn", cfg.Log.Level) + require.Equal(t, int32(16), cfg.Persistence.NumHistoryShards) + require.Equal(t, 9233, 
cfg.Services["frontend"].RPC.GRPCPort) + }, + }, + { + name: "template config with file path uses system env vars", + configContent: templateConfig, + loadOptions: func(configPath string) []loadOption { + return []loadOption{WithConfigFile(configPath)} + }, + setupEnv: func(t *testing.T) { + t.Setenv("LOG_LEVEL", "error") + t.Setenv("NUM_HISTORY_SHARDS", "32") + t.Setenv("FRONTEND_GRPC_PORT", "7777") + }, + expectError: false, + validateConfig: func(t *testing.T, cfg *Config) { + require.Equal(t, "error", cfg.Log.Level) + require.Equal(t, int32(32), cfg.Persistence.NumHistoryShards) + require.Equal(t, 7777, cfg.Services["frontend"].RPC.GRPCPort) + }, + }, + { + name: "invalid yaml returns error", + configContent: invalidYaml, + loadOptions: func(configPath string) []loadOption { + return []loadOption{WithConfigDir(filepath.Dir(configPath))} + }, + expectError: true, + errorContains: "yaml", + }, + { + name: "non-existent directory returns error", + configContent: "", + loadOptions: func(configPath string) []loadOption { + return []loadOption{WithConfigDir("/nonexistent/path")} + }, + expectError: true, + errorContains: "no config files found", + }, + { + name: "non-existent file path returns error", + configContent: "", + loadOptions: func(configPath string) []loadOption { + return []loadOption{WithConfigFile("/nonexistent/path/config.yaml")} + }, + expectError: true, + errorContains: "could not read config file", + }, } -) -func TestLoaderSuite(t *testing.T) { - suite.Run(t, new(LoaderSuite)) + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var configPath string + if tc.configContent != "" { + tempDir := testutils.MkdirTemp(t, "", "load_test") + configPath = filepath.Join(tempDir, "base.yaml") + err := os.WriteFile(configPath, []byte(tc.configContent), fileMode) + require.NoError(t, err) + } + + if tc.setupEnv != nil { + tc.setupEnv(t) + } + + cfg, err := Load(tc.loadOptions(configPath)...) 
+ + if tc.expectError { + require.Error(t, err) + if tc.errorContains != "" { + require.Contains(t, err.Error(), tc.errorContains) + } + if tc.errorContains == "failed to read config file" { + require.Nil(t, cfg) + } + } else { + require.NoError(t, err) + require.NotNil(t, cfg) + if tc.validateConfig != nil { + tc.validateConfig(t, cfg) + } + } + }) + } } -func (s *LoaderSuite) SetupTest() { - s.Assertions = require.New(s.T()) +func createFile(t *testing.T, dir string, file string, uid, uid2 string) { + err := os.WriteFile(path(dir, file), []byte(buildConfig(uid, uid2)), fileMode) + require.NoError(t, err) } -func (s *LoaderSuite) TestBaseYaml() { - dir := testutils.MkdirTemp(s.T(), "", "loader.testBaseYaml") +func path(dir string, file string) string { + return dir + "/" + file +} - data := buildConfig("", "") - err := os.WriteFile(path(dir, "base.yaml"), []byte(data), fileMode) - s.Nil(err) +func buildConfig(uid, uid2 string) string { + base := configBase + if uid != "" { - envs := []string{"", "prod"} - zones := []string{"", "us-east-1a"} + base += strings.ReplaceAll(appendItem1, "REP", uid) + } - for _, env := range envs { - for _, zone := range zones { - var cfg testConfig - err = Load(env, dir, zone, &cfg) - s.Nil(err) - s.Equal("hello__", cfg.Items.Item1) - s.Equal("world__", cfg.Items.Item2) - } + if uid2 != "" { + base += strings.ReplaceAll(appendItem2, "REP", uid2) } + return base } -func (s *LoaderSuite) TestHierarchy() { - dir := testutils.MkdirTemp(s.T(), "", "loader.testHierarchy") - - s.createFile(dir, "base.yaml", "", "") - s.createFile(dir, "development.yaml", "development", "") - s.createFile(dir, "prod.yaml", "prod", "") - s.createFile(dir, "prod_dca.yaml", "prod", "dca") - +func TestPathResolution(t *testing.T) { + // this does not test that the fact that env+zone overrides base and retains non-overridden configs + t.Parallel() testCases := []struct { - env string - zone string - item1 string - item2 string + name string + env string + zone 
string + before func(t *testing.T) string + level string + level2 string }{ - {"", "", "hello_development_", "world_development_"}, - {"", "dca", "hello_development_", "world_development_"}, - {"", "pdx", "hello_development_", "world_development_"}, - {"development", "", "hello_development_", "world_development_"}, - {"development", "dca", "hello_development_", "world_development_"}, - {"development", "pdx", "hello_development_", "world_development_"}, - {"prod", "", "hello_prod_", "world_prod_"}, - {"prod", "dca", "hello_prod_dca", "world_prod_dca"}, - {"prod", "pdx", "hello_prod_", "world_prod_"}, + { + name: "just base.yaml", + env: "", + zone: "", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "base.yaml", "base", "") + return dir + }, + level: "base", + }, + { + name: "just base.yaml env and zone defined", + env: "prod", + zone: "east", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "base.yaml", "base", "") + return dir + }, + level: "base", + }, + { + name: "base.yaml and prod_east.yaml env and zone defined", + env: "prod", + zone: "east", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "base.yaml", "base", "") + createFile(t, dir, "prod_east.yaml", "prod_east", "") + return dir + }, + level: "prod_east", + }, + { + name: "prod_east.yaml env and zone defined", + env: "prod", + zone: "east", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "prod_east.yaml", "prod_east", "") + return dir + }, + level: "prod_east", + }, + { + name: "base.yaml and development.yaml", + env: "", + zone: "", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "base.yaml", "base", "") + createFile(t, dir, "development.yaml", 
"development", "") + return dir + }, + level: "development", + }, + { + name: "base.yaml and development.yaml and development_zone.yaml", + env: "", + zone: "zone", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "base.yaml", "base", "") + createFile(t, dir, "development.yaml", "development", "") + createFile(t, dir, "development_zone.yaml", "development_zone", "") + return dir + }, + level: "development_zone", + }, + { + name: "base.yaml and development.yaml and development_zone.yaml", + env: "prod", + zone: "zone", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "base.yaml", "base", "") + createFile(t, dir, "development.yaml", "development", "") + createFile(t, dir, "development_zone.yaml", "development_zone", "") + createFile(t, dir, "prod_zone.yaml", "prod_zone", "") + return dir + }, + level: "prod_zone", + }, + { + name: "env->env+zone combined", + env: "production", + zone: "east", + before: func(t *testing.T) string { + dir := testutils.MkdirTemp(t, "", "loader.testHierarchy") + createFile(t, dir, "production.yaml", "base", "SHOULD NOT") + createFile(t, dir, "production_east.yaml", "", "development") + return dir + }, + level: "base", + level2: "development", + }, } for _, tc := range testCases { - var cfg testConfig - err := Load(tc.env, dir, tc.zone, &cfg) - s.Nil(err) - s.Equal(tc.item1, cfg.Items.Item1) - s.Equal(tc.item2, cfg.Items.Item2) + t.Run(tc.name, func(t *testing.T) { + dir := tc.before(t) + cfg, err := Load( + WithEnv(tc.env), + WithConfigDir(dir), + WithZone(tc.zone), + ) + require.NoError(t, err) + require.Equal(t, tc.level, cfg.NamespaceDefaults.Archival.History.State) + if tc.level2 != "" { + require.Equal(t, tc.level2, cfg.DCRedirectionPolicy.Policy) + } + }) } } -func (s *LoaderSuite) TestInvalidPath() { - var cfg testConfig - err := Load("prod", "", "", &cfg) - s.NotNil(err) -} +const appendItem2 
= ` +dcRedirectionPolicy: + policy: REP -func (s *LoaderSuite) createFile(dir string, file string, env string, zone string) { - err := os.WriteFile(path(dir, file), []byte(buildConfig(env, zone)), fileMode) - s.Nil(err) -} +` +const appendItem1 = ` +namespaceDefaults: + archival: + history: + state: REP + URI: "file:///tmp/temporal_archival/development" -func buildConfig(env, zone string) string { - item1 := concat("hello", concat(env, zone)) - item2 := concat("world", concat(env, zone)) - return ` - items: - item1: ` + item1 + ` - item2: ` + item2 -} +` +const configBase = ` +log: + stdout: true + level: info + +persistence: + defaultStore: mysql-default + visibilityStore: mysql-visibility + numHistoryShards: 4 + datastores: + mysql-default: + sql: + pluginName: "mysql8" + databaseName: "temporal" + connectAddr: "127.0.0.1:3306" + connectProtocol: "tcp" + user: "temporal" + password: "temporal" + maxConns: 20 + maxIdleConns: 20 + maxConnLifetime: "1h" + mysql-visibility: + sql: + pluginName: "mysql8" + databaseName: "temporal_visibility" + connectAddr: "127.0.0.1:3306" + connectProtocol: "tcp" + user: "temporal" + password: "temporal" + maxConns: 2 + maxIdleConns: 2 + maxConnLifetime: "1h" + +global: + membership: + maxJoinDuration: 30s + broadcastAddress: "127.0.0.1" + pprof: + port: 7936 + metrics: + prometheus: + framework: "tally" + timerType: "histogram" + listenAddress: "127.0.0.1:8000" + +services: + frontend: + rpc: + grpcPort: 7233 + membershipPort: 6933 + bindOnLocalHost: true + httpPort: 7243 + + matching: + rpc: + grpcPort: 7235 + membershipPort: 6935 + bindOnLocalHost: true + + history: + rpc: + grpcPort: 7234 + membershipPort: 6934 + bindOnLocalHost: true + + worker: + rpc: + grpcPort: 7239 + membershipPort: 6939 + bindOnLocalHost: true + +clusterMetadata: + enableGlobalNamespace: false + failoverVersionIncrement: 10 + masterClusterName: "active" + currentClusterName: "active" + clusterInformation: + active: + enabled: true + initialFailoverVersion: 
1 + rpcName: "frontend" + rpcAddress: "localhost:7233" + +archival: + history: + state: "enabled" + enableRead: true + provider: + filestore: + fileMode: "0666" + dirMode: "0766" + gstorage: + credentialsPath: "/tmp/gcloud/keyfile.json" + visibility: + state: "enabled" + enableRead: true + provider: + filestore: + fileMode: "0666" + dirMode: "0766" + + + +dynamicConfigClient: + filepath: "config/dynamicconfig/development-sql.yaml" + pollInterval: "10s"` diff --git a/common/config/localip.go b/common/config/localip.go index 687a3821a89..a66dd4e35dc 100644 --- a/common/config/localip.go +++ b/common/config/localip.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package config // ** This code is copied from tchannel, we would like to not take dependency on tchannel code ** diff --git a/common/config/localip_test.go b/common/config/localip_test.go index b1d845ce7e9..5c155fde3c2 100644 --- a/common/config/localip_test.go +++ b/common/config/localip_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package config // ** This code is copied from tchannel, we would like to not take dependency on tchannel code ** diff --git a/common/config/persistence.go b/common/config/persistence.go index b8e2d86bdaa..2730b27b0ce 100644 --- a/common/config/persistence.go +++ b/common/config/persistence.go @@ -1,34 +1,14 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package config import ( + "bytes" + "context" "errors" "fmt" + "os/exec" "reflect" "strings" + "time" "github.com/gocql/gocql" "go.temporal.io/server/common/persistence/visibility/store/elasticsearch/client" @@ -41,6 +21,8 @@ const ( StoreTypeNoSQL = "nosql" ) +var ErrPersistenceConfig = errors.New("persistence config error") + // DefaultStoreType returns the storeType for the default persistence store func (c *Persistence) DefaultStoreType() string { if c.DataStores[c.DefaultStore].SQL != nil { @@ -58,84 +40,58 @@ func (c *Persistence) Validate() error { if c.SecondaryVisibilityStore != "" { stores = append(stores, c.SecondaryVisibilityStore) } - if c.AdvancedVisibilityStore != "" { - stores = append(stores, c.AdvancedVisibilityStore) - } // There are 3 config keys: // - visibilityStore: can set any data store // - secondaryVisibilityStore: can set any data store - // - advancedVisibilityStore: can only set elasticsearch data store // If visibilityStore is set, then it's always the primary. // If secondaryVisibilityStore is set, it's always the secondary. // // Valid dual visibility combinations (order: primary, secondary): // - visibilityStore (advanced sql), secondaryVisibilityStore (advanced sql) // - visibilityStore (es), visibilityStore (es) [via elasticsearch.indices config] - // - advancedVisibilityStore (es), advancedVisibilityStore (es) [via elasticsearch.indices config] + // - visibilityStore (es), secondaryVisibilityStore (es) // // Invalid dual visibility combinations: - // - visibilityStore (advanced sql), secondaryVisibilityStore (standard, es) - // - visibilityStore (advanced sql), advancedVisibilityStore (es) - // - visibilityStore (es), secondaryVisibilityStore (any) - // - visibilityStore (es), advancedVisibilityStore (es) - // - advancedVisibilityStore (es), secondaryVisibilityStore (any) - // - // The validation for dual visibility pair (advanced sql, advanced sql) is in visibility factory - // due to circular dependency. 
This will be better after standard visibility is removed. + // - visibilityStore (advanced sql), secondaryVisibilityStore (es) + // - visibilityStore (es), secondaryVisibilityStore (advanced sql) - if c.VisibilityStore == "" && c.AdvancedVisibilityStore == "" { - return errors.New("persistence config: visibilityStore must be specified") - } - if c.SecondaryVisibilityStore != "" && c.AdvancedVisibilityStore != "" { - return errors.New( - "persistence config: cannot specify both secondaryVisibilityStore and " + - "advancedVisibilityStore", - ) + if c.VisibilityStore == "" { + return fmt.Errorf("%w: visibilityStore must be specified", ErrPersistenceConfig) } - if c.AdvancedVisibilityStore != "" && c.DataStores[c.AdvancedVisibilityStore].Elasticsearch == nil { - return fmt.Errorf( - "persistence config: advanced visibility datastore %q: missing elasticsearch config", - c.AdvancedVisibilityStore, - ) - } - if c.DataStores[c.VisibilityStore].Elasticsearch != nil && - (c.SecondaryVisibilityStore != "" || c.AdvancedVisibilityStore != "") { - return errors.New( - "persistence config: cannot set secondaryVisibilityStore or advancedVisibilityStore " + - "when visibilityStore is setting elasticsearch datastore", - ) - } - if c.DataStores[c.SecondaryVisibilityStore].Elasticsearch.GetSecondaryVisibilityIndex() != "" { - return fmt.Errorf( - "persistence config: secondary visibility datastore %q: elasticsearch config: "+ - "cannot set secondary_visibility", - c.SecondaryVisibilityStore, - ) + if c.SecondaryVisibilityStore != "" { + isAnyCustom := c.DataStores[c.VisibilityStore].CustomDataStoreConfig != nil || + c.DataStores[c.SecondaryVisibilityStore].CustomDataStoreConfig != nil + isPrimaryEs := c.DataStores[c.VisibilityStore].Elasticsearch != nil + isSecondaryEs := c.DataStores[c.SecondaryVisibilityStore].Elasticsearch != nil + if !isAnyCustom && isPrimaryEs != isSecondaryEs { + return fmt.Errorf( + "%w: cannot set visibilityStore and secondaryVisibilityStore with different 
datastore types", + ErrPersistenceConfig) + } + if c.DataStores[c.VisibilityStore].Elasticsearch.GetSecondaryVisibilityIndex() != "" { + return fmt.Errorf( + "%w: cannot set secondaryVisibilityStore "+ + "when visibilityStore is setting Elasticsearch secondary visibility index", + ErrPersistenceConfig) + } + if c.DataStores[c.SecondaryVisibilityStore].Elasticsearch.GetSecondaryVisibilityIndex() != "" { + return fmt.Errorf( + "%w: secondary visibility datastore %q cannot set secondary_visibility", + ErrPersistenceConfig, + c.SecondaryVisibilityStore) + } } - cntEsConfigs := 0 for _, st := range stores { ds, ok := c.DataStores[st] if !ok { - return fmt.Errorf("persistence config: missing config for datastore %q", st) + return fmt.Errorf("%w: missing config for datastore %q", ErrPersistenceConfig, st) } if err := ds.Validate(); err != nil { - return fmt.Errorf("persistence config: datastore %q: %s", st, err.Error()) - } - if ds.Elasticsearch != nil { - cntEsConfigs++ + return fmt.Errorf("%w: datastore %q: %s", ErrPersistenceConfig, st, err.Error()) } } - - if cntEsConfigs > 1 { - return fmt.Errorf( - "persistence config: cannot have more than one Elasticsearch visibility store config " + - "(use `elasticsearch.indices.secondary_visibility` config key if you need to set a " + - "secondary Elasticsearch visibility store)", - ) - } - return nil } @@ -149,25 +105,18 @@ func (c *Persistence) SecondaryVisibilityConfigExist() bool { return c.SecondaryVisibilityStore != "" } -// AdvancedVisibilityConfigExist returns whether user specified advancedVisibilityStore in config -func (c *Persistence) AdvancedVisibilityConfigExist() bool { - return c.AdvancedVisibilityStore != "" -} - func (c *Persistence) IsSQLVisibilityStore() bool { return (c.VisibilityConfigExist() && c.DataStores[c.VisibilityStore].SQL != nil) || (c.SecondaryVisibilityConfigExist() && c.DataStores[c.SecondaryVisibilityStore].SQL != nil) } +func (c *Persistence) IsCustomVisibilityStore() bool { + return 
c.GetVisibilityStoreConfig().CustomDataStoreConfig != nil || + c.GetSecondaryVisibilityStoreConfig().CustomDataStoreConfig != nil +} + func (c *Persistence) GetVisibilityStoreConfig() DataStore { - if c.VisibilityStore != "" { - return c.DataStores[c.VisibilityStore] - } - if c.AdvancedVisibilityStore != "" { - return c.DataStores[c.AdvancedVisibilityStore] - } - // Based on validation above, this should never happen. - return DataStore{} + return c.DataStores[c.VisibilityStore] } func (c *Persistence) GetSecondaryVisibilityStoreConfig() DataStore { @@ -175,9 +124,6 @@ func (c *Persistence) GetSecondaryVisibilityStoreConfig() DataStore { return c.DataStores[c.SecondaryVisibilityStore] } if c.VisibilityStore != "" { - if c.AdvancedVisibilityStore != "" { - return c.DataStores[c.AdvancedVisibilityStore] - } ds := c.DataStores[c.VisibilityStore] if ds.Elasticsearch != nil && ds.Elasticsearch.GetSecondaryVisibilityIndex() != "" { esConfig := *ds.Elasticsearch @@ -188,17 +134,6 @@ func (c *Persistence) GetSecondaryVisibilityStoreConfig() DataStore { return ds } } - if c.AdvancedVisibilityStore != "" { - ds := c.DataStores[c.AdvancedVisibilityStore] - if ds.Elasticsearch != nil && ds.Elasticsearch.GetSecondaryVisibilityIndex() != "" { - esConfig := *ds.Elasticsearch - esConfig.Indices = map[string]string{ - client.VisibilityAppName: ds.Elasticsearch.GetSecondaryVisibilityIndex(), - } - ds.Elasticsearch = &esConfig - return ds - } - } return DataStore{} } @@ -210,6 +145,8 @@ func (ds *DataStore) GetIndexName() string { return ds.Cassandra.Keyspace case ds.Elasticsearch != nil: return ds.Elasticsearch.GetVisibilityIndex() + case ds.CustomDataStoreConfig != nil: + return ds.CustomDataStoreConfig.IndexName default: return "" } @@ -237,8 +174,13 @@ func (ds *DataStore) Validate() error { ) } - if ds.SQL != nil && ds.SQL.TaskScanPartitions == 0 { - ds.SQL.TaskScanPartitions = 1 + if ds.SQL != nil { + if ds.SQL.TaskScanPartitions == 0 { + ds.SQL.TaskScanPartitions = 1 + } + if 
err := ds.SQL.validate(); err != nil { + return err + } } if ds.Cassandra != nil { if err := ds.Cassandra.validate(); err != nil { @@ -338,3 +280,44 @@ func parseSerialConsistency(serialConsistency string) (gocql.SerialConsistency, err := s.UnmarshalText([]byte(strings.ToUpper(serialConsistency))) return s, err } + +func (c *SQL) validate() error { + if c.PasswordCommand != nil && c.Password != "" { + return errors.New("passwordCommand and password are mutually exclusive") + } + if c.PasswordCommand != nil && c.PasswordCommand.Command == "" { + return errors.New("passwordCommand.command must not be empty") + } + return nil +} + +const ( + defaultPasswordCommandTimeout = 30 * time.Second + passwordCommandWaitDelay = 5 * time.Second +) + +// ResolvePassword returns the database password, either from the static Password +// field or by executing PasswordCommand. If neither is set, it returns an empty string. +func (c *SQL) ResolvePassword() (string, error) { + if c.PasswordCommand == nil { + return c.Password, nil + } + timeout := c.PasswordCommand.Timeout + if timeout == 0 { + timeout = defaultPasswordCommandTimeout + } + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + cmd := exec.CommandContext(ctx, c.PasswordCommand.Command, c.PasswordCommand.Args...) //nolint:gosec + // WaitDelay caps how long we block on the stdout pipe after the process is killed. + // Without it, a subprocess that inherits the pipe could keep it open indefinitely. 
+ cmd.WaitDelay = passwordCommandWaitDelay + var stderr bytes.Buffer + cmd.Stderr = &stderr + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("passwordCommand %q %v failed: %w (stderr: %s)", + c.PasswordCommand.Command, c.PasswordCommand.Args, err, stderr.String()) + } + return strings.TrimRight(string(out), "\n\r"), nil +} diff --git a/common/config/persistence_test.go b/common/config/persistence_test.go index 34eaf23883d..94f385bb25b 100644 --- a/common/config/persistence_test.go +++ b/common/config/persistence_test.go @@ -1,36 +1,114 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package config import ( "reflect" "testing" + "time" "github.com/gocql/gocql" + "github.com/stretchr/testify/require" ) +func TestSQLValidate_MutualExclusivity(t *testing.T) { + cfg := &SQL{ + Password: "static", + PasswordCommand: &PasswordCommandConfig{ + Command: "echo", + Args: []string{"dynamic"}, + }, + } + err := cfg.validate() + require.ErrorContains(t, err, "mutually exclusive") +} + +func TestSQLValidate_PasswordOnly(t *testing.T) { + cfg := &SQL{Password: "static"} + err := cfg.validate() + require.NoError(t, err) +} + +func TestSQLValidate_PasswordCommandOnly(t *testing.T) { + cfg := &SQL{ + PasswordCommand: &PasswordCommandConfig{ + Command: "echo", + Args: []string{"dynamic"}, + }, + } + err := cfg.validate() + require.NoError(t, err) +} + +func TestSQLValidate_PasswordCommandEmptyCommand(t *testing.T) { + cfg := &SQL{ + PasswordCommand: &PasswordCommandConfig{}, + } + err := cfg.validate() + require.ErrorContains(t, err, "passwordCommand.command must not be empty") +} + +func TestSQLResolvePassword_Static(t *testing.T) { + cfg := &SQL{Password: "static-pass"} + pw, err := cfg.ResolvePassword() + require.NoError(t, err) + require.Equal(t, "static-pass", pw) +} + +func TestSQLResolvePassword_EmptyWhenNothingSet(t *testing.T) { + cfg := &SQL{} + pw, err := cfg.ResolvePassword() + require.NoError(t, err) + require.Empty(t, pw) +} + +func TestSQLResolvePassword_Command(t *testing.T) { + cfg := &SQL{ + PasswordCommand: &PasswordCommandConfig{ + Command: "echo", + Args: []string{"hello"}, + }, + } + pw, err := cfg.ResolvePassword() + require.NoError(t, err) + require.Equal(t, "hello", pw) +} + +func TestSQLResolvePassword_CommandTrimsTrailingNewline(t *testing.T) { + cfg := &SQL{ + PasswordCommand: &PasswordCommandConfig{ + Command: "printf", + Args: []string{"hello\n\n"}, + }, + } + pw, err := cfg.ResolvePassword() + require.NoError(t, err) + require.Equal(t, "hello", pw) +} + +func TestSQLResolvePassword_CommandFailure(t *testing.T) { + cfg := &SQL{ + 
PasswordCommand: &PasswordCommandConfig{ + Command: "false", + }, + } + _, err := cfg.ResolvePassword() + require.ErrorContains(t, err, "passwordCommand") +} + +func TestSQLResolvePassword_CommandTimeout(t *testing.T) { + cfg := &SQL{ + PasswordCommand: &PasswordCommandConfig{ + Command: "sleep", + Args: []string{"10"}, + Timeout: 10 * time.Millisecond, + }, + } + start := time.Now() + _, err := cfg.ResolvePassword() + elapsed := time.Since(start) + require.ErrorContains(t, err, "passwordCommand") + require.Less(t, elapsed, 5*time.Second, "command should have been killed by timeout") +} + func TestCassandraStoreConsistency_GetConsistency(t *testing.T) { t.Parallel() diff --git a/common/config/template_coverage_test.go b/common/config/template_coverage_test.go new file mode 100644 index 00000000000..d912156bec0 --- /dev/null +++ b/common/config/template_coverage_test.go @@ -0,0 +1 @@ +package config diff --git a/common/config/validator.go b/common/config/validator.go new file mode 100644 index 00000000000..093ab29a769 --- /dev/null +++ b/common/config/validator.go @@ -0,0 +1,49 @@ +package config + +import ( + "reflect" + + enumspb "go.temporal.io/api/enums/v1" + "gopkg.in/validator.v2" +) + +func newValidator() *validator.Validator { + validate := validator.NewValidator() + _ = validate.SetValidationFunc("persistence_custom_search_attributes", validatePersistenceCustomSearchAttributes) + return validate +} + +func validatePersistenceCustomSearchAttributes(v any, param string) error { + st := reflect.ValueOf(v) + switch st.Kind() { + case reflect.Map: + iter := st.MapRange() + for iter.Next() { + // key must be a string and a valid search attribute type + key := iter.Key() + if key.Kind() != reflect.String { + return validator.ErrUnsupported + } + if enumspb.IndexedValueType_shorthandValue[key.String()] == 0 { + return validator.ErrInvalid + } + // value must an integer and between 0 and 99 + val := iter.Value() + var num int64 + if val.CanInt() { + num = val.Int() 
+ } else if val.CanUint() { + num = int64(val.Uint()) + } else { + return validator.ErrUnsupported + } + if num < 0 || num > 99 { + return validator.ErrInvalid + } + } + + default: + return validator.ErrUnsupported + } + return nil +} diff --git a/common/config/validator_test.go b/common/config/validator_test.go new file mode 100644 index 00000000000..9ef9af83fa8 --- /dev/null +++ b/common/config/validator_test.go @@ -0,0 +1,78 @@ +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestValidateVisibilityConfig(t *testing.T) { + testCases := []struct { + name string + in Visibility + err bool + }{ + { + name: "success", + in: Visibility{ + PersistenceCustomSearchAttributes: map[string]int{ + "Bool": 5, + "Keyword": 2, + }, + }, + err: false, + }, + { + name: "invalid search attribute type", + in: Visibility{ + PersistenceCustomSearchAttributes: map[string]int{ + "Bool": 5, + "Foo": 2, + }, + }, + err: true, + }, + { + name: "invalid unspecified", + in: Visibility{ + PersistenceCustomSearchAttributes: map[string]int{ + "Bool": 5, + "Unspecified": 2, + }, + }, + err: true, + }, + { + name: "invalid negative number", + in: Visibility{ + PersistenceCustomSearchAttributes: map[string]int{ + "Bool": 5, + "Keyword": -2, + }, + }, + err: true, + }, + { + name: "invalid large number", + in: Visibility{ + PersistenceCustomSearchAttributes: map[string]int{ + "Bool": 5, + "Keyword": 100, + }, + }, + err: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + validate := newValidator() + err := validate.Validate(tc.in) + if tc.err { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/common/constants.go b/common/constants.go index 666c826644e..d40c79fdcaa 100644 --- a/common/constants.go +++ b/common/constants.go @@ -1,34 +1,8 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. 
-// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package common import ( "math" "time" - - "go.temporal.io/server/common/debug" ) const ( @@ -57,45 +31,25 @@ const ( ) const ( - // GetHistoryMaxPageSize is the max page size for get history - GetHistoryMaxPageSize = 256 - // ReadDLQMessagesPageSize is the max page size for read DLQ messages - ReadDLQMessagesPageSize = 1000 -) - -const ( - // MinLongPollTimeout is the minimum context timeout for long poll API, below which - // the request won't be processed + // DefaultLongPollTimeout is the default context timeout for a long poll request. + DefaultLongPollTimeout = time.Second * 60 + // DefaultLongPollBuffer is the buffer used to adjust a long poll request timeout. 
+ // Specifically, long poll requests are timed out at a time which leaves at least the buffer's duration + // remaining before the caller's deadline, if permitted by the caller's deadline. + DefaultLongPollBuffer = time.Second + // MinLongPollTimeout is the minimum context timeout for a long poll request, below which + // the request won't be processed. MinLongPollTimeout = time.Second * 2 - // CriticalLongPollTimeout is a threshold for the context timeout passed into long poll API, + // CriticalLongPollTimeout is a threshold for the context timeout passed into a long poll request, // below which a warning will be logged - CriticalLongPollTimeout = time.Second * 20 -) - -const ( - // DefaultWorkflowTaskTimeout sets the Default Workflow Task timeout for a Workflow - DefaultWorkflowTaskTimeout = 10 * time.Second * debug.TimeoutMultiplier - - // MaxWorkflowTaskStartToCloseTimeout sets the Max Workflow Task start to close timeout for a Workflow - MaxWorkflowTaskStartToCloseTimeout = 120 * time.Second -) - -const ( - // DefaultTransactionSizeLimit is the largest allowed transaction size to persistence - DefaultTransactionSizeLimit = 4 * 1024 * 1024 -) - -const ( - // TimeoutFailureTypePrefix is the prefix for timeout failure types - // used in retry policy - // the actual failure type will be prefix + enums.TimeoutType.String() - // e.g. 
"TemporalTimeout:StartToClose" or "TemporalTimeout:Heartbeat" - TimeoutFailureTypePrefix = "TemporalTimeout:" + CriticalLongPollTimeout = time.Second * 10 ) const ( // Limit for schedule notes field ScheduleNotesSizeLimit = 1000 + + ScheduledTaskMinPrecision = time.Millisecond ) const ( diff --git a/common/contextutil/deadline.go b/common/contextutil/deadline.go new file mode 100644 index 00000000000..3aa74d30651 --- /dev/null +++ b/common/contextutil/deadline.go @@ -0,0 +1,39 @@ +package contextutil + +import ( + "context" + "time" +) + +var noop = func() {} + +// WithDeadlineBuffer returns a child context with a deadline that ensures that at least buffer +// amount of time remains after the child deadline expires and before the parent deadline expires. +// The returned context timeout is therefore <= the requested timeout. If the parent deadline itself +// does not allow buffer amount of time, then the returned context deadline expires immediately. Use +// this method to create child context when the child cannot use all of parent's deadline but +// instead there is a need to leave some time for parent to do some post-work. +func WithDeadlineBuffer( + parent context.Context, + timeout time.Duration, + buffer time.Duration, +) (context.Context, context.CancelFunc) { + if parent.Err() != nil { + return parent, noop + } + + parentDeadline, parentHasDeadline := parent.Deadline() + + if !parentHasDeadline { + // No parent deadline, so buffer is available to parent after child deadline expiry. + return context.WithTimeout(parent, timeout) + } + + // If parent deadline itself does not allow buffer then set child timeout to zero. Otherwise + // compute child deadline such that at least buffer remains after it and before parent deadline. 
+ remaining := time.Until(parentDeadline) - buffer + if remaining < timeout { + timeout = max(0, remaining) + } + return context.WithTimeout(parent, timeout) +} diff --git a/common/contextutil/deadline_test.go b/common/contextutil/deadline_test.go new file mode 100644 index 00000000000..326bb02cfd2 --- /dev/null +++ b/common/contextutil/deadline_test.go @@ -0,0 +1,59 @@ +package contextutil + +import ( + "context" + "math" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +const testTolerance = 5 * time.Second + +func TestWithDeadlineBuffer(t *testing.T) { + const timeout = 10 * time.Minute + const buffer = 1 * time.Minute + start := time.Now() + + t.Run("parent is cancelled", func(t *testing.T) { + parent, cancel := context.WithCancel(context.Background()) + cancel() + + child, _ := WithDeadlineBuffer(parent, timeout, buffer) + require.Equal(t, parent, child) + }) + + t.Run("parent has no deadline", func(t *testing.T) { + parent := context.Background() + + t.Run("timeout specified", func(t *testing.T) { + child, _ := WithDeadlineBuffer(parent, timeout, 0) + dl, _ := child.Deadline() + require.WithinDuration(t, start.Add(timeout), dl, testTolerance) + }) + }) + + t.Run("parent has deadline", func(t *testing.T) { + parent, parentCancel := context.WithTimeout(context.Background(), timeout) + defer parentCancel() + parentDeadline, _ := parent.Deadline() + + t.Run("enough buffer left", func(t *testing.T) { + child, _ := WithDeadlineBuffer(parent, math.MaxInt, buffer) + dl, _ := child.Deadline() + require.WithinDuration(t, parentDeadline.Add(-buffer), dl, testTolerance) + }) + + t.Run("no buffer left", func(t *testing.T) { + child, _ := WithDeadlineBuffer(parent, math.MaxInt, math.MaxInt) + require.Equal(t, child.Err(), context.DeadlineExceeded) + }) + + t.Run("enough buffer left but less than max timeout", func(t *testing.T) { + child, _ := WithDeadlineBuffer(parent, timeout/2, buffer) + dl, _ := child.Deadline() + require.WithinDuration(t, 
parentDeadline.Add(-timeout/2), dl, testTolerance) + }) + }) +} diff --git a/common/contextutil/metadata.go b/common/contextutil/metadata.go new file mode 100644 index 00000000000..6717f22d36c --- /dev/null +++ b/common/contextutil/metadata.go @@ -0,0 +1,187 @@ +package contextutil + +import ( + "context" + "strconv" + "strings" + "sync" +) + +type ( + metadataContextKey struct{} + + // metadataContext is used to store workflow and activity metadata + metadataContext struct { + sync.Mutex + Metadata map[string]any + MarkedActivityIDs map[string]struct{} + } +) + +var metadataCtxKey = metadataContextKey{} + +const ( + // MetadataKeyWorkflowType is the context metadata key for workflow type + MetadataKeyWorkflowType = "workflow-type" + // MetadataKeyWorkflowTaskQueue is the context metadata key for workflow task queue + MetadataKeyWorkflowTaskQueue = "workflow-task-queue" + // MetadataKeyStandaloneActivityType is the context metadata key for standalone activity type. + MetadataKeyStandaloneActivityType = "standalone-activity-type" + // MetadataKeyStandaloneActivityTaskQueue is the context metadata key for standalone activity task queue. + MetadataKeyStandaloneActivityTaskQueue = "standalone-activity-task-queue" + + activityTypePrefix = "activity-type-" + activityTaskQueuePrefix = "activity-task-queue-" +) + +// ActivityTypeKey returns the metadata key for an activity's type, keyed by scheduled event ID. +func ActivityTypeKey(scheduledEventID int64) string { + return activityTypePrefix + strconv.FormatInt(scheduledEventID, 10) +} + +// ActivityTaskQueueKey returns the metadata key for an activity's task queue, keyed by scheduled event ID. +func ActivityTaskQueueKey(scheduledEventID int64) string { + return activityTaskQueuePrefix + strconv.FormatInt(scheduledEventID, 10) +} + +// ContextMetadataGetActivityTypeAndTaskQueue scans the context metadata for a single +// activity's type and task queue. 
Returns false if no activity metadata is found +// or if multiple activities are present. +func ContextMetadataGetActivityTypeAndTaskQueue(ctx context.Context) (activityType string, taskQueue string, ok bool) { + metadataCtx := getMetadataContext(ctx) + if metadataCtx == nil { + return "", "", false + } + + metadataCtx.Lock() + defer metadataCtx.Unlock() + + var foundType, foundTaskQueue bool + for key, value := range metadataCtx.Metadata { + if strings.HasPrefix(key, activityTypePrefix) { + if foundType { + return "", "", false + } + activityType, foundType = value.(string) + } else if strings.HasPrefix(key, activityTaskQueuePrefix) { + if foundTaskQueue { + return "", "", false + } + taskQueue, foundTaskQueue = value.(string) + } + } + + return activityType, taskQueue, foundType && foundTaskQueue +} + +// ContextMetadataMarkActivityID marks an activity ID on the context for metadata resolution. +// The handler knows which activity (from the task token) but not its type or task queue. +// Mutable state knows the activity details but not which activity the request targets. +// This bridges the two: the handler marks the ID, and SetContextMetadata (during +// closeTransaction) resolves it to type and task queue from mutable state. +// Cannot be used for transactions that remove the activity from mutable state +// (e.g., activity completion), since it won't be available for resolution. +func ContextMetadataMarkActivityID(ctx context.Context, activityID string) bool { + metadataCtx := getMetadataContext(ctx) + if metadataCtx == nil { + return false + } + metadataCtx.Lock() + defer metadataCtx.Unlock() + metadataCtx.MarkedActivityIDs[activityID] = struct{}{} + return true +} + +// ContextMetadataGetMarkedActivityIDs returns the marked activity IDs from the context. 
+func ContextMetadataGetMarkedActivityIDs(ctx context.Context) []string { + metadataCtx := getMetadataContext(ctx) + if metadataCtx == nil { + return nil + } + + metadataCtx.Lock() + defer metadataCtx.Unlock() + + if len(metadataCtx.MarkedActivityIDs) == 0 { + return nil + } + ids := make([]string, 0, len(metadataCtx.MarkedActivityIDs)) + for id := range metadataCtx.MarkedActivityIDs { + ids = append(ids, id) + } + return ids +} + +// getMetadataContext extracts metadata context from golang context. +func getMetadataContext(ctx context.Context) *metadataContext { + metadataCtx := ctx.Value(metadataCtxKey) + if metadataCtx == nil { + return nil + } + mc, ok := metadataCtx.(*metadataContext) + if !ok { + return nil + } + return mc +} + +// WithMetadataContext adds a metadata context to the given context. +func WithMetadataContext(ctx context.Context) context.Context { + metadataCtx := &metadataContext{ + Metadata: make(map[string]any), + MarkedActivityIDs: make(map[string]struct{}), + } + return context.WithValue(ctx, metadataCtxKey, metadataCtx) +} + +// ContextHasMetadata returns true if the context has metadata support. +// This can be used to debug whether a context has been properly initialized with metadata. +func ContextHasMetadata(ctx context.Context) bool { + return getMetadataContext(ctx) != nil +} + +// ContextMetadataSet sets a metadata key-value pair in the context, overwriting any existing value. +func ContextMetadataSet(ctx context.Context, key string, value any) bool { + metadataCtx := getMetadataContext(ctx) + if metadataCtx == nil { + return false + } + + metadataCtx.Lock() + defer metadataCtx.Unlock() + + metadataCtx.Metadata[key] = value + return true +} + +// ContextMetadataGet retrieves a metadata value from the context. 
+func ContextMetadataGet(ctx context.Context, key string) (any, bool) { + metadataCtx := getMetadataContext(ctx) + if metadataCtx == nil { + return nil, false + } + + metadataCtx.Lock() + defer metadataCtx.Unlock() + + value, ok := metadataCtx.Metadata[key] + return value, ok +} + +// ContextMetadataGetAll retrieves all metadata from the context as a map copy. +func ContextMetadataGetAll(ctx context.Context) map[string]any { + metadataCtx := getMetadataContext(ctx) + if metadataCtx == nil { + return nil + } + + metadataCtx.Lock() + defer metadataCtx.Unlock() + + // Return a copy to prevent external modifications + result := make(map[string]any, len(metadataCtx.Metadata)) + for k, v := range metadataCtx.Metadata { + result[k] = v + } + return result +} diff --git a/common/contextutil/metadata_test.go b/common/contextutil/metadata_test.go new file mode 100644 index 00000000000..1b9d5c758c1 --- /dev/null +++ b/common/contextutil/metadata_test.go @@ -0,0 +1,541 @@ +package contextutil + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAddMetadataContext(t *testing.T) { + t.Run("adds metadata context to empty context", func(t *testing.T) { + ctx := context.Background() + ctx = WithMetadataContext(ctx) + + metadataCtx := getMetadataContext(ctx) + require.NotNil(t, metadataCtx) + require.NotNil(t, metadataCtx.Metadata) + require.Empty(t, metadataCtx.Metadata) + }) + + t.Run("returns new context with metadata", func(t *testing.T) { + ctx := context.Background() + ctxWithMetadata := WithMetadataContext(ctx) + + require.NotEqual(t, ctx, ctxWithMetadata) + require.Nil(t, getMetadataContext(ctx)) + require.NotNil(t, getMetadataContext(ctxWithMetadata)) + }) +} + +func TestContextMetadataSet(t *testing.T) { + t.Run("sets string value successfully", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + success := ContextMetadataSet(ctx, "key1", "value1") + require.True(t, success) + + metadataCtx := 
getMetadataContext(ctx) + require.NotNil(t, metadataCtx) + require.Equal(t, "value1", metadataCtx.Metadata["key1"]) + }) + + t.Run("sets multiple values successfully", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + ContextMetadataSet(ctx, "key1", "value1") + ContextMetadataSet(ctx, "key2", 42) + ContextMetadataSet(ctx, "key3", true) + + metadataCtx := getMetadataContext(ctx) + require.NotNil(t, metadataCtx) + require.Equal(t, "value1", metadataCtx.Metadata["key1"]) + require.Equal(t, 42, metadataCtx.Metadata["key2"]) + require.Equal(t, true, metadataCtx.Metadata["key3"]) + }) + + t.Run("overwrites existing value", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + ContextMetadataSet(ctx, "key1", "value1") + ContextMetadataSet(ctx, "key1", "value2") + + metadataCtx := getMetadataContext(ctx) + require.NotNil(t, metadataCtx) + require.Equal(t, "value2", metadataCtx.Metadata["key1"]) + }) + + t.Run("returns false when context has no metadata", func(t *testing.T) { + ctx := context.Background() + + success := ContextMetadataSet(ctx, "key1", "value1") + require.False(t, success) + }) + + t.Run("supports various value types", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + type customStruct struct { + Field string + } + + testCases := []struct { + key string + value any + }{ + {"string", "test"}, + {"int", 123}, + {"float", 3.14}, + {"bool", true}, + {"slice", []string{"a", "b", "c"}}, + {"map", map[string]int{"a": 1, "b": 2}}, + {"struct", customStruct{Field: "test"}}, + {"nil", nil}, + } + + for _, tc := range testCases { + t.Run(tc.key, func(t *testing.T) { + success := ContextMetadataSet(ctx, tc.key, tc.value) + require.True(t, success) + + metadataCtx := getMetadataContext(ctx) + require.NotNil(t, metadataCtx) + require.Equal(t, tc.value, metadataCtx.Metadata[tc.key]) + }) + } + }) +} + +func TestContextMetadataGet(t *testing.T) { + t.Run("retrieves existing value", func(t *testing.T) 
{ + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, "key1", "value1") + + value, ok := ContextMetadataGet(ctx, "key1") + require.True(t, ok) + require.Equal(t, "value1", value) + }) + + t.Run("returns false for non-existent key", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + value, ok := ContextMetadataGet(ctx, "nonexistent") + require.False(t, ok) + require.Nil(t, value) + }) + + t.Run("returns false when context has no metadata", func(t *testing.T) { + ctx := context.Background() + + value, ok := ContextMetadataGet(ctx, "key1") + require.False(t, ok) + require.Nil(t, value) + }) + + t.Run("retrieves nil value correctly", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, "nilKey", nil) + + value, ok := ContextMetadataGet(ctx, "nilKey") + require.True(t, ok) + require.Nil(t, value) + }) + + t.Run("retrieves various value types", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + ContextMetadataSet(ctx, "string", "test") + ContextMetadataSet(ctx, "int", 42) + ContextMetadataSet(ctx, "slice", []int{1, 2, 3}) + + strVal, ok := ContextMetadataGet(ctx, "string") + require.True(t, ok) + require.Equal(t, "test", strVal) + + intVal, ok := ContextMetadataGet(ctx, "int") + require.True(t, ok) + require.Equal(t, 42, intVal) + + sliceVal, ok := ContextMetadataGet(ctx, "slice") + require.True(t, ok) + require.Equal(t, []int{1, 2, 3}, sliceVal) + }) +} + +func TestContextMetadataGetAll(t *testing.T) { + t.Run("retrieves all metadata", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, "key1", "value1") + ContextMetadataSet(ctx, "key2", 42) + + allMetadata := ContextMetadataGetAll(ctx) + require.NotNil(t, allMetadata) + require.Len(t, allMetadata, 2) + require.Equal(t, "value1", allMetadata["key1"]) + require.Equal(t, 42, allMetadata["key2"]) + }) + + t.Run("returns nil when context has no metadata", 
func(t *testing.T) { + ctx := context.Background() + + allMetadata := ContextMetadataGetAll(ctx) + require.Nil(t, allMetadata) + }) + + t.Run("returns empty map when no metadata set", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + + allMetadata := ContextMetadataGetAll(ctx) + require.NotNil(t, allMetadata) + require.Empty(t, allMetadata) + }) + + t.Run("returned map is a copy", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, "key1", "value1") + + allMetadata := ContextMetadataGetAll(ctx) + allMetadata["key2"] = "value2" + + // Original should not be affected + _, ok := ContextMetadataGet(ctx, "key2") + require.False(t, ok) + }) +} + +func TestGetMetadataContext(t *testing.T) { + t.Run("returns nil for context without metadata", func(t *testing.T) { + ctx := context.Background() + metadataCtx := getMetadataContext(ctx) + require.Nil(t, metadataCtx) + }) + + t.Run("returns metadata context when present", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + metadataCtx := getMetadataContext(ctx) + require.NotNil(t, metadataCtx) + }) + + t.Run("returns nil for wrong type in context", func(t *testing.T) { + ctx := context.WithValue(context.Background(), metadataCtxKey, "wrong type") + metadataCtx := getMetadataContext(ctx) + require.Nil(t, metadataCtx) + }) +} + +func TestMetadataContextWithContextCancellation(t *testing.T) { + t.Run("metadata survives context cancellation", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + ctx = WithMetadataContext(ctx) + + ContextMetadataSet(ctx, "key1", "value1") + + // Cancel the context + cancel() + + // Metadata should still be accessible + value, ok := ContextMetadataGet(ctx, "key1") + require.True(t, ok) + require.Equal(t, "value1", value) + + // Should still be able to set new values + success := ContextMetadataSet(ctx, "key2", "value2") + require.True(t, success) + + value, ok = 
ContextMetadataGet(ctx, "key2") + require.True(t, ok) + require.Equal(t, "value2", value) + }) +} + +func TestMetadataContextIsolation(t *testing.T) { + t.Run("contexts with different metadata are isolated", func(t *testing.T) { + ctx1 := WithMetadataContext(context.Background()) + ctx2 := WithMetadataContext(context.Background()) + + ContextMetadataSet(ctx1, "key", "value1") + ContextMetadataSet(ctx2, "key", "value2") + + value1, ok1 := ContextMetadataGet(ctx1, "key") + value2, ok2 := ContextMetadataGet(ctx2, "key") + + require.True(t, ok1) + require.True(t, ok2) + require.Equal(t, "value1", value1) + require.Equal(t, "value2", value2) + }) + + t.Run("child context does not inherit parent metadata", func(t *testing.T) { + parentCtx := WithMetadataContext(context.Background()) + ContextMetadataSet(parentCtx, "key", "parent-value") + + type testContextKey string + childCtx := context.WithValue(parentCtx, testContextKey("other-key"), "other-value") + + // Child can still access parent's metadata context + value, ok := ContextMetadataGet(childCtx, "key") + require.True(t, ok) + require.Equal(t, "parent-value", value) + + // Setting in child affects parent (same metadata context) + ContextMetadataSet(childCtx, "key2", "child-value") + value, ok = ContextMetadataGet(parentCtx, "key2") + require.True(t, ok) + require.Equal(t, "child-value", value) + }) + + t.Run("adding metadata context twice creates new isolated context", func(t *testing.T) { + ctx1 := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx1, "key", "value1") + + ctx2 := WithMetadataContext(ctx1) + ContextMetadataSet(ctx2, "key", "value2") + + value1, ok1 := ContextMetadataGet(ctx1, "key") + value2, ok2 := ContextMetadataGet(ctx2, "key") + + require.True(t, ok1) + require.True(t, ok2) + require.Equal(t, "value1", value1) + require.Equal(t, "value2", value2) + }) +} + +func TestContextHasMetadata(t *testing.T) { + t.Run("returns true when context has metadata", func(t *testing.T) { + ctx := 
WithMetadataContext(context.Background()) + require.True(t, ContextHasMetadata(ctx)) + }) + + t.Run("returns false for context without metadata", func(t *testing.T) { + ctx := context.Background() + require.False(t, ContextHasMetadata(ctx)) + }) + + t.Run("returns true after setting metadata values", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, "key1", "value1") + ContextMetadataSet(ctx, "key2", "value2") + + require.True(t, ContextHasMetadata(ctx)) + }) + + t.Run("returns true for empty metadata context", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + // No values set, but metadata context exists + require.True(t, ContextHasMetadata(ctx)) + }) + + t.Run("child context inherits metadata from parent", func(t *testing.T) { + parentCtx := WithMetadataContext(context.Background()) + ContextMetadataSet(parentCtx, "key", "value") + + type testContextKey string + childCtx := context.WithValue(parentCtx, testContextKey("other-key"), "other-value") + + require.True(t, ContextHasMetadata(parentCtx)) + require.True(t, ContextHasMetadata(childCtx)) + }) + + t.Run("returns false for wrong type in context", func(t *testing.T) { + ctx := context.WithValue(context.Background(), metadataCtxKey, "wrong type") + require.False(t, ContextHasMetadata(ctx)) + }) + + t.Run("returns true for cancelled context with metadata", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + ctx = WithMetadataContext(ctx) + cancel() + + require.True(t, ContextHasMetadata(ctx)) + }) + + t.Run("multiple contexts with metadata are independent", func(t *testing.T) { + ctx1 := WithMetadataContext(context.Background()) + ctx2 := WithMetadataContext(context.Background()) + ctx3 := context.Background() + + require.True(t, ContextHasMetadata(ctx1)) + require.True(t, ContextHasMetadata(ctx2)) + require.False(t, ContextHasMetadata(ctx3)) + }) +} + +func TestContextMetadataGetActivityTypeAndTaskQueue(t 
*testing.T) { + t.Run("returns false without metadata context", func(t *testing.T) { + ctx := context.Background() + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.False(t, ok) + require.Empty(t, actType) + require.Empty(t, taskQueue) + }) + + t.Run("returns false when no activity metadata", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.False(t, ok) + require.Empty(t, actType) + require.Empty(t, taskQueue) + }) + + t.Run("returns single activity type and task queue", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, ActivityTypeKey(42), "my-activity") + ContextMetadataSet(ctx, ActivityTaskQueueKey(42), "my-queue") + + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.True(t, ok) + require.Equal(t, "my-activity", actType) + require.Equal(t, "my-queue", taskQueue) + }) + + t.Run("returns false when multiple activities present", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, ActivityTypeKey(1), "activity-a") + ContextMetadataSet(ctx, ActivityTaskQueueKey(1), "queue-a") + ContextMetadataSet(ctx, ActivityTypeKey(2), "activity-b") + ContextMetadataSet(ctx, ActivityTaskQueueKey(2), "queue-b") + + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.False(t, ok) + require.Empty(t, actType) + require.Empty(t, taskQueue) + }) + + t.Run("returns false when only activity type present", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, ActivityTypeKey(1), "my-activity") + + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.False(t, ok) + require.Empty(t, taskQueue) + _ = actType + }) + + t.Run("returns false when only task queue present", func(t *testing.T) { + ctx := 
WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, ActivityTaskQueueKey(1), "my-queue") + + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.False(t, ok) + require.Empty(t, actType) + _ = taskQueue + }) + + t.Run("ignores workflow metadata keys", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, MetadataKeyWorkflowType, "my-workflow") + ContextMetadataSet(ctx, MetadataKeyWorkflowTaskQueue, "workflow-queue") + ContextMetadataSet(ctx, ActivityTypeKey(7), "my-activity") + ContextMetadataSet(ctx, ActivityTaskQueueKey(7), "activity-queue") + + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.True(t, ok) + require.Equal(t, "my-activity", actType) + require.Equal(t, "activity-queue", taskQueue) + }) + + t.Run("ignores standalone activity metadata keys", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, MetadataKeyStandaloneActivityType, "standalone-act") + ContextMetadataSet(ctx, MetadataKeyStandaloneActivityTaskQueue, "standalone-queue") + ContextMetadataSet(ctx, ActivityTypeKey(3), "my-activity") + ContextMetadataSet(ctx, ActivityTaskQueueKey(3), "my-queue") + + actType, taskQueue, ok := ContextMetadataGetActivityTypeAndTaskQueue(ctx) + require.True(t, ok) + require.Equal(t, "my-activity", actType) + require.Equal(t, "my-queue", taskQueue) + }) +} + +func TestActivityTypeKey(t *testing.T) { + key := ActivityTypeKey(1) + require.Equal(t, "activity-type-1", key) + require.Equal(t, key, ActivityTypeKey(1)) + require.NotEqual(t, ActivityTypeKey(1), ActivityTypeKey(2)) +} + +func TestActivityTaskQueueKey(t *testing.T) { + key := ActivityTaskQueueKey(1) + require.Equal(t, "activity-task-queue-1", key) + require.Equal(t, key, ActivityTaskQueueKey(1)) + require.NotEqual(t, ActivityTaskQueueKey(1), ActivityTaskQueueKey(2)) +} + +func TestContextMetadataMarkActivityID(t *testing.T) { + t.Run("marks 
activity ID on valid context", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + require.True(t, ContextMetadataMarkActivityID(ctx, "act-1")) + + ids := ContextMetadataGetMarkedActivityIDs(ctx) + require.Equal(t, []string{"act-1"}, ids) + }) + + t.Run("returns false without metadata context", func(t *testing.T) { + ctx := context.Background() + require.False(t, ContextMetadataMarkActivityID(ctx, "act-1")) + }) + + t.Run("marks multiple activity IDs", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + require.True(t, ContextMetadataMarkActivityID(ctx, "act-1")) + require.True(t, ContextMetadataMarkActivityID(ctx, "act-2")) + + ids := ContextMetadataGetMarkedActivityIDs(ctx) + require.ElementsMatch(t, []string{"act-1", "act-2"}, ids) + }) +} + +func TestContextMetadataGetMarkedActivityIDs(t *testing.T) { + t.Run("returns nil without metadata context", func(t *testing.T) { + ctx := context.Background() + require.Nil(t, ContextMetadataGetMarkedActivityIDs(ctx)) + }) + + t.Run("returns nil when no activities marked", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + require.Nil(t, ContextMetadataGetMarkedActivityIDs(ctx)) + }) + + t.Run("returns single marked activity ID", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataMarkActivityID(ctx, "act-1") + + ids := ContextMetadataGetMarkedActivityIDs(ctx) + require.Equal(t, []string{"act-1"}, ids) + }) + + t.Run("returns multiple marked activity IDs", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataMarkActivityID(ctx, "act-1") + ContextMetadataMarkActivityID(ctx, "act-2") + + ids := ContextMetadataGetMarkedActivityIDs(ctx) + require.Len(t, ids, 2) + require.ElementsMatch(t, []string{"act-1", "act-2"}, ids) + }) + + t.Run("ignores non-activity keys", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataSet(ctx, MetadataKeyWorkflowType, 
"my-workflow") + ContextMetadataSet(ctx, MetadataKeyWorkflowTaskQueue, "my-queue") + ContextMetadataMarkActivityID(ctx, "act-1") + + ids := ContextMetadataGetMarkedActivityIDs(ctx) + require.Equal(t, []string{"act-1"}, ids) + }) + + t.Run("marked IDs are separate from resolved metadata", func(t *testing.T) { + ctx := WithMetadataContext(context.Background()) + ContextMetadataMarkActivityID(ctx, "act-1") + ContextMetadataSet(ctx, ActivityTaskQueueKey(5), "my-queue") + + ids := ContextMetadataGetMarkedActivityIDs(ctx) + require.Equal(t, []string{"act-1"}, ids) + + val, ok := ContextMetadataGet(ctx, ActivityTaskQueueKey(5)) + require.True(t, ok) + require.Equal(t, "my-queue", val) + }) +} diff --git a/common/convert/convert.go b/common/convert/convert.go index bd93e99cf11..134c05f69cb 100644 --- a/common/convert/convert.go +++ b/common/convert/convert.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package convert import ( diff --git a/common/daemon.go b/common/daemon.go index 65fa52c95b5..54aed98cbf4 100644 --- a/common/daemon.go +++ b/common/daemon.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package common const ( diff --git a/common/deadlock/deadlock.go b/common/deadlock/deadlock.go index 14bd346fe49..0b6e3d908df 100644 --- a/common/deadlock/deadlock.go +++ b/common/deadlock/deadlock.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. 
All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package deadlock import ( @@ -31,15 +7,15 @@ import ( "sync/atomic" "time" - "go.uber.org/fx" - "google.golang.org/grpc/health" - - "go.temporal.io/server/common" + "go.temporal.io/server/common/clock" "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/goro" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" - "go.temporal.io/server/internal/goro" + "go.temporal.io/server/common/pingable" + "go.uber.org/fx" + "google.golang.org/grpc/health" ) type ( @@ -51,7 +27,7 @@ type ( HealthServer *health.Server MetricsHandler metrics.Handler - Roots []common.Pingable `group:"deadlockDetectorRoots"` + Roots []pingable.Pingable `group:"deadlockDetectorRoots"` } config struct { @@ -67,29 +43,37 @@ type ( healthServer *health.Server metricsHandler metrics.Handler config config - roots []common.Pingable + roots []pingable.Pingable + pools []*goro.AdaptivePool loops goro.Group + + // number of suspected deadlocks that have not resolved yet + current atomic.Int64 } loopContext struct { - dd *deadlockDetector - root common.Pingable - ch chan common.PingCheck - workers int32 + dd *deadlockDetector + root pingable.Pingable + p *goro.AdaptivePool } ) +// CurrentSuspected returns the number of currently unresolved suspected deadlocks. 
+func (dd *deadlockDetector) CurrentSuspected() int64 { + return dd.current.Load() +} + func NewDeadlockDetector(params params) *deadlockDetector { return &deadlockDetector{ logger: params.Logger, healthServer: params.HealthServer, metricsHandler: params.MetricsHandler.WithTags(metrics.OperationTag(metrics.DeadlockDetectorScope)), config: config{ - DumpGoroutines: params.Collection.GetBoolProperty(dynamicconfig.DeadlockDumpGoroutines, true), - FailHealthCheck: params.Collection.GetBoolProperty(dynamicconfig.DeadlockFailHealthCheck, false), - AbortProcess: params.Collection.GetBoolProperty(dynamicconfig.DeadlockAbortProcess, false), - Interval: params.Collection.GetDurationProperty(dynamicconfig.DeadlockInterval, 30*time.Second), - MaxWorkersPerRoot: params.Collection.GetIntProperty(dynamicconfig.DeadlockMaxWorkersPerRoot, 10), + DumpGoroutines: dynamicconfig.DeadlockDumpGoroutines.Get(params.Collection), + FailHealthCheck: dynamicconfig.DeadlockFailHealthCheck.Get(params.Collection), + AbortProcess: dynamicconfig.DeadlockAbortProcess.Get(params.Collection), + Interval: dynamicconfig.DeadlockInterval.Get(params.Collection), + MaxWorkersPerRoot: dynamicconfig.DeadlockMaxWorkersPerRoot.Get(params.Collection), }, roots: params.Roots, } @@ -97,10 +81,18 @@ func NewDeadlockDetector(params params) *deadlockDetector { func (dd *deadlockDetector) Start() error { for _, root := range dd.roots { + pool := goro.NewAdaptivePool( + clock.NewRealTimeSource(), + 0, + dd.config.MaxWorkersPerRoot(), + 100*time.Millisecond, + 10, + ) + dd.pools = append(dd.pools, pool) loopCtx := &loopContext{ dd: dd, root: root, - ch: make(chan common.PingCheck), + p: pool, } dd.loops.Go(loopCtx.run) } @@ -108,6 +100,9 @@ func (dd *deadlockDetector) Start() error { } func (dd *deadlockDetector) Stop() error { + for _, pool := range dd.pools { + pool.Stop() + } dd.loops.Cancel() // don't wait for workers to exit, they may be blocked return nil @@ -116,6 +111,8 @@ func (dd *deadlockDetector) Stop() 
error { func (dd *deadlockDetector) detected(name string) { dd.logger.Error("potential deadlock detected", tag.Name(name)) + metrics.DDSuspectedDeadlocks.With(dd.metricsHandler).Record(1) + if dd.config.DumpGoroutines() { dd.dumpGoroutines() } @@ -148,11 +145,16 @@ func (dd *deadlockDetector) dumpGoroutines() { dd.logger.Info(b.String()) } +func (dd *deadlockDetector) adjustCurrent(delta int64) { + dd.current.Add(delta) + metrics.DDCurrentSuspectedDeadlocks.With(dd.metricsHandler).Record(float64(dd.current.Load())) +} + func (lc *loopContext) run(ctx context.Context) error { for { // ping blocks until it has passed all checks to a worker goroutine (using an // unbuffered channel). - lc.ping(ctx, []common.Pingable{lc.root}) + lc.ping(ctx, []pingable.Pingable{lc.root}) timer := time.NewTimer(lc.dd.config.Interval()) select { @@ -164,60 +166,44 @@ func (lc *loopContext) run(ctx context.Context) error { } } -func (lc *loopContext) ping(ctx context.Context, pingables []common.Pingable) { +func (lc *loopContext) ping(ctx context.Context, pingables []pingable.Pingable) { for _, pingable := range pingables { for _, check := range pingable.GetPingChecks() { - select { - case lc.ch <- check: - case <-ctx.Done(): - return - default: - // maybe add another worker if blocked - w := atomic.LoadInt32(&lc.workers) - if w < int32(lc.dd.config.MaxWorkersPerRoot()) && atomic.CompareAndSwapInt32(&lc.workers, w, w+1) { - lc.dd.loops.Go(lc.worker) - } - // blocking send - select { - case lc.ch <- check: - case <-ctx.Done(): - return - } - } + lc.p.Do(func() { lc.check(ctx, check) }) } } } -func (lc *loopContext) worker(ctx context.Context) error { - for { - var check common.PingCheck - select { - case check = <-lc.ch: - case <-ctx.Done(): - return nil - } - - lc.dd.logger.Debug("starting ping check", tag.Name(check.Name)) - startTime := time.Now().UTC() - - // Using AfterFunc is cheaper than creating another goroutine to be the waiter, since - // we expect to always cancel it. 
If the go runtime is so messed up that it can't - // create a goroutine, that's a bigger problem than we can handle. - t := time.AfterFunc(check.Timeout, func() { - if ctx.Err() != nil { - // deadlock detector was stopped - return - } - lc.dd.detected(check.Name) - }) - newPingables := check.Ping() - t.Stop() - if len(check.MetricsName) > 0 { - lc.dd.metricsHandler.Timer(check.MetricsName).Record(time.Since(startTime)) +func (lc *loopContext) check(ctx context.Context, check pingable.Check) { + lc.dd.logger.Debug("starting ping check", tag.Name(check.Name)) + startTime := time.Now().UTC() + resolved := make(chan struct{}) + + // Using AfterFunc is cheaper than creating another goroutine to be the waiter, since + // we expect to always cancel it. If the go runtime is so messed up that it can't + // create a goroutine, that's a bigger problem than we can handle. + t := time.AfterFunc(check.Timeout, func() { + if ctx.Err() != nil { + // deadlock detector was stopped + return } + lc.dd.adjustCurrent(1) + + lc.dd.detected(check.Name) + + // Wait and see if Ping() returns past the timeout. If it's a true deadlock, it'll + // block here forever. 
+ <-resolved + lc.dd.adjustCurrent(-1) + }) + newPingables := check.Ping() + t.Stop() + if len(check.MetricsName) > 0 { + lc.dd.metricsHandler.Timer(check.MetricsName).Record(time.Since(startTime)) + } + close(resolved) - lc.dd.logger.Debug("ping check succeeded", tag.Name(check.Name)) + lc.dd.logger.Debug("ping check succeeded", tag.Name(check.Name)) - lc.ping(ctx, newPingables) - } + lc.ping(ctx, newPingables) } diff --git a/common/deadlock/deadlock_test.go b/common/deadlock/deadlock_test.go new file mode 100644 index 00000000000..6c8a14a6bd3 --- /dev/null +++ b/common/deadlock/deadlock_test.go @@ -0,0 +1,77 @@ +package deadlock + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.temporal.io/server/common/clock" + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/goro" + "go.temporal.io/server/common/log" + "go.temporal.io/server/common/metrics" + "go.temporal.io/server/common/metrics/metricstest" + "go.temporal.io/server/common/pingable" +) + +type blockingPingable struct{ done chan struct{} } + +func (b *blockingPingable) GetPingChecks() []pingable.Check { + return []pingable.Check{{ + Name: "test", + Timeout: 10 * time.Millisecond, + Ping: func() []pingable.Pingable { + <-b.done + return nil + }, + }} +} + +func TestCurrentCounterAndGauge(t *testing.T) { + mh := metricstest.NewCaptureHandler() + dd := NewDeadlockDetector(params{ + Logger: log.NewNoopLogger(), + Collection: dynamicconfig.NewNoopCollection(), + MetricsHandler: mh, + }) + + lc := &loopContext{ + dd: dd, + p: goro.NewAdaptivePool(clock.NewRealTimeSource(), 0, 1, 10*time.Millisecond, 10), + root: nil, + } + defer lc.p.Stop() + + b := &blockingPingable{done: make(chan struct{})} + check := b.GetPingChecks()[0] + + capture := mh.StartCapture() + go lc.check(context.Background(), check) + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + require.Equal(collect, int64(1), 
dd.CurrentSuspected()) + + snapshot := capture.Snapshot() + current := snapshot[metrics.DDCurrentSuspectedDeadlocks.Name()] + counter := snapshot[metrics.DDSuspectedDeadlocks.Name()] + require.Len(collect, current, 1) + require.Equal(collect, 1.0, current[0].Value) + require.Len(collect, counter, 1) + require.Equal(collect, int64(1), counter[0].Value) + }, 2*time.Second, time.Millisecond) + + close(b.done) + + require.EventuallyWithT(t, func(collect *assert.CollectT) { + require.Equal(collect, int64(0), dd.CurrentSuspected()) + + snapshot := capture.Snapshot() + current := snapshot[metrics.DDCurrentSuspectedDeadlocks.Name()] + counter := snapshot[metrics.DDSuspectedDeadlocks.Name()] + require.Len(collect, current, 2) + require.Equal(collect, 0.0, current[1].Value) + require.Len(collect, counter, 1) + }, 2*time.Second, time.Millisecond) +} diff --git a/common/deadlock/fx.go b/common/deadlock/fx.go index 792c9661085..f335dc8965a 100644 --- a/common/deadlock/fx.go +++ b/common/deadlock/fx.go @@ -1,45 +1,12 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package deadlock import ( - "context" - "go.uber.org/fx" ) var Module = fx.Options( fx.Provide(NewDeadlockDetector), fx.Invoke(func(lc fx.Lifecycle, dd *deadlockDetector) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - return dd.Start() - }, - OnStop: func(ctx context.Context) error { - return dd.Stop() - }, - }) + lc.Append(fx.StartStopHook(dd.Start, dd.Stop)) }), ) diff --git a/common/debug/debug.go b/common/debug/debug.go index e11705355dc..1395fe57b33 100644 --- a/common/debug/debug.go +++ b/common/debug/debug.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - //go:build TEMPORAL_DEBUG package debug diff --git a/common/debug/not_debug.go b/common/debug/not_debug.go index d93c6543cb4..fa3dfa7d7ef 100644 --- a/common/debug/not_debug.go +++ b/common/debug/not_debug.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- //go:build !TEMPORAL_DEBUG package debug diff --git a/common/default_retry_settings.go b/common/default_retry_settings.go deleted file mode 100644 index 99cc934479b..00000000000 --- a/common/default_retry_settings.go +++ /dev/null @@ -1,37 +0,0 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package common - -import "time" - -// DefaultRetrySettings indicates what the "default" retry settings -// are if it is not specified on an Activity or for any unset fields -// if a policy is explicitly set on a workflow -type DefaultRetrySettings struct { - InitialInterval time.Duration - MaximumIntervalCoefficient float64 - BackoffCoefficient float64 - MaximumAttempts int32 -} diff --git a/common/definition/resource_dedup.go b/common/definition/resource_dedup.go index 33f5126d54f..adcc96d18b0 100644 --- a/common/definition/resource_dedup.go +++ b/common/definition/resource_dedup.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package definition import ( diff --git a/common/definition/resource_dedup_test.go b/common/definition/resource_dedup_test.go index 4e0b2c11ea0..ab6ea770a45 100644 --- a/common/definition/resource_dedup_test.go +++ b/common/definition/resource_dedup_test.go @@ -1,27 +1,3 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package definition import ( diff --git a/common/definition/workflow_key.go b/common/definition/workflow_key.go index f43655e9a11..ae82e98dad7 100644 --- a/common/definition/workflow_key.go +++ b/common/definition/workflow_key.go @@ -1,29 +1,9 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package definition +import ( + "fmt" +) + type ( // WorkflowKey is the combinations which represent a workflow WorkflowKey struct { @@ -57,3 +37,7 @@ func (k *WorkflowKey) GetWorkflowID() string { func (k *WorkflowKey) GetRunID() string { return k.RunID } + +func (k *WorkflowKey) String() string { + return fmt.Sprintf("%v/%v/%v", k.NamespaceID, k.WorkflowID, k.RunID) +} diff --git a/common/dynamicconfig/client.go b/common/dynamicconfig/client.go index 6df62f2f1cf..5c903b2270f 100644 --- a/common/dynamicconfig/client.go +++ b/common/dynamicconfig/client.go @@ -1,32 +1,7 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package dynamicconfig import ( enumspb "go.temporal.io/api/enums/v1" - enumsspb "go.temporal.io/server/api/enums/v1" ) @@ -48,12 +23,26 @@ type ( // Note that GetValue is called very often! You should not synchronously call out to an // external system. Instead you should keep a set of all configured values, refresh it // periodically or when notified, and only do in-memory lookups inside of GetValue. + // + // Implementations should prefer to return the same slice in response to the same key + // as long as the value hasn't changed. Value conversions are cached using weak + // pointers into the returned slice, so new slices will result in unnecessary calls to + // conversion functions. GetValue(key Key) []ConstrainedValue } - // Key is a key/property stored in dynamic config. For convenience, it is recommended that - // you treat keys as case-insensitive. 
- Key string + // NotifyingClient is an optional interface that a Client can also implement, that adds + // support for faster notifications of dynamic config changes. + NotifyingClient interface { + // Adds a subscription to all updates from this Client. `update` will be called on any + // change to the current value set. The caller should call `cancel` to cancel the + // subscription. + Subscribe(update ClientUpdateFunc) (cancel func()) + } + + // Called with modified keys on any change to the current value set. + // Deleted keys/constraints will get a nil value. + ClientUpdateFunc func(map[Key][]ConstrainedValue) // ConstrainedValue is a value plus associated constraints. // @@ -68,6 +57,10 @@ type ( Constraints Constraints Value any } + TypedConstrainedValue[T any] struct { + Constraints Constraints + Value T + } // Constraints describe under what conditions a ConstrainedValue should be used. // There are few standard "constraint precedence orders" that the server uses: @@ -95,6 +88,8 @@ type ( Namespace string NamespaceID string TaskQueueName string + Destination string + ChasmTaskType string TaskQueueType enumspb.TaskQueueType ShardID int32 TaskType enumsspb.TaskType diff --git a/common/dynamicconfig/client_diff.go b/common/dynamicconfig/client_diff.go new file mode 100644 index 00000000000..32e72102487 --- /dev/null +++ b/common/dynamicconfig/client_diff.go @@ -0,0 +1,123 @@ +package dynamicconfig + +import ( + "fmt" + "reflect" + "strings" + + enumspb "go.temporal.io/api/enums/v1" + enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/common/log" +) + +// DiffAndLogConfigs computes the difference between two ConfigValueMaps. The result is +// returned as a ConfigValueMap that can be merged with old to produce new, except with deleted +// keys mapped to nil. It also logs the differences to a logger. 
+func DiffAndLogConfigs(logger log.Logger, oldValues ConfigValueMap, newValues ConfigValueMap) ConfigValueMap { + changedMap := make(map[Key][]ConstrainedValue) + + for key, newValues := range newValues { + oldValues, ok := oldValues[key] + if !ok { + for _, newValue := range newValues { + // new key added + diffAndLogValue(logger, key, nil, &newValue) + } + changedMap[Key(key)] = newValues + } else { + // compare existing keys + changed := diffAndLogConstraints(logger, key, oldValues, newValues) + if changed { + changedMap[Key(key)] = newValues + } + } + } + + // check for removed values + for key, oldValues := range oldValues { + if _, ok := newValues[key]; !ok { + for _, oldValue := range oldValues { + diffAndLogValue(logger, key, &oldValue, nil) + } + changedMap[Key(key)] = nil + } + } + + return changedMap +} + +func diffAndLogConstraints(logger log.Logger, key Key, oldValues []ConstrainedValue, newValues []ConstrainedValue) bool { + changed := false + for _, oldValue := range oldValues { + matchFound := false + for _, newValue := range newValues { + if oldValue.Constraints == newValue.Constraints { + matchFound = true + if !reflect.DeepEqual(oldValue.Value, newValue.Value) { + diffAndLogValue(logger, key, &oldValue, &newValue) + changed = true + } + } + } + if !matchFound { + diffAndLogValue(logger, key, &oldValue, nil) + changed = true + } + } + + for _, newValue := range newValues { + matchFound := false + for _, oldValue := range oldValues { + if oldValue.Constraints == newValue.Constraints { + matchFound = true + } + } + if !matchFound { + diffAndLogValue(logger, key, nil, &newValue) + changed = true + } + } + return changed +} + +func diffAndLogValue(logger log.Logger, key Key, oldValue *ConstrainedValue, newValue *ConstrainedValue) { + logLine := &strings.Builder{} + logLine.Grow(128) + logLine.WriteString("dynamic config changed for the key: ") + logLine.WriteString(key.String()) + logLine.WriteString(" oldValue: ") + appendConstrainedValue(logLine, 
oldValue) + logLine.WriteString(" newValue: ") + appendConstrainedValue(logLine, newValue) + logger.Info(logLine.String()) +} + +func appendConstrainedValue(logLine *strings.Builder, value *ConstrainedValue) { + if value == nil { + logLine.WriteString("nil") + } else { + logLine.WriteString("{ constraints: {") + if value.Constraints.Namespace != "" { + fmt.Fprintf(logLine, "{Namespace:%s}", value.Constraints.Namespace) + } + if value.Constraints.NamespaceID != "" { + fmt.Fprintf(logLine, "{NamespaceID:%s}", value.Constraints.NamespaceID) + } + if value.Constraints.TaskQueueName != "" { + fmt.Fprintf(logLine, "{TaskQueueName:%s}", value.Constraints.TaskQueueName) + } + if value.Constraints.TaskQueueType != enumspb.TASK_QUEUE_TYPE_UNSPECIFIED { + fmt.Fprintf(logLine, "{TaskQueueType:%s}", value.Constraints.TaskQueueType) + } + if value.Constraints.ShardID != 0 { + fmt.Fprintf(logLine, "{ShardID:%d}", value.Constraints.ShardID) + } + if value.Constraints.TaskType != enumsspb.TASK_TYPE_UNSPECIFIED { + fmt.Fprintf(logLine, "{HistoryTaskType:%s}", value.Constraints.TaskType) + } + if value.Constraints.Destination != "" { + fmt.Fprintf(logLine, "{Destination:%s}", value.Constraints.Destination) + } + fmt.Fprint(logLine, "} value: ", value.Value, " }") + } +} diff --git a/common/dynamicconfig/client_subscriptions.go b/common/dynamicconfig/client_subscriptions.go new file mode 100644 index 00000000000..3db6b6ef789 --- /dev/null +++ b/common/dynamicconfig/client_subscriptions.go @@ -0,0 +1,54 @@ +package dynamicconfig + +import ( + "sync" + + expmaps "golang.org/x/exp/maps" +) + +type ( + // NotifyingClientImpl implements NotifyingClient and is intended to be embedded in another struct. + // NotifyingClientImpl must not be copied after first use. 
+ NotifyingClientImpl struct { + subscriptionLock sync.Mutex + subscriptionIdx int + subscriptions map[int]ClientUpdateFunc + } +) + +var _ NotifyingClient = (*NotifyingClientImpl)(nil) + +func NewNotifyingClientImpl() NotifyingClientImpl { + return NotifyingClientImpl{subscriptions: make(map[int]ClientUpdateFunc)} +} + +// Subscribe adds a subscription to all updates from this Client. +func (n *NotifyingClientImpl) Subscribe(f ClientUpdateFunc) (cancel func()) { + n.subscriptionLock.Lock() + defer n.subscriptionLock.Unlock() + + n.subscriptionIdx++ + id := n.subscriptionIdx + n.subscriptions[id] = f + + return func() { + n.subscriptionLock.Lock() + defer n.subscriptionLock.Unlock() + delete(n.subscriptions, id) + } +} + +// PublishUpdates calls all subscribed update functions with the changed keys. +func (n *NotifyingClientImpl) PublishUpdates(changed map[Key][]ConstrainedValue) { + if len(changed) == 0 { + return + } + + n.subscriptionLock.Lock() + subscriptions := expmaps.Values(n.subscriptions) + n.subscriptionLock.Unlock() + + for _, update := range subscriptions { + update(changed) + } +} diff --git a/common/dynamicconfig/collection.go b/common/dynamicconfig/collection.go index e3980b807e3..055af9a6d52 100644 --- a/common/dynamicconfig/collection.go +++ b/common/dynamicconfig/collection.go @@ -1,41 +1,27 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. 
-// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- package dynamicconfig import ( + "context" "errors" "fmt" + "math" + "reflect" + "runtime" + "strconv" + "strings" + "sync" "sync/atomic" "time" + "weak" - enumspb "go.temporal.io/api/enums/v1" - - enumsspb "go.temporal.io/server/api/enums/v1" + "github.com/mitchellh/mapstructure" + "go.temporal.io/server/common/goro" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" + "go.temporal.io/server/common/pingable" "go.temporal.io/server/common/primitives/timestamp" + "go.temporal.io/server/common/util" + "google.golang.org/protobuf/reflect/protoreflect" ) type ( @@ -46,8 +32,44 @@ type ( client Client logger log.Logger errCount int64 + + cancelClientSubscription func() + + subscriptionLock sync.Mutex // protects subscriptions and subscriptionIdx + subscriptions map[Key]map[int]any // final "any" is *subscription[T] + subscriptionIdx int + + poller goro.Group + + // cache converted values. use weak pointers to avoid holding on to values in the cache + // that are no longer in use. this must be a pointer since the cleanup closures need to + // reference this without referencing Collection. 
+ convertCache *sync.Map // map[weak.Pointer[ConstrainedValue]]any + + // index by constraints + indexCache *sync.Map // map[weak.Pointer[ConstrainedValue]]map[Constraints]int32 } + subscription[T any] struct { + // constant: + prec []Constraints + f func(T) + def T + cdef []TypedConstrainedValue[T] // nil for regular settings, populated for constrained default settings + // protected by subscriptionLock in Collection: + raw any // raw value that last sent value was converted from + } + + subscriptionCallbackSettings struct { + MinWorkers int + MaxWorkers int + TargetDelay time.Duration + ShrinkFactor float64 + } + + // sentinel type that doesn't compare equal to anything else + defaultValue struct{} + // These function types follow a similar pattern: // {X}PropertyFn - returns a value of type X that is global (no filters) // {X}PropertyFnWith{Y}Filter - returns a value of type X with the given filters @@ -61,491 +83,535 @@ type ( // Available filters: // Namespace func(namespace string) // NamespaceID func(namespaceID string) - // TaskQueueInfo func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) + // TaskQueue func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) (matching task queue) + // TaskType func(taskType enumsspb.TaskType) (history task type) // ShardID func(shardID int32) - BoolPropertyFn func() bool - BoolPropertyFnWithNamespaceFilter func(namespace string) bool - BoolPropertyFnWithNamespaceIDFilter func(namespaceID string) bool - BoolPropertyFnWithTaskQueueInfoFilters func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) bool - DurationPropertyFn func() time.Duration - DurationPropertyFnWithNamespaceFilter func(namespace string) time.Duration - DurationPropertyFnWithNamespaceIDFilter func(namespaceID string) time.Duration - DurationPropertyFnWithShardIDFilter func(shardID int32) time.Duration - DurationPropertyFnWithTaskQueueInfoFilters func(namespace string, taskQueue string, taskType 
enumspb.TaskQueueType) time.Duration - DurationPropertyFnWithTaskTypeFilter func(task enumsspb.TaskType) time.Duration - FloatPropertyFn func() float64 - FloatPropertyFnWithNamespaceFilter func(namespace string) float64 - FloatPropertyFnWithShardIDFilter func(shardID int32) float64 - FloatPropertyFnWithTaskQueueInfoFilters func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) float64 - IntPropertyFn func() int - IntPropertyFnWithNamespaceFilter func(namespace string) int - IntPropertyFnWithShardIDFilter func(shardID int32) int - IntPropertyFnWithTaskQueueInfoFilters func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) int - MapPropertyFn func() map[string]any - MapPropertyFnWithNamespaceFilter func(namespace string) map[string]any - StringPropertyFn func() string - StringPropertyFnWithNamespaceFilter func(namespace string) string - StringPropertyFnWithNamespaceIDFilter func(namespaceID string) string ) const ( errCountLogThreshold = 1000 + // After this many constraints, switch to a cached lookup. This value was determined + // empirically on my machine using BenchmarkCollectionIndexed. + constraintsCacheThreshold = 32 ) var ( errKeyNotPresent = errors.New("key not present") errNoMatchingConstraint = errors.New("no matching constraint in key") + + protoEnumType = reflect.TypeFor[protoreflect.Enum]() + errorType = reflect.TypeFor[error]() + durationType = reflect.TypeFor[time.Duration]() + timeType = reflect.TypeFor[time.Time]() + stringType = reflect.TypeFor[string]() + + usingDefaultValue any = defaultValue{} ) -// NewCollection creates a new collection +// NewCollection creates a new collection. For subscriptions to work, you must call Start/Stop. +// Get will work without Start/Stop. 
func NewCollection(client Client, logger log.Logger) *Collection { + // Do this at the first convenient place we have a logger: + logSharedStructureWarnings(logger) + return &Collection{ - client: client, - logger: logger, - errCount: -1, + client: client, + logger: logger, + errCount: -1, + subscriptions: make(map[Key]map[int]any), + convertCache: new(sync.Map), + indexCache: new(sync.Map), } } -func (c *Collection) throttleLog() bool { - // TODO: This is a lot of unnecessary contention with little benefit. Consider using - // https://github.com/cespare/percpu here. - errCount := atomic.AddInt64(&c.errCount, 1) - // log only the first x errors and then one every x after that to reduce log noise - return errCount < errCountLogThreshold || errCount%errCountLogThreshold == 0 -} - -// GetIntProperty gets property and asserts that it's an integer -func (c *Collection) GetIntProperty(key Key, defaultValue any) IntPropertyFn { - return func() int { - return matchAndConvert( - c, - key, - defaultValue, - globalPrecedence(), - convertInt, - ) +func (c *Collection) Start() { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() + if notifyingClient, ok := c.client.(NotifyingClient); ok { + c.cancelClientSubscription = notifyingClient.Subscribe(c.keysChanged) + } else { + c.poller.Go(c.pollForChanges) } } -// GetIntPropertyFilteredByNamespace gets property with namespace filter and asserts that it's an integer -func (c *Collection) GetIntPropertyFilteredByNamespace(key Key, defaultValue any) IntPropertyFnWithNamespaceFilter { - return func(namespace string) int { - return matchAndConvert( - c, - key, - defaultValue, - namespacePrecedence(namespace), - convertInt, - ) +func (c *Collection) Stop() { + c.poller.Cancel() + c.poller.Wait() + if c.cancelClientSubscription != nil { + c.cancelClientSubscription() } } -// GetIntPropertyFilteredByTaskQueueInfo gets property with taskQueueInfo as filters and asserts that it's an integer -func (c *Collection) 
GetIntPropertyFilteredByTaskQueueInfo(key Key, defaultValue any) IntPropertyFnWithTaskQueueInfoFilters { - return func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) int { - return matchAndConvert( - c, - key, - defaultValue, - taskQueuePrecedence(namespace, taskQueue, taskType), - convertInt, - ) +// Implement pingable.Pingable +func (c *Collection) GetPingChecks() []pingable.Check { + return []pingable.Check{ + { + Name: "dynamic config callbacks", + Timeout: 5 * time.Second, + Ping: func() []pingable.Pingable { + c.subscriptionLock.Lock() + //nolint:staticcheck // SA2001 just checking if we can acquire the lock + c.subscriptionLock.Unlock() + return nil + }, + }, } } -// GetIntPropertyFilteredByShardID gets property with shardID as filter and asserts that it's an integer -func (c *Collection) GetIntPropertyFilteredByShardID(key Key, defaultValue any) IntPropertyFnWithShardIDFilter { - return func(shardID int32) int { - return matchAndConvert( - c, - key, - defaultValue, - shardIDPrecedence(shardID), - convertInt, - ) +func (c *Collection) pollForChanges(ctx context.Context) error { + interval := DynamicConfigSubscriptionPollInterval.Get(c) + for ctx.Err() == nil { + util.InterruptibleSleep(ctx, interval()) + c.pollOnce() } + return ctx.Err() } -// GetFloat64Property gets property and asserts that it's a float64 -func (c *Collection) GetFloat64Property(key Key, defaultValue any) FloatPropertyFn { - return func() float64 { - return matchAndConvert( - c, - key, - defaultValue, - globalPrecedence(), - convertFloat, - ) - } -} +func (c *Collection) pollOnce() { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() -// GetFloat64PropertyFilteredByShardID gets property with shardID filter and asserts that it's a float64 -func (c *Collection) GetFloat64PropertyFilteredByShardID(key Key, defaultValue any) FloatPropertyFnWithShardIDFilter { - return func(shardID int32) float64 { - return matchAndConvert( - c, - key, - defaultValue, - 
shardIDPrecedence(shardID), - convertFloat, - ) + for key, subs := range c.subscriptions { + setting := queryRegistry(key) + if setting == nil { + continue + } + for _, sub := range subs { + cvs := c.client.GetValue(key) + setting.dispatchUpdate(c, sub, cvs) + } } } -// GetFloatPropertyFilteredByNamespace gets property with namespace filter and asserts that it's a float64 -func (c *Collection) GetFloatPropertyFilteredByNamespace(key Key, defaultValue any) FloatPropertyFnWithNamespaceFilter { - return func(namespace string) float64 { - return matchAndConvert( - c, - key, - defaultValue, - namespacePrecedence(namespace), - convertFloat, - ) - } -} +func (c *Collection) keysChanged(changed map[Key][]ConstrainedValue) { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() -// GetFloatPropertyFilteredByTaskQueueInfo gets property with taskQueueInfo as filters and asserts that it's a float64 -func (c *Collection) GetFloatPropertyFilteredByTaskQueueInfo(key Key, defaultValue any) FloatPropertyFnWithTaskQueueInfoFilters { - return func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) float64 { - return matchAndConvert( - c, - key, - defaultValue, - taskQueuePrecedence(namespace, taskQueue, taskType), - convertFloat, - ) + for key, cvs := range changed { + setting := queryRegistry(key) + if setting == nil { + continue + } + // use setting.Key instead of key to avoid changing case again + for _, sub := range c.subscriptions[setting.Key()] { + setting.dispatchUpdate(c, sub, cvs) + } } } -// GetDurationProperty gets property and asserts that it's a duration -func (c *Collection) GetDurationProperty(key Key, defaultValue any) DurationPropertyFn { - return func() time.Duration { - return matchAndConvert( - c, - key, - defaultValue, - globalPrecedence(), - convertDuration, - ) - } +func (c *Collection) throttleLog() bool { + // TODO: This is a lot of unnecessary contention with little benefit. 
Consider using + // https://github.com/cespare/percpu here. + errCount := atomic.AddInt64(&c.errCount, 1) + // log only the first x errors and then one every x after that to reduce log noise + return errCount < errCountLogThreshold || errCount%errCountLogThreshold == 0 } -// GetDurationPropertyFilteredByNamespace gets property with namespace filter and asserts that it's a duration -func (c *Collection) GetDurationPropertyFilteredByNamespace(key Key, defaultValue any) DurationPropertyFnWithNamespaceFilter { - return func(namespace string) time.Duration { - return matchAndConvert( - c, - key, - defaultValue, - namespacePrecedence(namespace), - convertDuration, - ) +func findMatch( + cache *sync.Map, + cvs []ConstrainedValue, + precedence []Constraints, +) (*ConstrainedValue, error) { + if len(cvs) == 0 { + return nil, errKeyNotPresent + } else if len(cvs) > constraintsCacheThreshold && len(cvs) <= math.MaxInt32 { + return findMatchWithCache(cache, cvs, precedence) } -} -// GetDurationPropertyFilteredByNamespaceID gets property with namespaceID filter and asserts that it's a duration -func (c *Collection) GetDurationPropertyFilteredByNamespaceID(key Key, defaultValue any) DurationPropertyFnWithNamespaceIDFilter { - return func(namespaceID string) time.Duration { - return matchAndConvert( - c, - key, - defaultValue, - namespaceIDPrecedence(namespaceID), - convertDuration, - ) + for _, m := range precedence { + for idx, cv := range cvs { + if m == cv.Constraints { + // Note: cvs here is the slice returned by Client.GetValue. We want to return a + // pointer into that slice so that the converted value is cached as long as the + // Client keeps the []ConstrainedValue alive. See the comment on + // Client.GetValue. 
+ return &cvs[idx], nil + } + } } + // key is present but no constraint section matches + return nil, errNoMatchingConstraint } -// GetDurationPropertyFilteredByTaskQueueInfo gets property with taskQueueInfo as filters and asserts that it's a duration -func (c *Collection) GetDurationPropertyFilteredByTaskQueueInfo(key Key, defaultValue any) DurationPropertyFnWithTaskQueueInfoFilters { - return func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) time.Duration { - return matchAndConvert( - c, - key, - defaultValue, - taskQueuePrecedence(namespace, taskQueue, taskType), - convertDuration, - ) +func findMatchWithCache( + cache *sync.Map, + cvs []ConstrainedValue, + precedence []Constraints, +) (*ConstrainedValue, error) { + var cached map[Constraints]int32 + weakcvp := weak.Make(&cvs[0]) + if v, ok := cache.Load(weakcvp); ok { + cached = v.(map[Constraints]int32) // nolint:revive // unchecked-type-assertion + } else { + cached = make(map[Constraints]int32, len(cvs)) + for i := range cvs { + // pick first one to match behavior if multiple match + if _, ok := cached[cvs[i].Constraints]; !ok { + cached[cvs[i].Constraints] = int32(i) + } + } + if _, loaded := cache.LoadOrStore(weakcvp, cached); !loaded { + runtime.AddCleanup(&cvs[0], func(w weak.Pointer[ConstrainedValue]) { + cache.Delete(w) + }, weakcvp) + } } -} -// GetDurationPropertyFilteredByShardID gets property with shardID id as filter and asserts that it's a duration -func (c *Collection) GetDurationPropertyFilteredByShardID(key Key, defaultValue any) DurationPropertyFnWithShardIDFilter { - return func(shardID int32) time.Duration { - return matchAndConvert( - c, - key, - defaultValue, - shardIDPrecedence(shardID), - convertDuration, - ) + for _, m := range precedence { + if i, ok := cached[m]; ok { + // Note: cvs here is the slice returned by Client.GetValue. 
We want to return a + // pointer into that slice so that the converted value is cached as long as the + // Client keeps the []ConstrainedValue alive. See the comment on + // Client.GetValue. + return &cvs[i], nil + } } + // key is present but no constraint section matches + return nil, errNoMatchingConstraint } -// GetDurationPropertyFilteredByTaskType gets property with task type as filters and asserts that it's a duration -func (c *Collection) GetDurationPropertyFilteredByTaskType(key Key, defaultValue any) DurationPropertyFnWithTaskTypeFilter { - return func(taskType enumsspb.TaskType) time.Duration { - return matchAndConvert( - c, - key, - defaultValue, - taskTypePrecedence(taskType), - convertDuration, - ) - } +// matchAndConvert can't be a method of Collection because methods can't be generic, but we can +// take a *Collection as an argument. +func matchAndConvert[T any]( + c *Collection, + key Key, + def T, + convert func(value any) (T, error), + precedence []Constraints, +) T { + cvs := c.client.GetValue(key) + v, _ := matchAndConvertCvs(c, key, def, convert, precedence, cvs) + return v } -// GetBoolProperty gets property and asserts that it's a bool -func (c *Collection) GetBoolProperty(key Key, defaultValue any) BoolPropertyFn { - return func() bool { - return matchAndConvert( - c, - key, - defaultValue, - globalPrecedence(), - convertBool, - ) +func matchAndConvertCvs[T any]( + c *Collection, + key Key, + def T, + convert func(value any) (T, error), + precedence []Constraints, + cvs []ConstrainedValue, +) (T, any) { + cvp, err := findMatch(c.indexCache, cvs, precedence) + if err != nil { + // couldn't find a constrained match, use default + return def, usingDefaultValue } -} -// GetStringProperty gets property and asserts that it's a string -func (c *Collection) GetStringProperty(key Key, defaultValue any) StringPropertyFn { - return func() string { - return matchAndConvert( - c, - key, - defaultValue, - globalPrecedence(), - convertString, - ) + 
typedVal, err := convertWithCache(c, key, convert, cvp) + if err != nil { + // We failed to convert the value to the desired type. Use the default. + if c.throttleLog() { + c.logger.Warn("Failed to convert value, using default", tag.Key(key.String()), tag.IgnoredValue(cvp), tag.Error(err)) + } + return def, usingDefaultValue } + return typedVal, cvp.Value } -// GetMapProperty gets property and asserts that it's a map -func (c *Collection) GetMapProperty(key Key, defaultValue any) MapPropertyFn { - return func() map[string]interface{} { - return matchAndConvert( - c, - key, - defaultValue, - globalPrecedence(), - convertMap, - ) +// Returns matched value out of cvs, matched default out of defaultCVs, and also the priorities +// of each of the matches (lower matched first). For no match, order will be 0. +func findMatchWithConstrainedDefaults[T any](cvs []ConstrainedValue, defaultCVs []TypedConstrainedValue[T], precedence []Constraints) ( + matchedValue *ConstrainedValue, + matchedDefault T, + valueOrder int, + defaultOrder int, +) { + order := 0 + for _, m := range precedence { + for idx, cv := range cvs { + order++ + if m == cv.Constraints { + if valueOrder == 0 { + valueOrder = order + // Note: cvs here is the slice returned by Client.GetValue. We want to + // return a pointer into that slice instead of copying the ConstrainedValue. + // See findMatch. 
+ matchedValue = &cvs[idx] + } + } + } + for _, cv := range defaultCVs { + order++ + if m == cv.Constraints { + if defaultOrder == 0 { + defaultOrder = order + matchedDefault = cv.Value + } + } + } } + return } -// GetStringPropertyFnWithNamespaceFilter gets property with namespace filter and asserts that it's a string -func (c *Collection) GetStringPropertyFnWithNamespaceFilter(key Key, defaultValue any) StringPropertyFnWithNamespaceFilter { - return func(namespace string) string { - return matchAndConvert( - c, - key, - defaultValue, - namespacePrecedence(namespace), - convertString, - ) +func findAndResolveWithConstrainedDefaults[T any]( + c *Collection, + key Key, + convert func(value any) (T, error), + cvs []ConstrainedValue, + defaultCVs []TypedConstrainedValue[T], + precedence []Constraints, +) (value T, raw any) { + cvp, defVal, valOrder, defOrder := findMatchWithConstrainedDefaults(cvs, defaultCVs, precedence) + + if defOrder == 0 { + // This is a server bug: all precedence lists must end with no-constraints, and all + // constrained defaults must have a no-constraints value, so we should have gotten a match. + c.logger.Warn("Constrained defaults had no match (this is a bug; fix server code)", tag.Key(key.String())) + // leave value as the zero value, that's the best we can do + return value, usingDefaultValue + } else if valOrder == 0 { + return defVal, usingDefaultValue + } else if defOrder < valOrder { + // value was present but constrained default took precedence + return defVal, usingDefaultValue // use sentinel since we're using default + } + typedVal, err := convertWithCache(c, key, convert, cvp) + if err != nil { + // We failed to convert the value to the desired type. Use the default. 
+ if c.throttleLog() { + c.logger.Warn("Failed to convert value, using default", tag.Key(key.String()), tag.IgnoredValue(cvp), tag.Error(err)) + } + return defVal, usingDefaultValue } + return typedVal, cvp.Value } -// GetStringPropertyFnWithNamespaceIDFilter gets property with namespace ID filter and asserts that it's a string -func (c *Collection) GetStringPropertyFnWithNamespaceIDFilter(key Key, defaultValue any) StringPropertyFnWithNamespaceIDFilter { - return func(namespaceID string) string { - return matchAndConvert( - c, - key, - defaultValue, - namespaceIDPrecedence(namespaceID), - convertString, - ) - } +func matchAndConvertWithConstrainedDefault[T any]( + c *Collection, + key Key, + cdef []TypedConstrainedValue[T], + convert func(value any) (T, error), + precedence []Constraints, +) T { + cvs := c.client.GetValue(key) + value, _ := findAndResolveWithConstrainedDefaults(c, key, convert, cvs, cdef, precedence) + return value } -// GetMapPropertyFnWithNamespaceFilter gets property and asserts that it's a map -func (c *Collection) GetMapPropertyFnWithNamespaceFilter(key Key, defaultValue any) MapPropertyFnWithNamespaceFilter { - return func(namespace string) map[string]interface{} { - return matchAndConvert( - c, - key, - defaultValue, - namespacePrecedence(namespace), - convertMap, - ) - } -} +func subscribe[T any]( + c *Collection, + key Key, + def T, + convert func(value any) (T, error), + prec []Constraints, + callback func(T), +) (T, func()) { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() + + // get one value immediately (note that subscriptionLock is held here so we can't race with + // an update) + cvs := c.client.GetValue(key) + init, raw := matchAndConvertCvs(c, key, def, convert, prec, cvs) -// GetBoolPropertyFnWithNamespaceFilter gets property with namespace filter and asserts that it's a bool -func (c *Collection) GetBoolPropertyFnWithNamespaceFilter(key Key, defaultValue any) BoolPropertyFnWithNamespaceFilter { - return 
func(namespace string) bool { - return matchAndConvert( - c, - key, - defaultValue, - namespacePrecedence(namespace), - convertBool, - ) + // As a convenience (and for efficiency), you can pass in a nil callback; we just return the + // current value and skip the subscription. The cancellation func returned is also nil. + if callback == nil { + return init, nil } -} -// GetBoolPropertyFnWithNamespaceIDFilter gets property with namespaceID filter and asserts that it's a bool -func (c *Collection) GetBoolPropertyFnWithNamespaceIDFilter(key Key, defaultValue any) BoolPropertyFnWithNamespaceIDFilter { - return func(namespaceID string) bool { - return matchAndConvert( - c, - key, - defaultValue, - namespaceIDPrecedence(namespaceID), - convertBool, - ) - } -} + c.subscriptionIdx++ + id := c.subscriptionIdx -// GetBoolPropertyFilteredByTaskQueueInfo gets property with taskQueueInfo as filters and asserts that it's a bool -func (c *Collection) GetBoolPropertyFilteredByTaskQueueInfo(key Key, defaultValue any) BoolPropertyFnWithTaskQueueInfoFilters { - return func(namespace string, taskQueue string, taskType enumspb.TaskQueueType) bool { - return matchAndConvert( - c, - key, - defaultValue, - taskQueuePrecedence(namespace, taskQueue, taskType), - convertBool, - ) + if c.subscriptions[key] == nil { + c.subscriptions[key] = make(map[int]any) } -} - -// Task queue partitions use a dedicated function to handle defaults. 
-func (c *Collection) GetTaskQueuePartitionsProperty(key Key) IntPropertyFnWithTaskQueueInfoFilters { - return c.GetIntPropertyFilteredByTaskQueueInfo(key, defaultNumTaskQueuePartitions) -} -func (c *Collection) HasKey(key Key) bool { - cvs := c.client.GetValue(key) - return len(cvs) > 0 -} - -func findMatch(cvs, defaultCVs []ConstrainedValue, precedence []Constraints) (any, error) { - if len(cvs)+len(defaultCVs) == 0 { - return nil, errKeyNotPresent + c.subscriptions[key][id] = &subscription[T]{ + prec: prec, + f: callback, + def: def, + raw: raw, } - for _, m := range precedence { - // duplicate the code so that we don't have to allocate a new slice to hold the - // concatenation of cvs and defaultCVs - for _, cv := range cvs { - if m == cv.Constraints { - return cv.Value, nil - } - } - for _, cv := range defaultCVs { - if m == cv.Constraints { - return cv.Value, nil - } - } + + return init, func() { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() + delete(c.subscriptions[key], id) } - // key is present but no constraint section matches - return nil, errNoMatchingConstraint } -// matchAndConvert can't be a method of Collection because methods can't be generic, but we can -// take a *Collection as an argument. -func matchAndConvert[T any]( +func subscribeWithConstrainedDefault[T any]( c *Collection, key Key, - defaultValue any, - precedence []Constraints, - converter func(value any) (T, error), -) T { + cdef []TypedConstrainedValue[T], + convert func(value any) (T, error), + prec []Constraints, + callback func(T), +) (T, func()) { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() + + // get one value immediately (note that subscriptionLock is held here so we can't race with + // an update) cvs := c.client.GetValue(key) + init, raw := findAndResolveWithConstrainedDefaults(c, key, convert, cvs, cdef, prec) - // defaultValue may be a list of constrained values. 
In that case, one of them must have an - // empty constraint set to be the fallback default. Otherwise we'll return the zero value - // and log an error (since []ConstrainedValue can't be converted to the desired type). - defaultCVs, _ := defaultValue.([]ConstrainedValue) - - val, matchErr := findMatch(cvs, defaultCVs, precedence) - if matchErr != nil { - if c.throttleLog() { - c.logger.Debug("No such key in dynamic config, using default", tag.Key(key.String()), tag.Error(matchErr)) - } - // couldn't find a constrained match, use default - val = defaultValue + // As a convenience (and for efficiency), you can pass in a nil callback; we just return the + // current value and skip the subscription. The cancellation func returned is also nil. + if callback == nil { + return init, nil } - typedVal, convertErr := converter(val) - if convertErr != nil && matchErr == nil { - // We failed to convert the value to the desired type. Try converting the default. note - // that if matchErr != nil then val _is_ defaultValue and we don't have to try this again. - if c.throttleLog() { - c.logger.Warn("Failed to convert value, using default", tag.Key(key.String()), tag.IgnoredValue(val), tag.Error(convertErr)) - } - typedVal, convertErr = converter(defaultValue) - } - if convertErr != nil { - // If we can't convert the default, that's a bug in our code, use Warn level. - c.logger.Warn("Can't convert default value (this is a bug; fix server code)", tag.Key(key.String()), tag.IgnoredValue(defaultValue), tag.Error(convertErr)) - // Return typedVal anyway since we have to return something. 
+ c.subscriptionIdx++ + id := c.subscriptionIdx + + if c.subscriptions[key] == nil { + c.subscriptions[key] = make(map[int]any) } - return typedVal -} -func globalPrecedence() []Constraints { - return []Constraints{ - {}, + c.subscriptions[key][id] = &subscription[T]{ + prec: prec, + f: callback, + cdef: cdef, + raw: raw, } -} -func namespacePrecedence(namespace string) []Constraints { - return []Constraints{ - {Namespace: namespace}, - {}, + return init, func() { + c.subscriptionLock.Lock() + defer c.subscriptionLock.Unlock() + delete(c.subscriptions[key], id) } } -func namespaceIDPrecedence(namespaceID string) []Constraints { - return []Constraints{ - {NamespaceID: namespaceID}, - {}, +// called with subscriptionLock +func dispatchUpdate[T any]( + c *Collection, + key Key, + convert func(value any) (T, error), + sub *subscription[T], + cvs []ConstrainedValue, +) { + var raw any + cvp, err := findMatch(c.indexCache, cvs, sub.prec) + if err != nil { + raw = usingDefaultValue + } else { + raw = cvp.Value + } + + // compare raw (pre-conversion) values, if unchanged, skip this update. note that + // `usingDefaultValue` is equal to itself but nothing else. + if reflect.DeepEqual(sub.raw, raw) { + // make raw field point to new one, not old one, so that old loaded files can get + // garbage collected. + sub.raw = raw + return + } + + // raw value changed, need to dispatch default or converted value + var newVal T + if cvp == nil { + newVal = sub.def + } else { + newVal, err = convertWithCache(c, key, convert, cvp) + if err != nil { + // We failed to convert the value to the desired type. Use the default. 
+ if c.throttleLog() { + c.logger.Warn("Failed to convert value, using default", tag.Key(key.String()), tag.IgnoredValue(cvp), tag.Error(err)) + } + newVal, raw = sub.def, usingDefaultValue + } } + + sub.raw = raw + go sub.f(newVal) } -func taskQueuePrecedence(namespace string, taskQueue string, taskType enumspb.TaskQueueType) []Constraints { - return []Constraints{ - {Namespace: namespace, TaskQueueName: taskQueue, TaskQueueType: taskType}, - {Namespace: namespace, TaskQueueName: taskQueue}, - // A task-queue-name-only filter applies to a single task queue name across all - // namespaces, with higher precedence than a namespace-only filter. This is intended to - // be used by defaultNumTaskQueuePartitions and is probably not useful otherwise. - {TaskQueueName: taskQueue}, - {Namespace: namespace}, - {}, +// called with subscriptionLock +func dispatchUpdateWithConstrainedDefault[T any]( + c *Collection, + key Key, + convert func(value any) (T, error), + sub *subscription[T], + cvs []ConstrainedValue, +) { + // Note: This performs the conversion even if the raw value is unchanged. This isn't ideal, + // but so far constrained default settings are only used for primitive values so it's okay. + // If we have a constrained default value with a complex conversion function, this could be + // optimized to delay conversion until after we check DeepEqual. + newVal, raw := findAndResolveWithConstrainedDefaults(c, key, convert, cvs, sub.cdef, sub.prec) + + // compare raw (pre-conversion) values, if unchanged, skip this update. note that + // `usingDefaultValue` is equal to itself but nothing else. + if reflect.DeepEqual(sub.raw, raw) { + // make raw field point to new one, not old one, so that old loaded files can get + // garbage collected. 
+ sub.raw = raw + return + } + + sub.raw = raw + go sub.f(newVal) +} + +func convertWithCache[T any](c *Collection, key Key, convert func(any) (T, error), cvp *ConstrainedValue) (T, error) { + weakcvp := weak.Make(cvp) + + if converted, ok := c.convertCache.Load(weakcvp); ok { + if t, ok := converted.(T); ok { + return t, nil + } + // Each key can only be used with a single type, so this shouldn't happen + c.logger.Warn("Cached converted value has wrong type", tag.Key(key.String())) + // Fall through to regular conversion } -} -func shardIDPrecedence(shardID int32) []Constraints { - return []Constraints{ - {ShardID: shardID}, - {}, + t, err := convert(cvp.Value) + if err != nil { + var zero T + return zero, err } -} -func taskTypePrecedence(taskType enumsspb.TaskType) []Constraints { - return []Constraints{ - {TaskType: taskType}, - {}, + if _, loaded := c.convertCache.LoadOrStore(weakcvp, t); !loaded { + cc := c.convertCache // capture only this pointer, not the whole Collection + runtime.AddCleanup(cvp, func(w weak.Pointer[ConstrainedValue]) { + cc.Delete(w) + }, weakcvp) } + + return t, nil } func convertInt(val any) (int, error) { - if intVal, ok := val.(int); ok { - return intVal, nil + switch val := val.(type) { + case int: + return int(val), nil + case int8: + return int(val), nil + case int16: + return int(val), nil + case int32: + return int(val), nil + case int64: + return int(val), nil + case uint: + return int(val), nil + case uint8: + return int(val), nil + case uint16: + return int(val), nil + case uint32: + return int(val), nil + case uint64: + return int(val), nil + case uintptr: + return int(val), nil + default: + return 0, errors.New("value type is not int") } - return 0, errors.New("value type is not int") } func convertFloat(val any) (float64, error) { - if floatVal, ok := val.(float64); ok { - return floatVal, nil - } else if intVal, ok := val.(int); ok { - return float64(intVal), nil + switch val := val.(type) { + case float32: + return 
float64(val), nil + case float64: + return float64(val), nil + } + if ival, err := convertInt(val); err == nil { + return float64(ival), nil } return 0, errors.New("value type is not float64") } @@ -554,9 +620,6 @@ func convertDuration(val any) (time.Duration, error) { switch v := val.(type) { case time.Duration: return v, nil - case int: - // treat plain int as seconds - return time.Duration(v) * time.Second, nil case string: d, err := timestamp.ParseDurationDefaultSeconds(v) if err != nil { @@ -564,6 +627,12 @@ func convertDuration(val any) (time.Duration, error) { } return d, nil } + // treat numeric values as seconds + if ival, err := convertInt(val); err == nil { + return time.Duration(ival) * time.Second, nil + } else if fval, err := convertFloat(val); err == nil { + return time.Duration(fval * float64(time.Second)), nil + } return 0, errors.New("value not convertible to Duration") } @@ -575,10 +644,14 @@ func convertString(val any) (string, error) { } func convertBool(val any) (bool, error) { - if boolVal, ok := val.(bool); ok { - return boolVal, nil + switch v := val.(type) { + case bool: + return v, nil + case string: + return strconv.ParseBool(v) + default: + return false, errors.New("value type is not bool") } - return false, errors.New("value type is not bool") } func convertMap(val any) (map[string]any, error) { @@ -587,3 +660,120 @@ func convertMap(val any) (map[string]any, error) { } return nil, errors.New("value type is not map") } + +// ConvertStructure can be used as a conversion function for New*TypedSettingWithConverter. +// The value from dynamic config will be converted to T, on top of the given default. +// +// Note that any failure in conversion of _any_ field will result in the overall default being used, +// ignoring the fields that successfully converted. +// +// Note that the default value will be deep-copied and then passed to mapstructure with the +// ZeroFields setting false, so the config value will be _merged_ on top of it. 
Be very careful +// when using non-empty maps or slices, the result may not be what you want. +// +// To avoid confusion, the default passed to ConvertStructure should be either the same as the +// overall default for the setting (if you want any value set to be merged over the default, i.e. +// treat the fields independently), or the zero value of its type (if you want to treat the fields +// as a group and default unset fields to zero). +func ConvertStructure[T any](def T) func(v any) (T, error) { + return func(v any) (T, error) { + // if we already have the right type, no conversion is necessary + if typedV, ok := v.(T); ok { + return typedV, nil + } + + // Deep-copy the default and decode over it. This allows using e.g. a struct with some + // default fields filled in and a config that only set some fields. + out := deepCopyForMapstructure(def) + + dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &out, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructureHookDuration, + mapstructureHookTimestamp, + mapstructureHookProtoEnum, + mapstructureHookGeneric, + ), + }) + if err != nil { + return out, err + } + err = dec.Decode(v) + return out, err + } +} + +// Parses string into time.Duration. mapstructure has an implementation of this already but it +// calls time.ParseDuration and we want to use our own method. +func mapstructureHookDuration(f, t reflect.Type, data any) (any, error) { + if t != durationType { + return data, nil + } + return convertDuration(data) +} + +// Parses string or int into time.Time. 
+func mapstructureHookTimestamp(f, t reflect.Type, data any) (any, error) { + if t != timeType { + return data, nil + } + switch v := data.(type) { + case time.Time: + return v, nil + case string: + ts, err := time.Parse(time.RFC3339, v) + if err != nil { + return time.Time{}, fmt.Errorf("failed to parse time: %v", err) + } + return ts, nil + } + // treat numeric values as seconds + if ival, err := convertInt(data); err == nil { + return time.Unix(int64(ival), 0), nil + } else if fval, err := convertFloat(data); err == nil { + ipart, fpart := math.Modf(fval) + return time.Unix(int64(ipart), int64(fpart*float64(time.Second))), nil + } + return time.Time{}, errors.New("value not convertible to Time") +} + +// Parses proto enum values from strings. +func mapstructureHookProtoEnum(f, t reflect.Type, data any) (any, error) { + if f != stringType || !t.Implements(protoEnumType) { + return data, nil + } + vals := reflect.New(t).Interface().(protoreflect.Enum).Descriptor().Values() + str := strings.ToLower(data.(string)) // we checked f above so this can't fail + for i := 0; i < vals.Len(); i++ { + val := vals.Get(i) + if str == strings.ToLower(string(val.Name())) { + return val.Number(), nil + } + } + return nil, fmt.Errorf("name %q not found in enum %s", data, t.Name()) +} + +// Parses generic values. See GenericParseHook. 
+func mapstructureHookGeneric(f, t reflect.Type, data any) (any, error) { + if mth, ok := t.MethodByName("DynamicConfigParseHook"); ok && + mth.Func.IsValid() && + mth.Type != nil && + mth.Type.NumIn() == 2 && + mth.Type.In(1) == f && + mth.Type.NumOut() == 2 && + mth.Type.Out(0) == t && + mth.Type.Out(1) == errorType { + + out := mth.Func.Call([]reflect.Value{reflect.Zero(t), reflect.ValueOf(data)}) + if !out[1].IsNil() { + if err, ok := out[1].Interface().(error); ok { + return nil, err + } + return nil, errors.New("failed to convert DynamicConfigParseHook error") + } + return out[0].Interface(), nil + } + + // pass through + return data, nil +} diff --git a/common/dynamicconfig/collection_bench_test.go b/common/dynamicconfig/collection_bench_test.go new file mode 100644 index 00000000000..1355cdb950e --- /dev/null +++ b/common/dynamicconfig/collection_bench_test.go @@ -0,0 +1,162 @@ +package dynamicconfig_test + +import ( + "fmt" + "testing" + + "go.temporal.io/server/common/dynamicconfig" + "go.temporal.io/server/common/log" +) + +func BenchmarkCollection(b *testing.B) { + client1 := dynamicconfig.StaticClient{ + dynamicconfig.MatchingMaxTaskBatchSize.Key(): []dynamicconfig.ConstrainedValue{{Value: 12}}, + dynamicconfig.HistoryRPS.Key(): []dynamicconfig.ConstrainedValue{{Value: 100}}, + dynamicconfig.BlobSizeLimitError.Key(): []dynamicconfig.ConstrainedValue{{Value: 100}}, + dynamicconfig.BlobSizeLimitWarn.Key(): []dynamicconfig.ConstrainedValue{{Value: 100}}, + dynamicconfig.MatchingShutdownDrainDuration.Key(): []dynamicconfig.ConstrainedValue{{Value: "100s"}}, + } + cln1 := dynamicconfig.NewCollection(client1, log.NewNoopLogger()) + b.Run("global int default", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingThrottledLogRPS.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size() + _ = size() + } + }) + b.Run("global int present", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.HistoryRPS.Get(cln1) + for i := 0; i < b.N/2; 
i++ { + _ = size() + _ = size() + } + }) + b.Run("namespace int default", func(b *testing.B) { + b.ReportAllocs() + size1 := dynamicconfig.HistoryMaxPageSize.Get(cln1) + size2 := dynamicconfig.WorkflowExecutionMaxInFlightUpdates.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size1("my-namespace") + _ = size2("my-namespace") + } + }) + b.Run("namespace int present", func(b *testing.B) { + b.ReportAllocs() + size1 := dynamicconfig.BlobSizeLimitError.Get(cln1) + size2 := dynamicconfig.BlobSizeLimitWarn.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size1("my-namespace") + _ = size2("my-namespace") + } + }) + b.Run("taskqueue int default", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingMaxTaskDeleteBatchSize.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size("my-namespace", "my-task-queue", 1) + _ = size("my-namespace", "my-task-queue", 1) + } + }) + b.Run("taskqueue int present", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingMaxTaskBatchSize.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size("my-namespace", "my-task-queue", 1) + _ = size("my-namespace", "my-task-queue", 1) + } + }) + b.Run("global duration default", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingAlignMembershipChange.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size() + _ = size() + } + }) + b.Run("global duration present", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingShutdownDrainDuration.Get(cln1) + for i := 0; i < b.N/2; i++ { + _ = size() + _ = size() + } + }) + + // client with more constrained values + client2 := dynamicconfig.StaticClient{ + dynamicconfig.MatchingMaxTaskBatchSize.Key(): []dynamicconfig.ConstrainedValue{ + { + Constraints: dynamicconfig.Constraints{ + TaskQueueName: "other-tq", + }, + Value: 18, + }, + { + Constraints: dynamicconfig.Constraints{ + Namespace: "other-ns", + }, + Value: 15, + }, + }, + } + cln2 := dynamicconfig.NewCollection(client2, log.NewNoopLogger()) + 
b.Run("single default", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingMaxTaskBatchSize.Get(cln2) + for i := 0; i < b.N/4; i++ { + _ = size("my-namespace", "my-task-queue", 1) + _ = size("my-namespace", "other-tq", 1) + _ = size("other-ns", "my-task-queue", 1) + _ = size("other-ns", "other-tq", 1) + } + }) + b.Run("structured default", func(b *testing.B) { + b.ReportAllocs() + size := dynamicconfig.MatchingNumTaskqueueWritePartitions.Get(cln2) + for i := 0; i < b.N/4; i++ { + _ = size("my-namespace", "my-task-queue", 1) + _ = size("my-namespace", "other-tq", 1) + _ = size("other-ns", "my-task-queue", 1) + _ = size("other-ns", "other-tq", 1) + } + }) +} + +func BenchmarkCollectionIndexed(b *testing.B) { + // You might want to set constraintsCacheThreshold to a high value before running this to + // measure the performance of linear search. + + var nums []int + for v := 1.0; v < 1000; v *= 1.5 { + nums = append(nums, int(v+0.999)) + } + for _, numNs := range nums { + // query for the middle one to measure the average + queryNs := numNs / 2 + + b.Run(fmt.Sprintf("num%d", numNs), func(b *testing.B) { + cvs := make([]dynamicconfig.ConstrainedValue, numNs) + for i := range cvs { + cvs[i] = dynamicconfig.ConstrainedValue{ + Constraints: dynamicconfig.Constraints{ + Namespace: fmt.Sprintf("namespace%d", i), + }, + Value: 1000 + i, + } + } + + cli := dynamicconfig.StaticClient{ + dynamicconfig.FrontendGlobalNamespaceRPS.Key(): cvs, + } + cln := dynamicconfig.NewCollection(cli, log.NewNoopLogger()) + get := dynamicconfig.FrontendGlobalNamespaceRPS.Get(cln) + query := fmt.Sprintf("namespace%d", queryNs) + + for b.Loop() { + get(query) + } + }) + } +} diff --git a/common/dynamicconfig/collection_test.go b/common/dynamicconfig/collection_test.go index cccd9ec3dfc..7ae80fdbd63 100644 --- a/common/dynamicconfig/collection_test.go +++ b/common/dynamicconfig/collection_test.go @@ -1,37 +1,21 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal 
Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
- -package dynamicconfig +package dynamicconfig_test import ( + "errors" + "maps" + "strings" + "sync" + "sync/atomic" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/suite" - + enumspb "go.temporal.io/api/enums/v1" enumsspb "go.temporal.io/server/api/enums/v1" + "go.temporal.io/server/common/dynamicconfig" "go.temporal.io/server/common/log" + "go.temporal.io/server/common/namespace" ) const ( @@ -45,6 +29,7 @@ const ( testGetBoolPropertyKey = "testGetBoolPropertyKey" testGetStringPropertyKey = "testGetStringPropertyKey" testGetMapPropertyKey = "testGetMapPropertyKey" + testGetTypedPropertyKey = "testGetTypedPropertyKey" testGetIntPropertyFilteredByNamespaceKey = "testGetIntPropertyFilteredByNamespaceKey" testGetDurationPropertyFilteredByNamespaceKey = "testGetDurationPropertyFilteredByNamespaceKey" testGetIntPropertyFilteredByTaskQueueInfoKey = "testGetIntPropertyFilteredByTaskQueueInfoKey" @@ -53,15 +38,18 @@ const ( testGetDurationPropertyStructuredDefaults = "testGetDurationPropertyStructuredDefaults" testGetBoolPropertyFilteredByNamespaceIDKey = "testGetBoolPropertyFilteredByNamespaceIDKey" testGetBoolPropertyFilteredByTaskQueueInfoKey = "testGetBoolPropertyFilteredByTaskQueueInfoKey" + testGetStringPropertyFilteredByNamespaceKey = "testGetStringPropertyFilteredByNamespaceKey" testGetStringPropertyFilteredByNamespaceIDKey = "testGetStringPropertyFilteredByNamespaceIDKey" + testGetIntPropertyFilteredByDestinationKey = "testGetIntPropertyFilteredByDestinationKey" + testGetDurationPropertyFilteredByChasmTaskTypeKey = "testGetDurationPropertyFilteredByChasmTaskTypeKey" ) // Note: fileBasedClientSuite also heavily tests Collection, since some tests are easier with data // provided from a file. 
type collectionSuite struct { suite.Suite - client StaticClient - cln *Collection + client *testSubscribableClient + cln *dynamicconfig.Collection } func TestCollectionSuite(t *testing.T) { @@ -69,139 +57,183 @@ func TestCollectionSuite(t *testing.T) { suite.Run(t, s) } -func (s *collectionSuite) SetupSuite() { - s.client = make(StaticClient) +func (s *collectionSuite) SetupTest() { + dynamicconfig.ResetRegistryForTest() + s.client = newTestSubscribableClient() logger := log.NewNoopLogger() - s.cln = NewCollection(s.client, logger) + s.cln = dynamicconfig.NewCollection(s.client, logger) + s.cln.Start() +} + +func (s *collectionSuite) TearDownTest() { + s.cln.Stop() } func (s *collectionSuite) TestGetIntProperty() { - value := s.cln.GetIntProperty(testGetIntPropertyKey, 10) + setting := dynamicconfig.NewGlobalIntSetting(testGetIntPropertyKey, 10, "") + value := setting.Get(s.cln) s.Equal(10, value()) - s.client[testGetIntPropertyKey] = 50 + s.client.SetValue(testGetIntPropertyKey, 50) s.Equal(50, value()) + s.client.SetValue(testGetIntPropertyKey, uint32(50000)) + s.Equal(50000, value()) } func (s *collectionSuite) TestGetIntPropertyFilteredByNamespace() { + setting := dynamicconfig.NewNamespaceIntSetting(testGetIntPropertyFilteredByNamespaceKey, 10, "") namespace := "testNamespace" - value := s.cln.GetIntPropertyFilteredByNamespace(testGetIntPropertyFilteredByNamespaceKey, 10) + value := setting.Get(s.cln) s.Equal(10, value(namespace)) - s.client[testGetIntPropertyFilteredByNamespaceKey] = 50 + s.client.SetValue(testGetIntPropertyFilteredByNamespaceKey, 50) s.Equal(50, value(namespace)) } -func (s *collectionSuite) TestGetStringPropertyFnWithNamespaceFilter() { - namespace := "testNamespace" - value := s.cln.GetStringPropertyFnWithNamespaceFilter(DefaultEventEncoding, "abc") - s.Equal("abc", value(namespace)) - s.client[DefaultEventEncoding] = "efg" - s.Equal("efg", value(namespace)) +func (s *collectionSuite) TestGetStringPropertyFnFilteredByNamespace() { + ns := 
"testNamespace" + setting := dynamicconfig.NewNamespaceStringSetting(testGetStringPropertyFilteredByNamespaceKey, "abc", "") + value := setting.Get(s.cln) + s.Equal("abc", value(ns)) + s.client.SetValue(testGetStringPropertyFilteredByNamespaceKey, "efg") + s.Equal("efg", value(ns)) } -func (s *collectionSuite) TestGetStringPropertyFnWithNamespaceIDFilter() { - namespaceID := "testNamespaceID" - value := s.cln.GetStringPropertyFnWithNamespaceIDFilter(testGetStringPropertyFilteredByNamespaceIDKey, "abc") +func (s *collectionSuite) TestGetStringPropertyFnFilteredByNamespaceID() { + namespaceID := namespace.ID("testNamespaceID") + setting := dynamicconfig.NewNamespaceIDStringSetting(testGetStringPropertyFilteredByNamespaceIDKey, "abc", "") + value := setting.Get(s.cln) s.Equal("abc", value(namespaceID)) - s.client[testGetStringPropertyFilteredByNamespaceIDKey] = "efg" + s.client.SetValue(testGetStringPropertyFilteredByNamespaceIDKey, "efg") s.Equal("efg", value(namespaceID)) } func (s *collectionSuite) TestGetIntPropertyFilteredByTaskQueueInfo() { + setting := dynamicconfig.NewTaskQueueIntSetting(testGetIntPropertyFilteredByTaskQueueInfoKey, 10, "") namespace := "testNamespace" taskQueue := "testTaskQueue" - value := s.cln.GetIntPropertyFilteredByTaskQueueInfo(testGetIntPropertyFilteredByTaskQueueInfoKey, 10) + value := setting.Get(s.cln) s.Equal(10, value(namespace, taskQueue, 0)) - s.client[testGetIntPropertyFilteredByTaskQueueInfoKey] = 50 + s.client.SetValue(testGetIntPropertyFilteredByTaskQueueInfoKey, 50) s.Equal(50, value(namespace, taskQueue, 0)) } func (s *collectionSuite) TestGetFloat64Property() { - value := s.cln.GetFloat64Property(testGetFloat64PropertyKey, 0.1) - s.Equal(0.1, value()) - s.client[testGetFloat64PropertyKey] = 0.01 - s.Equal(0.01, value()) + setting := dynamicconfig.NewGlobalFloatSetting(testGetFloat64PropertyKey, 0.1, "") + value := setting.Get(s.cln) + s.InEpsilon(0.1, value(), 1e-10) + s.client.SetValue(testGetFloat64PropertyKey, 0.01) + 
s.InEpsilon(0.01, value(), 1e-10) + s.client.SetValue(testGetFloat64PropertyKey, int64(123456789)) + s.InEpsilon(float64(123456789), value(), 1e-10) } func (s *collectionSuite) TestGetBoolProperty() { - value := s.cln.GetBoolProperty(testGetBoolPropertyKey, true) + setting := dynamicconfig.NewGlobalBoolSetting(testGetBoolPropertyKey, true, "") + value := setting.Get(s.cln) s.Equal(true, value()) - s.client[testGetBoolPropertyKey] = false + s.client.SetValue(testGetBoolPropertyKey, false) + s.Equal(false, value()) + s.client.SetValue(testGetBoolPropertyKey, "false") s.Equal(false, value()) } func (s *collectionSuite) TestGetBoolPropertyFilteredByNamespaceID() { - namespaceID := "testNamespaceID" - value := s.cln.GetBoolPropertyFnWithNamespaceIDFilter(testGetBoolPropertyFilteredByNamespaceIDKey, true) + setting := dynamicconfig.NewNamespaceIDBoolSetting(testGetBoolPropertyFilteredByNamespaceIDKey, true, "") + namespaceID := namespace.ID("testNamespaceID") + value := setting.Get(s.cln) s.Equal(true, value(namespaceID)) - s.client[testGetBoolPropertyFilteredByNamespaceIDKey] = false + s.client.SetValue(testGetBoolPropertyFilteredByNamespaceIDKey, false) s.Equal(false, value(namespaceID)) } func (s *collectionSuite) TestGetBoolPropertyFilteredByTaskQueueInfo() { + setting := dynamicconfig.NewTaskQueueBoolSetting(testGetBoolPropertyFilteredByTaskQueueInfoKey, false, "") namespace := "testNamespace" taskQueue := "testTaskQueue" - value := s.cln.GetBoolPropertyFilteredByTaskQueueInfo(testGetBoolPropertyFilteredByTaskQueueInfoKey, false) + value := setting.Get(s.cln) s.Equal(false, value(namespace, taskQueue, 0)) - s.client[testGetBoolPropertyFilteredByTaskQueueInfoKey] = true + s.client.SetValue(testGetBoolPropertyFilteredByTaskQueueInfoKey, true) s.Equal(true, value(namespace, taskQueue, 0)) } func (s *collectionSuite) TestGetDurationProperty() { - value := s.cln.GetDurationProperty(testGetDurationPropertyKey, time.Second) + setting := 
dynamicconfig.NewGlobalDurationSetting(testGetDurationPropertyKey, 1*time.Second, "") + value := setting.Get(s.cln) s.Equal(time.Second, value()) - s.client[testGetDurationPropertyKey] = time.Minute + s.client.SetValue(testGetDurationPropertyKey, time.Minute) s.Equal(time.Minute, value()) - s.client[testGetDurationPropertyKey] = 33 + s.client.SetValue(testGetDurationPropertyKey, 33) + s.Equal(33*time.Second, value()) + s.client.SetValue(testGetDurationPropertyKey, int16(33)) s.Equal(33*time.Second, value()) - s.client[testGetDurationPropertyKey] = "33" + s.client.SetValue(testGetDurationPropertyKey, "33") s.Equal(33*time.Second, value()) + s.client.SetValue(testGetDurationPropertyKey, "33h") + s.Equal(33*time.Hour, value()) + s.client.SetValue(testGetDurationPropertyKey, float32(33.5)) + s.Equal(33*time.Second+500*time.Millisecond, value()) } func (s *collectionSuite) TestGetDurationPropertyFilteredByNamespace() { + setting := dynamicconfig.NewNamespaceDurationSetting(testGetDurationPropertyFilteredByNamespaceKey, time.Second, "") namespace := "testNamespace" - value := s.cln.GetDurationPropertyFilteredByNamespace(testGetDurationPropertyFilteredByNamespaceKey, time.Second) + value := setting.Get(s.cln) s.Equal(time.Second, value(namespace)) - s.client[testGetDurationPropertyFilteredByNamespaceKey] = time.Minute + s.client.SetValue(testGetDurationPropertyFilteredByNamespaceKey, time.Minute) s.Equal(time.Minute, value(namespace)) } func (s *collectionSuite) TestGetDurationPropertyFilteredByTaskQueueInfo() { + setting := dynamicconfig.NewTaskQueueDurationSetting(testGetDurationPropertyFilteredByTaskQueueInfoKey, time.Second, "") namespace := "testNamespace" taskQueue := "testTaskQueue" - value := s.cln.GetDurationPropertyFilteredByTaskQueueInfo(testGetDurationPropertyFilteredByTaskQueueInfoKey, time.Second) + value := setting.Get(s.cln) s.Equal(time.Second, value(namespace, taskQueue, 0)) - s.client[testGetDurationPropertyFilteredByTaskQueueInfoKey] = time.Minute + 
s.client.SetValue(testGetDurationPropertyFilteredByTaskQueueInfoKey, time.Minute) s.Equal(time.Minute, value(namespace, taskQueue, 0)) } func (s *collectionSuite) TestGetDurationPropertyFilteredByTaskType() { + setting := dynamicconfig.NewTaskTypeDurationSetting(testGetDurationPropertyFilteredByTaskTypeKey, time.Second, "") taskType := enumsspb.TASK_TYPE_UNSPECIFIED - value := s.cln.GetDurationPropertyFilteredByTaskType(testGetDurationPropertyFilteredByTaskTypeKey, time.Second) + value := setting.Get(s.cln) s.Equal(time.Second, value(taskType)) - s.client[testGetDurationPropertyFilteredByTaskTypeKey] = time.Minute + s.client.SetValue(testGetDurationPropertyFilteredByTaskTypeKey, time.Minute) s.Equal(time.Minute, value(taskType)) } +func (s *collectionSuite) TestGetDurationPropertyFilteredByChasmTaskType() { + setting := dynamicconfig.NewChasmTaskTypeDurationSetting(testGetDurationPropertyFilteredByChasmTaskTypeKey, time.Second, "") + chasmTaskType := "activity.dispatch" + value := setting.Get(s.cln) + s.Equal(time.Second, value(chasmTaskType)) + s.client.SetValue(testGetDurationPropertyFilteredByChasmTaskTypeKey, time.Minute) + s.Equal(time.Minute, value(chasmTaskType)) +} + func (s *collectionSuite) TestGetDurationPropertyStructuredDefaults() { - defaults := []ConstrainedValue{ - { - Constraints: Constraints{ - Namespace: "ns2", - TaskQueueName: "tq2", + setting := dynamicconfig.NewTaskQueueDurationSettingWithConstrainedDefault( + testGetDurationPropertyStructuredDefaults, + []dynamicconfig.TypedConstrainedValue[time.Duration]{ + { + Constraints: dynamicconfig.Constraints{ + Namespace: "ns2", + TaskQueueName: "tq2", + }, + Value: 2 * time.Minute, }, - Value: 2 * time.Minute, - }, - { - Constraints: Constraints{ - TaskQueueName: "tq2", + { + Constraints: dynamicconfig.Constraints{ + TaskQueueName: "tq2", + }, + Value: 5 * time.Minute, + }, + { + Value: 7 * time.Minute, }, - Value: 5 * time.Minute, - }, - { - Value: 7 * time.Minute, }, - } - value := 
s.cln.GetDurationPropertyFilteredByTaskQueueInfo(testGetDurationPropertyStructuredDefaults, defaults) + "", + ) + value := setting.Get(s.cln) s.Equal(7*time.Minute, value("ns1", "tq1", 0)) s.Equal(7*time.Minute, value("ns2", "tq1", 0)) s.Equal(5*time.Minute, value("ns1", "tq2", 0)) @@ -209,9 +241,9 @@ func (s *collectionSuite) TestGetDurationPropertyStructuredDefaults() { // user-set values should take precedence. defaults are included below in the interleaved // precedence order to make the test easier to read - s.client[testGetDurationPropertyStructuredDefaults] = []ConstrainedValue{ + s.client.Set(testGetDurationPropertyStructuredDefaults, []dynamicconfig.ConstrainedValue{ { - Constraints: Constraints{ + Constraints: dynamicconfig.Constraints{ Namespace: "ns2", TaskQueueName: "tq2", }, @@ -231,7 +263,7 @@ func (s *collectionSuite) TestGetDurationPropertyStructuredDefaults() { // Value: 5 * time.Minute, // }, { - Constraints: Constraints{ + Constraints: dynamicconfig.Constraints{ Namespace: "ns1", }, Value: 5 * time.Second, @@ -242,7 +274,7 @@ func (s *collectionSuite) TestGetDurationPropertyStructuredDefaults() { // { // Value: 7 * time.Minute, // }, - } + }) s.Equal(5*time.Second, value("ns1", "tq1", 0)) s.Equal(7*time.Second, value("ns2", "tq1", 0)) @@ -251,145 +283,514 @@ func (s *collectionSuite) TestGetDurationPropertyStructuredDefaults() { } func (s *collectionSuite) TestGetMapProperty() { - val := map[string]interface{}{ - "testKey": 123, - } - value := s.cln.GetMapProperty(testGetMapPropertyKey, val) - s.Equal(val, value()) + def := map[string]any{"testKey": 123} + setting := dynamicconfig.NewGlobalMapSetting( + testGetMapPropertyKey, + def, + "", + ) + value := setting.Get(s.cln) + s.Equal(def, value()) + val := maps.Clone(def) val["testKey"] = "321" - s.client[testGetMapPropertyKey] = val + s.client.SetValue(testGetMapPropertyKey, val) s.Equal(val, value()) s.Equal("321", value()["testKey"]) } -func (s *collectionSuite) TestFindMatch() { - testCases := 
[]struct { - v []ConstrainedValue - filters []Constraints - matched bool - }{ +func (s *collectionSuite) TestGetTyped() { + type myFancyType struct { + Number int + Names []string + } + def := myFancyType{28, []string{"global", "typed", "setting"}} + setting := dynamicconfig.NewGlobalTypedSettingWithConverter( + testGetTypedPropertyKey, + dynamicconfig.ConvertStructure(myFancyType{-3, nil}), // used if convert is called + def, + "", + ) + get := setting.Get(s.cln) + + s.Run("Default", func() { + s.Equal(def, get()) + }) + + s.Run("Basic", func() { + // map[string]any is what the yaml library decodes arbitrary data into + s.client.SetValue(testGetTypedPropertyKey, map[string]any{ + "Number": 39, + "Names": []string{"new", "names"}, + }) + s.Equal(myFancyType{ + Number: 39, + Names: []string{"new", "names"}, + }, get()) + }) + + s.Run("CaseInsensitive", func() { + s.client.SetValue(testGetTypedPropertyKey, map[string]any{ + "naMES": []string{"case", "insensitive"}, + }) + s.Equal(-3, get().Number) // note the convert default is used here + s.Equal([]string{"case", "insensitive"}, get().Names) + }) + + s.Run("WrongType", func() { + s.client.SetValue(testGetTypedPropertyKey, 200) + s.Equal(def, get()) + }) +} + +func (s *collectionSuite) TestGetTypedSimpleList() { + def := []float64{1.5, 1.1, 2.6, 3.7, 6.3} + setting := dynamicconfig.NewGlobalTypedSettingWithConverter( + testGetTypedPropertyKey, + dynamicconfig.ConvertStructure([]float64(nil)), + def, + "", + ) + get := setting.Get(s.cln) + + s.Run("Default", func() { + s.Equal(def, get()) + }) + + s.Run("Basic", func() { + s.client.SetValue(testGetTypedPropertyKey, []any{19.0, -2.0}) + s.Equal([]float64{19.0, -2.0}, get()) + }) + + s.Run("WrongType", func() { + s.client.SetValue(testGetTypedPropertyKey, []any{88.8, false, -5, "oops"}) + s.Equal(def, get()) + }) +} + +func (s *collectionSuite) TestGetTypedListOfStruct() { + type simple struct{ A, B int } + def := []simple{{1, 5}, {2, 9}} + setting := 
dynamicconfig.NewGlobalTypedSettingWithConverter( + testGetTypedPropertyKey, + dynamicconfig.ConvertStructure([]simple(nil)), + def, + "", + ) + get := setting.Get(s.cln) + + s.Run("Default", func() { + s.Equal(def, get()) + }) + + s.Run("Basic", func() { + s.client.SetValue(testGetTypedPropertyKey, []any{ + map[string]any{"A": 12, "B": 6}, + map[string]any{"A": -23, "B": 0}, + map[string]any{"B": 555, "C": "ignored"}, + }) + s.Equal([]simple{{12, 6}, {-23, 0}, {0, 555}}, get()) + }) + + s.Run("WrongType", func() { + s.client.SetValue(testGetTypedPropertyKey, []any{ + map[string]any{"A": false, "B": true}, + }) + s.Equal(def, get()) + }) +} + +func (s *collectionSuite) TestGetTypedProtoEnum() { + def := enumspb.ARCHIVAL_STATE_UNSPECIFIED + setting := dynamicconfig.NewGlobalTypedSetting( + testGetTypedPropertyKey, + def, + "", + ) + get := setting.Get(s.cln) + + s.Run("Default", func() { + s.Equal(def, get()) + }) + + s.Run("Basic", func() { + s.client.SetValue(testGetTypedPropertyKey, "ARCHIVAL_STATE_DISABLED") + s.Equal(enumspb.ARCHIVAL_STATE_DISABLED, get()) + }) + + s.Run("CaseInsensitive", func() { + s.client.SetValue(testGetTypedPropertyKey, "archival_state_disabled") + s.Equal(enumspb.ARCHIVAL_STATE_DISABLED, get()) + }) + + s.Run("NotFound", func() { + s.client.SetValue(testGetTypedPropertyKey, "some_other_string") + s.Equal(def, get()) + }) + + s.Run("Int", func() { + s.client.SetValue(testGetTypedPropertyKey, 2) + s.Equal(enumspb.ARCHIVAL_STATE_ENABLED, get()) + }) + + s.Run("WrongType", func() { + s.client.SetValue(testGetTypedPropertyKey, true) + s.Equal(def, get()) + }) +} + +// someEnum is an example type for DynamicConfigParseHook. 
+type someEnum int32 + +const ( + someEnumValueUnset someEnum = iota + someEnumValueOne + someEnumValueTwo + someEnumValueThree +) + +func (someEnum) DynamicConfigParseHook(s string) (someEnum, error) { + switch strings.ToLower(s) { + case "one": + return someEnumValueOne, nil + case "two": + return someEnumValueTwo, nil + case "three": + return someEnumValueThree, nil + default: + return 0, errors.New("unknown value") + } +} + +func (s *collectionSuite) TestGetGenericParseHook() { + def := someEnumValueOne + setting := dynamicconfig.NewGlobalTypedSetting( + testGetTypedPropertyKey, + def, + "", + ) + get := setting.Get(s.cln) + + s.Run("Default", func() { + s.Equal(def, get()) + }) + + s.Run("Basic", func() { + s.client.SetValue(testGetTypedPropertyKey, "THRee") + s.Equal(someEnumValueThree, get()) + }) + + s.Run("Missing", func() { + s.client.SetValue(testGetTypedPropertyKey, "four") + s.Equal(def, get()) // default since there was a parse error + }) +} + +func (s *collectionSuite) TestGetGenericParseHookValue_Struct() { + type myStruct struct { + FieldA someEnum + FieldB someEnum + } + def := myStruct{ + FieldA: someEnumValueTwo, + FieldB: someEnumValueThree, + } + setting := dynamicconfig.NewGlobalTypedSetting( + testGetTypedPropertyKey, + def, + "", + ) + get := setting.Get(s.cln) + + s.Run("Default", func() { + s.Equal(def, get()) + }) + + s.Run("Basic", func() { + s.client.SetValue(testGetTypedPropertyKey, map[string]any{"fielda": "one"}) + s.Equal(myStruct{ + FieldA: someEnumValueOne, + FieldB: someEnumValueThree, // from default + }, get()) + }) + + s.Run("Missing", func() { + s.client.SetValue(testGetTypedPropertyKey, map[string]any{"FieldA": "one", "FieldB": "four"}) + s.Equal(def, get()) // default since there was a parse error + }) +} + +func (s *collectionSuite) TestGetIntPropertyFilteredByDestination() { + setting := dynamicconfig.NewDestinationIntSetting(testGetIntPropertyFilteredByDestinationKey, 10, "") + namespaceName := "testNamespace" + 
destination1 := "testDestination1" + destination2 := "testDestination2" + value := setting.Get(s.cln) + s.Equal(10, value(namespaceName, destination1)) + s.client.Set(testGetIntPropertyFilteredByDestinationKey, []dynamicconfig.ConstrainedValue{ { - v: []ConstrainedValue{ - {Constraints: Constraints{}}, - }, - filters: []Constraints{ - {Namespace: "some random namespace"}, + Constraints: dynamicconfig.Constraints{ + Namespace: namespaceName, + Destination: destination1, }, - matched: false, + Value: 50, }, { - v: []ConstrainedValue{ - {Constraints: Constraints{Namespace: "samples-namespace"}}, + Constraints: dynamicconfig.Constraints{ + Namespace: namespaceName, }, - filters: []Constraints{ - {Namespace: "some random namespace"}, - }, - matched: false, + Value: 75, }, { - v: []ConstrainedValue{ - {Constraints: Constraints{Namespace: "samples-namespace", TaskQueueName: "sample-task-queue"}}, - }, - filters: []Constraints{ - {Namespace: "samples-namespace", TaskQueueName: "sample-task-queue"}, + Constraints: dynamicconfig.Constraints{ + Destination: destination1, }, - matched: true, + Value: 90, }, { - v: []ConstrainedValue{ - {Constraints: Constraints{Namespace: "samples-namespace"}}, + Constraints: dynamicconfig.Constraints{ + Destination: destination2, }, - filters: []Constraints{ - {TaskQueueName: "sample-task-queue"}, - }, - matched: false, + Value: 100, }, + }) + s.Equal(50, value(namespaceName, destination1)) + s.Equal(75, value(namespaceName, "testAnotherDestination")) + s.Equal(90, value("testAnotherNamespace", destination1)) + s.Equal(100, value(namespaceName, destination2)) // priority: destination >>> namespace + s.Equal(10, value("testAnotherNamespace", "testAnotherDestination")) +} + +type ( + subscriptionSuite struct { + suite.Suite + client *testSubscribableClient + cln *dynamicconfig.Collection + } + + testSubscribableClient struct { + lock sync.Mutex + m map[dynamicconfig.Key][]dynamicconfig.ConstrainedValue + subs []dynamicconfig.ClientUpdateFunc + 
} +) + +var _ dynamicconfig.NotifyingClient = (*testSubscribableClient)(nil) + +func TestSubscriptionSuite(t *testing.T) { + suite.Run(t, new(subscriptionSuite)) +} + +func (s *subscriptionSuite) SetupSuite() { + s.client = newTestSubscribableClient() + logger := log.NewNoopLogger() + s.cln = dynamicconfig.NewCollection(s.client, logger) + s.cln.Start() +} + +func (s *subscriptionSuite) TearDownSuite() { + s.cln.Stop() +} + +func (s *subscriptionSuite) SetupTest() { + dynamicconfig.ResetRegistryForTest() +} + +func newTestSubscribableClient() *testSubscribableClient { + return &testSubscribableClient{ + m: make(map[dynamicconfig.Key][]dynamicconfig.ConstrainedValue), } +} + +func (c *testSubscribableClient) Subscribe(f dynamicconfig.ClientUpdateFunc) func() { + c.lock.Lock() + defer c.lock.Unlock() + c.subs = append(c.subs, f) + return func() {} // ignore cancel +} + +func (c *testSubscribableClient) GetValue(k dynamicconfig.Key) []dynamicconfig.ConstrainedValue { + c.lock.Lock() + defer c.lock.Unlock() + return c.m[k] +} + +func (c *testSubscribableClient) SetValue(k string, v any) { + c.Set(k, []dynamicconfig.ConstrainedValue{{Value: v}}) +} - for _, tc := range testCases { - _, err := findMatch(tc.v, nil, tc.filters) - s.Equal(tc.matched, err == nil) - _, err = findMatch(nil, tc.v, tc.filters) - s.Equal(tc.matched, err == nil) +func (c *testSubscribableClient) Set(ks string, cvs []dynamicconfig.ConstrainedValue) { + k := dynamicconfig.MakeKey(ks) + c.lock.Lock() + defer c.lock.Unlock() + c.m[k] = cvs + for _, f := range c.subs { + f(map[dynamicconfig.Key][]dynamicconfig.ConstrainedValue{k: cvs}) } } -func BenchmarkCollection(b *testing.B) { - // client with just one value - client1 := StaticClient(map[Key]any{ - MatchingMaxTaskBatchSize: []ConstrainedValue{{Value: 12}}, +func (s *subscriptionSuite) TestSubscriptionGlobal() { + setting := dynamicconfig.NewGlobalBoolSetting(testGetBoolPropertyKey, false, "") + + vals := make(chan bool, 1) + cb := func(newVal bool) 
{ vals <- newVal } + initial, cancel := setting.Subscribe(s.cln)(cb) + + s.False(initial) + + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{{Value: true}}) + s.Require().Eventually(func() bool { return len(vals) == 1 }, time.Second, time.Millisecond) + s.True(<-vals) + + s.client.Set(setting.Key().String(), nil) // back to default + s.Require().Eventually(func() bool { return len(vals) == 1 }, time.Second, time.Millisecond) + s.False(<-vals) + + cancel() + + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{{Value: true}}) + // no update should be delivered + time.Sleep(10 * time.Millisecond) + s.Empty(vals, "should not deliver update") +} + +func (s *subscriptionSuite) TestSubscriptionGlobal_DoesNotCallUnchanged() { + setting := dynamicconfig.NewGlobalBoolSetting(testGetBoolPropertyKey, true, "") + vals := make(chan bool, 1) + cb := func(newVal bool) { vals <- newVal } + initial, _ := setting.Subscribe(s.cln)(cb) + s.True(initial) + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{{Value: true}}) + time.Sleep(10 * time.Millisecond) + s.Empty(vals, "should not deliver update") +} + +func (s *subscriptionSuite) TestSubscriptionNamespace() { + setting := dynamicconfig.NewNamespaceIntSetting(testGetIntPropertyKey, 0, "") + + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Constraints: dynamicconfig.Constraints{Namespace: "ns1"}, Value: 1}, + {Constraints: dynamicconfig.Constraints{Namespace: "ns3"}, Value: 3}, }) - cln1 := NewCollection(client1, log.NewNoopLogger()) - b.Run("global int", func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N/2; i++ { - size := cln1.GetIntProperty(MatchingMaxTaskBatchSize, 10) - _ = size() - size = cln1.GetIntProperty(MatchingGetTasksBatchSize, 10) - _ = size() - } + + vals1 := make(chan int, 1) + init1, _ := setting.Subscribe(s.cln)("ns1", func(n int) { vals1 <- n }) + vals2 := make(chan int, 1) + init2, _ := 
setting.Subscribe(s.cln)("ns2", func(n int) { vals2 <- n }) + vals3 := make(chan int, 1) + init3, _ := setting.Subscribe(s.cln)("ns3", func(n int) { vals3 <- n }) + + s.Equal(1, init1) + s.Equal(0, init2) + s.Equal(3, init3) + + // change ns3 to 33 + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Constraints: dynamicconfig.Constraints{Namespace: "ns1"}, Value: 1}, + {Constraints: dynamicconfig.Constraints{Namespace: "ns3"}, Value: 33}, }) - b.Run("namespace int", func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N/2; i++ { - size := cln1.GetIntPropertyFilteredByNamespace(MatchingMaxTaskBatchSize, 10) - _ = size("my-namespace") - size = cln1.GetIntPropertyFilteredByNamespace(MatchingGetTasksBatchSize, 10) - _ = size("my-namespace") - } + + s.Require().Eventually(func() bool { return len(vals3) == 1 }, time.Second, time.Millisecond) + s.Equal(33, <-vals3) + s.Empty(vals1) + s.Empty(vals2) + + // add ns2 + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Constraints: dynamicconfig.Constraints{Namespace: "ns1"}, Value: 1}, + {Constraints: dynamicconfig.Constraints{Namespace: "ns2"}, Value: 2}, + {Constraints: dynamicconfig.Constraints{Namespace: "ns3"}, Value: 33}, }) - b.Run("taskqueue int", func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N/2; i++ { - size := cln1.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, 10) - _ = size("my-namespace", "my-task-queue", 1) - size = cln1.GetIntPropertyFilteredByTaskQueueInfo(MatchingGetTasksBatchSize, 10) - _ = size("my-namespace", "my-task-queue", 1) - } + s.Require().Eventually(func() bool { return len(vals2) == 1 }, time.Second, time.Millisecond) + s.Equal(2, <-vals2) + s.Empty(vals1) + s.Empty(vals3) + + // remove ns1 and ns3 + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Constraints: dynamicconfig.Constraints{Namespace: "ns2"}, Value: 2}, }) + s.Require().Eventually( + func() bool { return len(vals1) == 1 && 
len(vals3) == 1 }, + time.Second, time.Millisecond) + s.Equal(0, <-vals1) + s.Empty(vals2) + s.Equal(0, <-vals3) +} - // client with more constrained values - client2 := StaticClient(map[Key]any{ - MatchingMaxTaskBatchSize: []ConstrainedValue{ - { - Constraints: Constraints{ - TaskQueueName: "other-tq", - }, - Value: 18, - }, - { - Constraints: Constraints{ - Namespace: "other-ns", - }, - Value: 15, - }, +func (s *subscriptionSuite) TestSubscriptionWithDefault() { + baseSetting := dynamicconfig.NewGlobalIntSetting(testGetIntPropertyKey, 0, "") + setting := baseSetting.WithDefault(100) + + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{{Value: 50}}) + + vals := make(chan int, 1) + init, _ := setting.Subscribe(s.cln)(func(n int) { vals <- n }) + s.Equal(50, init) + + // remove, should get default + s.client.Set(setting.Key().String(), nil) + s.Require().Eventually(func() bool { return len(vals) == 1 }, time.Second, time.Millisecond) + s.Equal(100, <-vals) + + // test nil callback + v, cancel := setting.Subscribe(s.cln)(nil) + s.Equal(100, v) + s.Nil(cancel) +} + +func (s *subscriptionSuite) TestSubscriptionConstrainedDefaults() { + setting := dynamicconfig.NewNamespaceIntSettingWithConstrainedDefault( + testGetIntPropertyKey, + []dynamicconfig.TypedConstrainedValue[int]{ + {Value: 34, Constraints: dynamicconfig.Constraints{Namespace: "special"}}, + {Value: 10}, // no constraints = default for all }, + "", + ) + + var normal, special atomic.Int64 + var normalCalls, specialCalls atomic.Int64 + + waitFor := func(normalv, specialv, normalc, specialc int) { + s.EventuallyWithT(func(c *assert.CollectT) { + assert.Equal(c, normalv, int(normal.Load())) + assert.Equal(c, specialv, int(special.Load())) + }, time.Second, time.Millisecond) + s.Equal(normalc, int(normalCalls.Load())) + s.Equal(specialc, int(specialCalls.Load())) + } + + // normal ns + normalInit, normalCancel := setting.Subscribe(s.cln)("normal", func(v int) { normal.Store(int64(v)); 
normalCalls.Add(1) }) + normal.Store(int64(normalInit)) + defer normalCancel() + s.Equal(10, normalInit) + + // special ns + specialInit, specialCancel := setting.Subscribe(s.cln)("special", func(v int) { special.Store(int64(v)); specialCalls.Add(1) }) + special.Store(int64(specialInit)) + defer specialCancel() + s.Equal(34, specialInit) // Should get the constrained default for "special" + + // set a value for special + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Value: 200, Constraints: dynamicconfig.Constraints{Namespace: "special"}}, }) - cln2 := NewCollection(client2, log.NewNoopLogger()) - b.Run("single default", func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N/4; i++ { - size := cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, 10) - _ = size("my-namespace", "my-task-queue", 1) - size = cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, 10) - _ = size("my-namespace", "other-tq", 1) - size = cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, 10) - _ = size("other-ns", "my-task-queue", 1) - size = cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, 10) - _ = size("other-ns", "other-tq", 1) - } + waitFor(10, 200, 0, 1) + + // set a value for normal + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Value: 123, Constraints: dynamicconfig.Constraints{Namespace: "normal"}}, }) - b.Run("structured default", func(b *testing.B) { - b.ReportAllocs() - for i := 0; i < b.N/4; i++ { - size := cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, defaultNumTaskQueuePartitions) - _ = size("my-namespace", "my-task-queue", 1) - size = cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, defaultNumTaskQueuePartitions) - _ = size("my-namespace", "other-tq", 1) - size = cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, defaultNumTaskQueuePartitions) - _ = size("other-ns", "my-task-queue", 1) 
- size = cln2.GetIntPropertyFilteredByTaskQueueInfo(MatchingMaxTaskBatchSize, defaultNumTaskQueuePartitions) - _ = size("other-ns", "other-tq", 1) - } + waitFor(123, 34, 1, 2) + + // set a default value + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{ + {Value: 19}, }) + waitFor(19, 34, 2, 2) + + // remove values + s.client.Set(setting.Key().String(), []dynamicconfig.ConstrainedValue{}) + waitFor(10, 34, 3, 2) } diff --git a/common/dynamicconfig/config/testConfig.yaml b/common/dynamicconfig/config/testConfig.yaml index 9ae9ad01e2e..258b8ee08c7 100644 --- a/common/dynamicconfig/config/testConfig.yaml +++ b/common/dynamicconfig/config/testConfig.yaml @@ -1,82 +1,89 @@ frontend.namespaceCount: -- value: - NamespaceId: 1 - constraints: {} + - value: + NamespaceId: 1 + constraints: {} TestCaseInsensitivePropertykEy: - value: true constraints: {} testGetBoolPropertyKey: -- value: false - constraints: {} -- value: true - constraints: - namespace: global-samples-namespace -- value: true - constraints: - namespace: samples-namespace + - value: false + constraints: {} + - value: true + constraints: + namespace: global-samples-namespace + - value: true + constraints: + namespace: samples-namespace testGetDurationPropertyKey: -- value: 1m - constraints: {} -- value: wrong duration string - constraints: - namespace: samples-namespace - taskQueueName: longIdleTimeTaskqueue -- value: 2 - constraints: - namespace: samples-namespace -- value: true - constraints: - namespace: broken-namespace + - value: 1m + constraints: {} + - value: wrong duration string + constraints: + namespace: samples-namespace + taskQueueName: longIdleTimeTaskqueue + - value: 2 + constraints: + namespace: samples-namespace + - value: true + constraints: + namespace: broken-namespace testGetFloat64PropertyKey: -- value: 12 - constraints: {} -- value: wrong type - constraints: - namespace: samples-namespace + - value: 12 + constraints: {} + - value: wrong type + constraints: + 
namespace: samples-namespace testGetIntPropertyKey: -- value: 1000 - constraints: {} -- value: 1000.1 - constraints: - namespace: global-samples-namespace -- value: 1001 - constraints: - namespace: global-samples-namespace - taskQueueName: test-tq - taskType: Workflow -- value: 1002 - constraints: - namespace: global-samples-namespace - taskQueueName: test-tq - taskType: Activity -- value: 1003 - constraints: - namespace: global-samples-namespace - taskQueueName: test-tq -- value: 1004 - constraints: - namespace: another-namespace -- value: 1005 - constraints: - taskQueueName: other-test-tq + - value: 1000 + constraints: {} + - value: 1000.1 + constraints: + namespace: global-samples-namespace + - value: 1001 + constraints: + namespace: global-samples-namespace + taskQueueName: test-tq + taskType: Workflow + - value: 1002 + constraints: + namespace: global-samples-namespace + taskQueueName: test-tq + taskType: Activity + - value: 1003 + constraints: + namespace: global-samples-namespace + taskQueueName: test-tq + - value: 1004 + constraints: + namespace: another-namespace + - value: 1005 + constraints: + taskQueueName: other-test-tq testGetMapPropertyKey: -- value: - key1: "1" - key2: 1 - key3: - - false - - key4: true - key5: 2.1 - constraints: {} -- value: "1" - constraints: - namespace: random-namespace + - value: + key1: "1" + key2: 1 + key3: + - false + - key4: true + key5: 2.1 + constraints: {} + - value: "1" + constraints: + namespace: random-namespace +testGetTypedPropertyKey: + - value: + number: 23.2 # note field is int + days: "6d" + inner: + key1: 12345 # note field is float + key2: true testGetStringPropertyKey: -- value: some random string - constraints: {} -- value: constrained-string - constraints: - namespace: random-namespace + - value: some random string + constraints: {} + - value: constrained-string + constraints: + namespace: random-namespace testGetDurationPropertyFilteredByTaskTypeKey: - value: 10s constraints: @@ -84,3 +91,25 @@ 
testGetDurationPropertyFilteredByTaskTypeKey: - value: 10s constraints: historytasktype: 1 +testGetDurationPropertyFilteredByChasmTaskTypeKey: + - value: 30s + constraints: + chasmtasktype: "activity.dispatch" + - value: 24h + constraints: {} +testGetIntPropertyFilteredByDestinationKey: + - value: 10 + constraints: {} + - value: 20 + constraints: + namespace: test-namespace + destination: test-destination-1 + - value: 30 + constraints: + namespace: test-namespace + - value: 40 + constraints: + destination: test-destination-1 + - value: 50 + constraints: + destination: test-destination-2 diff --git a/common/dynamicconfig/constants.go b/common/dynamicconfig/constants.go index 1d11e9c15e8..30bc93c04a2 100644 --- a/common/dynamicconfig/constants.go +++ b/common/dynamicconfig/constants.go @@ -1,1031 +1,3333 @@ -// The MIT License -// -// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. -// -// Copyright (c) 2020 Uber Technologies, Inc. -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. - package dynamicconfig -func (k Key) String() string { - return string(k) -} +import ( + "math" + "os" + "time" + + sdkworker "go.temporal.io/sdk/worker" + "go.temporal.io/server/common/debug" + "go.temporal.io/server/common/primitives" + "go.temporal.io/server/common/retrypolicy" + "go.temporal.io/server/common/util" + "go.temporal.io/server/service/matching/counter" +) + +var ( + // keys for dynamic config itself + DynamicConfigSubscriptionPollInterval = NewGlobalDurationSetting( + "dynamicconfig.subscriptionPollInterval", + time.Minute, + `Poll interval for emulating subscriptions on non-subscribable Client.`, + ) -const ( // keys for admin - // AdminEnableListHistoryTasks is the key for enabling listing history tasks - AdminEnableListHistoryTasks = "admin.enableListHistoryTasks" - // AdminMatchingNamespaceToPartitionDispatchRate is the max qps of any task queue partition for a given namespace - AdminMatchingNamespaceToPartitionDispatchRate = "admin.matchingNamespaceToPartitionDispatchRate" - // AdminMatchingNamespaceTaskqueueToPartitionDispatchRate is the max qps of a task queue partition for a given namespace & task queue - AdminMatchingNamespaceTaskqueueToPartitionDispatchRate = "admin.matchingNamespaceTaskqueueToPartitionDispatchRate" + AdminEnableListHistoryTasks = NewGlobalBoolSetting( + "admin.enableListHistoryTasks", + true, + `AdminEnableListHistoryTasks is the key for enabling listing history tasks`, + ) + AdminMatchingNamespaceToPartitionDispatchRate = NewNamespaceFloatSetting( + "admin.matchingNamespaceToPartitionDispatchRate", + 10000, + `AdminMatchingNamespaceToPartitionDispatchRate is the max qps of any task queue partition for a given namespace`, + ) + 
AdminMatchingNamespaceTaskqueueToPartitionDispatchRate = NewTaskQueueFloatSetting( + "admin.matchingNamespaceTaskqueueToPartitionDispatchRate", + 1000, + `AdminMatchingNamespaceTaskqueueToPartitionDispatchRate is the max qps of a task queue partition for a given namespace & task queue`, + ) // keys for system - // VisibilityPersistenceMaxReadQPS is the max QPC system host can query visibility DB for read. - VisibilityPersistenceMaxReadQPS = "system.visibilityPersistenceMaxReadQPS" - // VisibilityPersistenceMaxWriteQPS is the max QPC system host can query visibility DB for write. - VisibilityPersistenceMaxWriteQPS = "system.visibilityPersistenceMaxWriteQPS" - // EnableReadFromSecondaryVisibility is the config to enable read from secondary visibility - EnableReadFromSecondaryVisibility = "system.enableReadFromSecondaryVisibility" - // SecondaryVisibilityWritingMode is key for how to write to secondary visibility - SecondaryVisibilityWritingMode = "system.secondaryVisibilityWritingMode" - // VisibilityDisableOrderByClause is the config to disable ORDERY BY clause for Elasticsearch - VisibilityDisableOrderByClause = "system.visibilityDisableOrderByClause" - // VisibilityEnableManualPagination is the config to enable manual pagination for Elasticsearch - VisibilityEnableManualPagination = "system.visibilityEnableManualPagination" - // VisibilityAllowList is the config to allow list of values for regular types - VisibilityAllowList = "system.visibilityAllowList" - // SuppressErrorSetSystemSearchAttribute suppresses errors when trying to set - // values in system search attributes. 
- SuppressErrorSetSystemSearchAttribute = "system.suppressErrorSetSystemSearchAttribute" - - // HistoryArchivalState is key for the state of history archival - HistoryArchivalState = "system.historyArchivalState" - // EnableReadFromHistoryArchival is key for enabling reading history from archival store - EnableReadFromHistoryArchival = "system.enableReadFromHistoryArchival" - // VisibilityArchivalState is key for the state of visibility archival - VisibilityArchivalState = "system.visibilityArchivalState" - // EnableReadFromVisibilityArchival is key for enabling reading visibility from archival store - EnableReadFromVisibilityArchival = "system.enableReadFromVisibilityArchival" - // EnableNamespaceNotActiveAutoForwarding whether enabling DC auto forwarding to active cluster - // for signal / start / signal with start API if namespace is not active - EnableNamespaceNotActiveAutoForwarding = "system.enableNamespaceNotActiveAutoForwarding" - // TransactionSizeLimit is the largest allowed transaction size to persistence - TransactionSizeLimit = "system.transactionSizeLimit" - // DisallowQuery is the key to disallow query for a namespace - DisallowQuery = "system.disallowQuery" - // EnableAuthorization is the key to enable authorization for a namespace - EnableAuthorization = "system.enableAuthorization" - // EnableCrossNamespaceCommands is the key to enable commands for external namespaces - EnableCrossNamespaceCommands = "system.enableCrossNamespaceCommands" - // ClusterMetadataRefreshInterval is config to manage cluster metadata table refresh interval - ClusterMetadataRefreshInterval = "system.clusterMetadataRefreshInterval" - // ForceSearchAttributesCacheRefreshOnRead forces refreshing search attributes cache on a read operation, so we always - // get the latest data from DB. This effectively bypasses cache value and is used to facilitate testing of changes in - // search attributes. This should not be turned on in production. 
- ForceSearchAttributesCacheRefreshOnRead = "system.forceSearchAttributesCacheRefreshOnRead" - // EnableRingpopTLS controls whether to use TLS for ringpop, using the same "internode" TLS - // config as the other services. - EnableRingpopTLS = "system.enableRingpopTLS" - // RingpopApproximateMaxPropagationTime is used for timing certain startup and shutdown processes. - // (It is not and doesn't have to be a guarantee.) - RingpopApproximateMaxPropagationTime = "system.ringpopApproximateMaxPropagationTime" - // EnableParentClosePolicyWorker decides whether or not enable system workers for processing parent close policy task - EnableParentClosePolicyWorker = "system.enableParentClosePolicyWorker" - // EnableStickyQuery indicates if sticky query should be enabled per namespace - EnableStickyQuery = "system.enableStickyQuery" - // EnableActivityEagerExecution indicates if activity eager execution is enabled per namespace - EnableActivityEagerExecution = "system.enableActivityEagerExecution" - // EnableEagerWorkflowStart toggles "eager workflow start" - returning the first workflow task inline in the - // response to a StartWorkflowExecution request and skipping the trip through matching. 
- EnableEagerWorkflowStart = "system.enableEagerWorkflowStart" - // NamespaceCacheRefreshInterval is the key for namespace cache refresh interval dynamic config - NamespaceCacheRefreshInterval = "system.namespaceCacheRefreshInterval" - // PersistenceHealthSignalMetricsEnabled determines whether persistence shard RPS metrics are emitted - PersistenceHealthSignalMetricsEnabled = "system.persistenceHealthSignalMetricsEnabled" - // PersistenceHealthSignalAggregationEnabled determines whether persistence latency and error averages are tracked - PersistenceHealthSignalAggregationEnabled = "system.persistenceHealthSignalAggregationEnabled" - // PersistenceHealthSignalWindowSize is the time window size in seconds for aggregating persistence signals - PersistenceHealthSignalWindowSize = "system.persistenceHealthSignalWindowSize" - // PersistenceHealthSignalBufferSize is the maximum number of persistence signals to buffer in memory per signal key - PersistenceHealthSignalBufferSize = "system.persistenceHealthSignalBufferSize" - // ShardRPSWarnLimit is the per-shard RPS limit for warning - ShardRPSWarnLimit = "system.shardRPSWarnLimit" - // ShardPerNsRPSWarnPercent is the per-shard per-namespace RPS limit for warning as a percentage of ShardRPSWarnLimit - // these warning are not emitted if the value is set to 0 or less - ShardPerNsRPSWarnPercent = "system.shardPerNsRPSWarnPercent" - // OperatorRPSRatio is the percentage of the rate limit provided to priority rate limiters that should be used for - // operator API calls (highest priority). 
Should be >0.0 and <= 1.0 (defaults to 20% if not specified) - OperatorRPSRatio = "system.operatorRPSRatio" - - // Whether the deadlock detector should dump goroutines - DeadlockDumpGoroutines = "system.deadlock.DumpGoroutines" - // Whether the deadlock detector should cause the grpc server to fail health checks - DeadlockFailHealthCheck = "system.deadlock.FailHealthCheck" - // Whether the deadlock detector should abort the process - DeadlockAbortProcess = "system.deadlock.AbortProcess" - // How often the detector checks each root. - DeadlockInterval = "system.deadlock.Interval" - // How many extra goroutines can be created per root. - DeadlockMaxWorkersPerRoot = "system.deadlock.MaxWorkersPerRoot" + VisibilityPersistenceMaxReadQPS = NewGlobalIntSetting( + "system.visibilityPersistenceMaxReadQPS", + 9000, + `VisibilityPersistenceMaxReadQPS is the max QPC system host can query visibility DB for read.`, + ) + VisibilityPersistenceMaxWriteQPS = NewGlobalIntSetting( + "system.visibilityPersistenceMaxWriteQPS", + 9000, + `VisibilityPersistenceMaxWriteQPS is the max QPC system host can query visibility DB for write.`, + ) + VisibilityPersistenceSlowQueryThreshold = NewGlobalDurationSetting( + "system.visibilityPersistenceSlowQueryThreshold", + time.Second, + `VisibilityPersistenceSlowQueryThreshold is the threshold above which a query is considered slow and logged.`, + ) + EnableReadFromSecondaryVisibility = NewNamespaceBoolSetting( + "system.enableReadFromSecondaryVisibility", + false, + `EnableReadFromSecondaryVisibility is the config to enable read from secondary visibility`, + ) + VisibilityEnableShadowReadMode = NewGlobalBoolSetting( + "system.visibilityEnableShadowReadMode", + false, + `VisibilityEnableShadowReadMode is the config to enable shadow read from secondary visibility`, + ) + SecondaryVisibilityWritingMode = NewGlobalStringSetting( + "system.secondaryVisibilityWritingMode", + "off", + `SecondaryVisibilityWritingMode is key for how to write to secondary 
visibility`, + ) + VisibilityDisableOrderByClause = NewNamespaceBoolSetting( + "system.visibilityDisableOrderByClause", + true, + `VisibilityDisableOrderByClause is the config to disable ORDERY BY clause for Elasticsearch`, + ) + VisibilityEnableManualPagination = NewNamespaceBoolSetting( + "system.visibilityEnableManualPagination", + true, + `VisibilityEnableManualPagination is the config to enable manual pagination for Elasticsearch`, + ) + VisibilityAllowList = NewNamespaceBoolSetting( + "system.visibilityAllowList", + true, + `VisibilityAllowList is the config to allow list of values for regular types`, + ) + SuppressErrorSetSystemSearchAttribute = NewNamespaceBoolSetting( + "system.suppressErrorSetSystemSearchAttribute", + false, + `SuppressErrorSetSystemSearchAttribute suppresses errors when trying to set +values in system search attributes.`, + ) + VisibilityEnableUnifiedQueryConverter = NewGlobalBoolSetting( + "system.visibilityEnableUnifiedQueryConverter", + false, + `VisibilityEnableUnifiedQueryConverter enables the unified query converter for parsing the +query.`, + ) + + HistoryArchivalState = NewGlobalStringSetting( + "system.historyArchivalState", + "", // actual default is from static config + `HistoryArchivalState is key for the state of history archival`, + ) + EnableReadFromHistoryArchival = NewGlobalBoolSetting( + "system.enableReadFromHistoryArchival", + false, // actual default is from static config + `EnableReadFromHistoryArchival is key for enabling reading history from archival store`, + ) + VisibilityArchivalState = NewGlobalStringSetting( + "system.visibilityArchivalState", + "", // actual default is from static config + `VisibilityArchivalState is key for the state of visibility archival`, + ) + EnableReadFromVisibilityArchival = NewGlobalBoolSetting( + "system.enableReadFromVisibilityArchival", + false, // actual default is from static config + `EnableReadFromVisibilityArchival is key for enabling reading visibility from archival store`, 
+ ) + EnableNamespaceNotActiveAutoForwarding = NewNamespaceBoolSetting( + "system.enableNamespaceNotActiveAutoForwarding", + true, + `EnableNamespaceNotActiveAutoForwarding whether enabling DC auto forwarding to active cluster +for signal / start / signal with start API if namespace is not active`, + ) + ForceNamespaceSelectedAPIAutoForwarding = NewNamespaceBoolSetting( + "system.forceNamespaceSelectedAPIAutoForwarding", + false, + `ForceNamespaceSelectedAPIAutoForwarding forces selective (whitelist) API forwarding for the namespace when true, overriding all-apis-forwarding policy for that namespace`, + ) + EnableNamespaceHandoverWait = NewNamespaceBoolSetting( + "system.enableNamespaceHandoverWait", + false, + `EnableNamespaceHandoverWait whether waiting for namespace replication state update before serve the request`, + ) + TransactionSizeLimit = NewGlobalIntSetting( + "system.transactionSizeLimit", + primitives.DefaultTransactionSizeLimit, + `TransactionSizeLimit is the largest allowed transaction size to persistence`, + ) + DisallowQuery = NewNamespaceBoolSetting( + "system.disallowQuery", + false, + `DisallowQuery is the key to disallow query for a namespace`, + ) + EnableCrossNamespaceCommands = NewGlobalBoolSetting( + "system.enableCrossNamespaceCommands", + false, + `EnableCrossNamespaceCommands is the key to enable commands for external namespaces`, + ) + DisableStreamingAuthorizer = NewGlobalBoolSetting( + "system.disableStreamingAuthorizer", + false, + `DisableStreamingAuthorizer is the key to disable the auth on streaming endpoint`, + ) + ClusterMetadataRefreshInterval = NewGlobalDurationSetting( + "system.clusterMetadataRefreshInterval", + time.Minute, + `ClusterMetadataRefreshInterval is config to manage cluster metadata table refresh interval`, + ) + ForceSearchAttributesCacheRefreshOnRead = NewGlobalBoolSetting( + "system.forceSearchAttributesCacheRefreshOnRead", + false, + `ForceSearchAttributesCacheRefreshOnRead forces refreshing search attributes 
cache on a read operation, so we always +get the latest data from DB. This effectively bypasses cache value and is used to facilitate testing of changes in +search attributes. This should not be turned on in production.`, + ) + EnableRingpopTLS = NewGlobalBoolSetting( + "system.enableRingpopTLS", + false, + `EnableRingpopTLS controls whether to use TLS for ringpop, using the same "internode" TLS +config as the other services.`, + ) + RingpopApproximateMaxPropagationTime = NewGlobalDurationSetting( + "system.ringpopApproximateMaxPropagationTime", + 3*time.Second, + `RingpopApproximateMaxPropagationTime is used for timing certain startup and shutdown processes. +(It is not and doesn't have to be a guarantee.)`, + ) + RingpopReplicaPoints = NewGlobalIntSetting( + "system.ringpopReplicaPoints", + 100, + `RingpopReplicaPoints is the number of virtual nodes (replica points) per physical host +in the consistent hash ring used by ringpop. Changing it may cause service disruption during deployment.`, + ) + EnableParentClosePolicyWorker = NewGlobalBoolSetting( + "system.enableParentClosePolicyWorker", + true, + `EnableParentClosePolicyWorker decides whether or not enable system workers for processing parent close policy task`, + ) + EnableStickyQuery = NewNamespaceBoolSetting( + "system.enableStickyQuery", + true, + `EnableStickyQuery indicates if sticky query should be enabled per namespace`, + ) + EnableActivityEagerExecution = NewNamespaceBoolSetting( + "system.enableActivityEagerExecution", + false, + `EnableActivityEagerExecution indicates if activity eager execution is enabled per namespace`, + ) + EnableCancelActivityWorkerCommand = NewGlobalBoolSetting( + "system.enableCancelActivityWorkerCommand", + false, + `EnableCancelActivityWorkerCommand enables pushing activity cancellation to workers via Nexus worker commands`, + ) + NamespaceMinRetentionGlobal = NewGlobalDurationSetting( + "system.namespaceMinRetentionGlobal", + 24*time.Hour, + `Minimum retention duration 
for global namespaces. This value should only be lowered for testing purposes.`, + ) + NamespaceMinRetentionLocal = NewGlobalDurationSetting( + "system.namespaceMinRetentionLocal", + time.Hour, + `Minimum retention duration for local namespaces. This value should only be lowered for testing purposes.`, + ) + EnableActivityRetryStampIncrement = NewGlobalBoolSetting( + "system.enableActivityRetryStampIncrement", + false, + `EnableActivityRetryStampIncrement indicates if activity retry stamp increment is enabled`, + ) + EnableEagerWorkflowStart = NewNamespaceBoolSetting( + "system.enableEagerWorkflowStart", + true, + `Toggles "eager workflow start" - returning the first workflow task inline in the +response to a StartWorkflowExecution request and skipping the trip through matching.`, + ) + NamespaceCacheRefreshInterval = NewGlobalDurationSetting( + "system.namespaceCacheRefreshInterval", + 2*time.Second, + `NamespaceCacheRefreshInterval is the key for namespace cache refresh interval dynamic config`, + ) + PersistenceHealthSignalMetricsEnabled = NewGlobalBoolSetting( + "system.persistenceHealthSignalMetricsEnabled", + true, + `PersistenceHealthSignalMetricsEnabled determines whether persistence shard RPS metrics are emitted`, + ) + HistoryHealthSignalMetricsEnabled = NewGlobalBoolSetting( + "system.historyHealthSignalMetricsEnabled", + true, + `HistoryHealthSignalMetricsEnabled determines whether history service RPC metrics are emitted`, + ) + PersistenceHealthSignalAggregationEnabled = NewGlobalBoolSetting( + "system.persistenceHealthSignalAggregationEnabled", + true, + `PersistenceHealthSignalAggregationEnabled determines whether persistence latency and error averages are tracked`, + ) + PersistenceHealthSignalWindowSize = NewGlobalDurationSetting( + "system.persistenceHealthSignalWindowSize", + 10*time.Second, + `PersistenceHealthSignalWindowSize is the time window size in seconds for aggregating persistence signals`, + ) + PersistenceHealthSignalBufferSize = 
NewGlobalIntSetting( + "system.persistenceHealthSignalBufferSize", + 5000, + `PersistenceHealthSignalBufferSize is the maximum number of persistence signals to buffer in memory per signal key`, + ) + OperatorRPSRatio = NewGlobalFloatSetting( + "system.operatorRPSRatio", + 0.2, + `OperatorRPSRatio is the percentage of the rate limit provided to priority rate limiters that should be used for +operator API calls (highest priority). Should be >0.0 and <= 1.0 (defaults to 20% if not specified)`, + ) + // TODO: The following 2 configs should be removed once server keepalive and client keepalive are enabled by default + EnableInternodeServerKeepAlive = NewGlobalBoolSetting( + "system.enableInternodeServerKeepAlive", + false, + `EnableInternodeServerKeepAlive is the config to enable keep alive for inter-node connections on server side.`, + ) + EnableInternodeClientKeepAlive = NewGlobalBoolSetting( + "system.enableInternodeClientKeepAlive", + false, + `EnableInternodeClientKeepAlive is the config to enable keep alive for inter-node connections on client side.`, + ) + + PersistenceQPSBurstRatio = NewGlobalFloatSetting( + "system.persistenceQPSBurstRatio", + 1.0, + `PersistenceQPSBurstRatio is the burst ratio for persistence QPS. 
This flag controls the burst ratio for all services.`, + ) + + EnableDataLossMetrics = NewGlobalBoolSetting( + "system.enableDataLossMetrics", + false, + `EnableDataLossMetrics determines whether dataloss metrics are emitted when dataloss errors are encountered`, + ) + + // deadlock detector + + DeadlockDumpGoroutines = NewGlobalBoolSetting( + "system.deadlock.DumpGoroutines", + true, + `Whether the deadlock detector should dump goroutines`, + ) + DeadlockFailHealthCheck = NewGlobalBoolSetting( + "system.deadlock.FailHealthCheck", + false, + `Whether the deadlock detector should cause the grpc server to fail health checks`, + ) + DeadlockAbortProcess = NewGlobalBoolSetting( + "system.deadlock.AbortProcess", + false, + `Whether the deadlock detector should abort the process`, + ) + DeadlockInterval = NewGlobalDurationSetting( + "system.deadlock.Interval", + 60*time.Second, + `How often the detector checks each root.`, + ) + DeadlockMaxWorkersPerRoot = NewGlobalIntSetting( + "system.deadlock.MaxWorkersPerRoot", + 10, + `How many extra goroutines can be created per root.`, + ) + + NumConsecutiveWorkflowTaskProblemsToTriggerSearchAttribute = NewNamespaceIntSetting( + "system.numConsecutiveWorkflowTaskProblemsToTriggerSearchAttribute", + 5, + `NumConsecutiveWorkflowTaskProblemsToTriggerSearchAttribute is the number of consecutive workflow task problems to trigger the TemporalReportedProblems search attribute. +Setting this to 0 prevents the search attribute from being set when a problem is detected, and unset when the problem is resolved.`, + ) + + PollWaitForNamespaceRateLimitToken = NewNamespaceBoolSetting( + "system.pollWaitForNamespaceRateLimitToken", + false, + `PollWaitForNamespaceRateLimitToken controls whether poll requests wait for +a namespace RPS rate limit token to become available instead of immediately rejecting +with ResourceExhausted. When enabled, poll requests block until a token is available +or the request context deadline is reached. 
The concurrent request rate limiter fires +before this limiter and will still reject requests that exceed the concurrent limit.`, + ) // keys for size limit - // BlobSizeLimitError is the per event blob size limit - BlobSizeLimitError = "limit.blobSize.error" - // BlobSizeLimitWarn is the per event blob size limit for warning - BlobSizeLimitWarn = "limit.blobSize.warn" - // MemoSizeLimitError is the per event memo size limit - MemoSizeLimitError = "limit.memoSize.error" - // MemoSizeLimitWarn is the per event memo size limit for warning - MemoSizeLimitWarn = "limit.memoSize.warn" - // NumPendingChildExecutionsLimitError is the maximum number of pending child workflows a workflow can have before - // StartChildWorkflowExecution commands will fail. - NumPendingChildExecutionsLimitError = "limit.numPendingChildExecutions.error" - // NumPendingActivitiesLimitError is the maximum number of pending activities a workflow can have before - // ScheduleActivityTask will fail. - NumPendingActivitiesLimitError = "limit.numPendingActivities.error" - // NumPendingSignalsLimitError is the maximum number of pending signals a workflow can have before - // SignalExternalWorkflowExecution commands from this workflow will fail. - NumPendingSignalsLimitError = "limit.numPendingSignals.error" - // NumPendingCancelRequestsLimitError is the maximum number of pending requests to cancel other workflows a workflow can have before - // RequestCancelExternalWorkflowExecution commands will fail. 
- NumPendingCancelRequestsLimitError = "limit.numPendingCancelRequests.error" - // HistorySizeLimitError is the per workflow execution history size limit - HistorySizeLimitError = "limit.historySize.error" - // HistorySizeLimitWarn is the per workflow execution history size limit for warning - HistorySizeLimitWarn = "limit.historySize.warn" - // HistorySizeSuggestContinueAsNew is the workflow execution history size limit to suggest - // continue-as-new (in workflow task started event) - HistorySizeSuggestContinueAsNew = "limit.historySize.suggestContinueAsNew" - // HistoryCountLimitError is the per workflow execution history event count limit - HistoryCountLimitError = "limit.historyCount.error" - // HistoryCountLimitWarn is the per workflow execution history event count limit for warning - HistoryCountLimitWarn = "limit.historyCount.warn" - // MutableStateActivityFailureSizeLimitError is the per activity failure size limit for workflow mutable state. - // If exceeded, failure will be truncated before being stored in mutable state. 
- MutableStateActivityFailureSizeLimitError = "limit.mutableStateActivityFailureSize.error" - // MutableStateActivityFailureSizeLimitWarn is the per activity failure size warning limit for workflow mutable state - MutableStateActivityFailureSizeLimitWarn = "limit.mutableStateActivityFailureSize.warn" - // MutableStateSizeLimitError is the per workflow execution mutable state size limit in bytes - MutableStateSizeLimitError = "limit.mutableStateSize.error" - // MutableStateSizeLimitWarn is the per workflow execution mutable state size limit in bytes for warning - MutableStateSizeLimitWarn = "limit.mutableStateSize.warn" - // HistoryCountSuggestContinueAsNew is the workflow execution history event count limit to - // suggest continue-as-new (in workflow task started event) - HistoryCountSuggestContinueAsNew = "limit.historyCount.suggestContinueAsNew" - // HistoryMaxPageSize is default max size for GetWorkflowExecutionHistory in one page - HistoryMaxPageSize = "limit.historyMaxPageSize" - // MaxIDLengthLimit is the length limit for various IDs, including: Namespace, TaskQueue, WorkflowID, ActivityID, TimerID, - // WorkflowType, ActivityType, SignalName, MarkerName, ErrorReason/FailureReason/CancelCause, Identity, RequestID - MaxIDLengthLimit = "limit.maxIDLength" - // WorkerBuildIdSizeLimit is the byte length limit for a worker build id as used in the rpc methods for updating - // the version sets for a task queue. - // Do not set this to a value higher than 255 for clusters using SQL based persistence due to predefined VARCHAR - // column width. - WorkerBuildIdSizeLimit = "limit.workerBuildIdSize" - // VersionCompatibleSetLimitPerQueue is the max number of compatible sets allowed in the versioning data for a task - // queue. Update requests which would cause the versioning data to exceed this number will fail with a - // FailedPrecondition error. 
- VersionCompatibleSetLimitPerQueue = "limit.versionCompatibleSetLimitPerQueue" - // VersionBuildIdLimitPerQueue is the max number of build IDs allowed to be defined in the versioning data for a - // task queue. Update requests which would cause the versioning data to exceed this number will fail with a - // FailedPrecondition error. - VersionBuildIdLimitPerQueue = "limit.versionBuildIdLimitPerQueue" - // ReachabilityTaskQueueScanLimit limits the number of task queues to scan when responding to a - // GetWorkerTaskReachability query. - ReachabilityTaskQueueScanLimit = "limit.reachabilityTaskQueueScan" - // ReachabilityQueryBuildIdLimit limits the number of build ids that can be requested in a single call to the - // GetWorkerTaskReachability API. - ReachabilityQueryBuildIdLimit = "limit.reachabilityQueryBuildIds" - // ReachabilityQuerySetDurationSinceDefault is the minimum period since a version set was demoted from being the - // queue default before it is considered unreachable by new workflows. - // This setting allows some propogation delay of versioning data for the reachability queries, which may happen for - // the following reasons: - // 1. There are no workflows currently marked as open in the visibility store but a worker for the demoted version - // is currently processing a task. - // 2. There are delays in the visibility task processor (which is asynchronous). - // 3. There's propagation delay of the versioning data between matching nodes. - ReachabilityQuerySetDurationSinceDefault = "frontend.reachabilityQuerySetDurationSinceDefault" - // TaskQueuesPerBuildIdLimit limits the number of task queue names that can be mapped to a single build id. 
- TaskQueuesPerBuildIdLimit = "limit.taskQueuesPerBuildId" - // NexusOutgoingServiceURLMaxLength is the maximum length of an outgoing service URL - NexusOutgoingServiceURLMaxLength = "limit.outgoingServiceURLMaxLength" - // NexusOutgoingServiceNameMaxLength is the maximum length of an outgoing service name - NexusOutgoingServiceNameMaxLength = "limit.outgoingServiceNameMaxLength" - // NexusOutgoingServiceListDefaultPageSize is the default page size for listing outgoing services - NexusOutgoingServiceListDefaultPageSize = "limit.outgoingServiceListDefaultPageSize" - // NexusOutgoingServiceListMaxPageSize is the maximum page size for listing outgoing services - NexusOutgoingServiceListMaxPageSize = "limit.outgoingServiceListMaxPageSize" - - // RemovableBuildIdDurationSinceDefault is the minimum duration since a build id was last default in its containing - // set for it to be considered for removal, used by the build id scavenger. - // This setting allows some propogation delay of versioning data, which may happen for the following reasons: - // 1. There are no workflows currently marked as open in the visibility store but a worker for the demoted version - // is currently processing a task. - // 2. There are delays in the visibility task processor (which is asynchronous). - // 3. There's propagation delay of the versioning data between matching nodes. 
- RemovableBuildIdDurationSinceDefault = "worker.removableBuildIdDurationSinceDefault" - // BuildIdScavengerVisibilityRPS is the rate limit for visibility calls from the build id scavenger - BuildIdScavenengerVisibilityRPS = "worker.buildIdScavengerVisibilityRPS" + BlobSizeLimitError = NewNamespaceIntSetting( + "limit.blobSize.error", + 2*1024*1024, + `BlobSizeLimitError is the per event blob size limit`, + ) + BlobSizeLimitWarn = NewNamespaceIntSetting( + "limit.blobSize.warn", + 512*1024, + `BlobSizeLimitWarn is the per event blob size limit for warning`, + ) + MemoSizeLimitError = NewNamespaceIntSetting( + "limit.memoSize.error", + 2*1024*1024, + `MemoSizeLimitError is the per event memo size limit`, + ) + MemoSizeLimitWarn = NewNamespaceIntSetting( + "limit.memoSize.warn", + 2*1024, + `MemoSizeLimitWarn is the per event memo size limit for warning`, + ) + NumPendingChildExecutionsLimitError = NewNamespaceIntSetting( + "limit.numPendingChildExecutions.error", + 2000, + `NumPendingChildExecutionsLimitError is the maximum number of pending child workflows a workflow can have before +StartChildWorkflowExecution commands will fail.`, + ) + NumPendingActivitiesLimitError = NewNamespaceIntSetting( + "limit.numPendingActivities.error", + 2000, + `NumPendingActivitiesLimitError is the maximum number of pending activities a workflow can have before +ScheduleActivityTask will fail.`, + ) + NumPendingSignalsLimitError = NewNamespaceIntSetting( + "limit.numPendingSignals.error", + 2000, + `NumPendingSignalsLimitError is the maximum number of pending signals a workflow can have before +SignalExternalWorkflowExecution commands from this workflow will fail.`, + ) + NumPendingCancelRequestsLimitError = NewNamespaceIntSetting( + "limit.numPendingCancelRequests.error", + 2000, + `NumPendingCancelRequestsLimitError is the maximum number of pending requests to cancel other workflows a workflow can have before +RequestCancelExternalWorkflowExecution commands will fail.`, + ) + 
HistorySizeLimitError = NewNamespaceIntSetting( + "limit.historySize.error", + 50*1024*1024, + `HistorySizeLimitError is the per workflow execution history size limit`, + ) + HistorySizeLimitWarn = NewNamespaceIntSetting( + "limit.historySize.warn", + 10*1024*1024, + `HistorySizeLimitWarn is the per workflow execution history size limit for warning`, + ) + HistorySizeSuggestContinueAsNew = NewNamespaceIntSetting( + "limit.historySize.suggestContinueAsNew", + 4*1024*1024, + `HistorySizeSuggestContinueAsNew is the workflow execution history size limit to suggest +continue-as-new (in workflow task started event)`, + ) + HistoryCountLimitError = NewNamespaceIntSetting( + "limit.historyCount.error", + 50*1024, + `HistoryCountLimitError is the per workflow execution history event count limit`, + ) + HistoryCountLimitWarn = NewNamespaceIntSetting( + "limit.historyCount.warn", + 10*1024, + `HistoryCountLimitWarn is the per workflow execution history event count limit for warning`, + ) + MutableStateActivityFailureSizeLimitError = NewNamespaceIntSetting( + "limit.mutableStateActivityFailureSize.error", + 4*1024, + `MutableStateActivityFailureSizeLimitError is the per activity failure size limit for workflow mutable state. 
+If exceeded, failure will be truncated before being stored in mutable state.`, + ) + MutableStateActivityFailureSizeLimitWarn = NewNamespaceIntSetting( + "limit.mutableStateActivityFailureSize.warn", + 2*1024, + `MutableStateActivityFailureSizeLimitWarn is the per activity failure size warning limit for workflow mutable state`, + ) + MutableStateSizeLimitError = NewGlobalIntSetting( + "limit.mutableStateSize.error", + 8*1024*1024, + `MutableStateSizeLimitError is the per workflow execution mutable state size limit in bytes`, + ) + MutableStateSizeLimitWarn = NewGlobalIntSetting( + "limit.mutableStateSize.warn", + 1*1024*1024, + `MutableStateSizeLimitWarn is the per workflow execution mutable state size limit in bytes for warning`, + ) + MutableStateTombstoneCountLimit = NewGlobalIntSetting( + "limit.mutableStateTombstoneCountLimit", + 16, + `MutableStateTombstoneCountLimit is the maximum number of deleted sub state machines tracked in mutable state.`, + ) + HistoryCountSuggestContinueAsNew = NewNamespaceIntSetting( + "limit.historyCount.suggestContinueAsNew", + 4*1024, + `HistoryCountSuggestContinueAsNew is the workflow execution history event count limit to +suggest continue-as-new (in workflow task started event)`, + ) + HistoryMaxPageSize = NewNamespaceIntSetting( + "limit.historyMaxPageSize", + primitives.GetHistoryMaxPageSize, + `HistoryMaxPageSize is default max size for GetWorkflowExecutionHistory in one page`, + ) + MaxIDLengthLimit = NewGlobalIntSetting( + "limit.maxIDLength", + 1000, + `MaxIDLengthLimit is the length limit for various IDs, including: Namespace, TaskQueue, WorkflowID, ActivityID, TimerID, +WorkflowType, ActivityType, SignalName, MarkerName, ErrorReason/FailureReason/CancelCause, Identity, RequestID`, + ) + WorkerBuildIdSizeLimit = NewGlobalIntSetting( + "limit.workerBuildIdSize", + 255, + `WorkerBuildIdSizeLimit is the byte length limit for a worker build id as used in the rpc methods for updating +the version sets for a task queue. 
+Do not set this to a value higher than 255 for clusters using SQL based persistence due to predefined VARCHAR +column width.`, + ) + VersionCompatibleSetLimitPerQueue = NewNamespaceIntSetting( + "limit.versionCompatibleSetLimitPerQueue", + 10, + `VersionCompatibleSetLimitPerQueue is the max number of compatible sets allowed in the versioning data for a task +queue. Update requests which would cause the versioning data to exceed this number will fail with a +FailedPrecondition error.`, + ) + VersionBuildIdLimitPerQueue = NewNamespaceIntSetting( + "limit.versionBuildIdLimitPerQueue", + 100, + `VersionBuildIdLimitPerQueue is the max number of build IDs allowed to be defined in the versioning data for a +task queue. Update requests which would cause the versioning data to exceed this number will fail with a +FailedPrecondition error.`, + ) + AssignmentRuleLimitPerQueue = NewNamespaceIntSetting( + "limit.wv.AssignmentRuleLimitPerQueue", + 100, + `AssignmentRuleLimitPerQueue is the max number of Build ID assignment rules allowed to be defined in the +versioning data for a task queue. Update requests which would cause the versioning data to exceed this number +will fail with a FailedPrecondition error.`, + ) + RedirectRuleLimitPerQueue = NewNamespaceIntSetting( + "limit.wv.RedirectRuleLimitPerQueue", + 500, + `RedirectRuleLimitPerQueue is the max number of compatible redirect rules allowed to be defined +in the versioning data for a task queue. Update requests which would cause the versioning data to exceed this +number will fail with a FailedPrecondition error.`, + ) + RedirectRuleMaxUpstreamBuildIDsPerQueue = NewNamespaceIntSetting( + "limit.wv.RedirectRuleMaxUpstreamBuildIDsPerQueue", + 50, + `RedirectRuleMaxUpstreamBuildIDsPerQueue is the max number of compatible redirect rules allowed to be connected +in one chain in the versioning data for a task queue. 
Update requests which would cause the versioning data +to exceed this number will fail with a FailedPrecondition error.`, + ) + MatchingDeletedRuleRetentionTime = NewNamespaceDurationSetting( + "matching.wv.DeletedRuleRetentionTime", + 14*24*time.Hour, + `MatchingDeletedRuleRetentionTime is the length of time that deleted Version Assignment Rules and +Deleted Redirect Rules will be kept in the DB (with DeleteTimestamp). After this time, the tombstones are deleted at the next time update of versioning data for the task queue.`, + ) + PollerHistoryTTL = NewNamespaceDurationSetting( + "matching.PollerHistoryTTL", + 5*time.Minute, + `PollerHistoryTTL is the time to live for poller histories in the pollerHistory cache of a physical task queue. Poller histories are fetched when + requiring a list of pollers that polled a given task queue.`, + ) + ReachabilityBuildIdVisibilityGracePeriod = NewNamespaceDurationSetting( + "matching.wv.ReachabilityBuildIdVisibilityGracePeriod", + 3*time.Minute, + `ReachabilityBuildIdVisibilityGracePeriod is the time period for which deleted versioning rules are still considered active +to account for the delay in updating the build id field in visibility. Not yet supported for GetDeploymentReachability. 
We recommend waiting +at least 2 minutes between changing the current deployment and calling GetDeployment, so that newly started workflow executions using the +recently-current deployment can arrive in visibility.`, + ) + VersionDrainageStatusVisibilityGracePeriod = NewNamespaceDurationSetting( + "matching.wv.VersionDrainageStatusVisibilityGracePeriod", + 3*time.Minute, + `VersionDrainageStatusVisibilityGracePeriod is the time period for which non-current / non-ramping worker deployment versions +are still considered active to account for the delay in updating the build id field in visibility.`, + ) + VersionDrainageStatusRefreshInterval = NewNamespaceDurationSetting( + "matching.wv.VersionDrainageStatusRefreshInterval", + 3*time.Minute, + `VersionDrainageStatusRefreshInterval is the interval at which each draining deployment version refreshes its +Drainage Status by querying visibility for open pinned workflows using that version.`, + ) + ReachabilityTaskQueueScanLimit = NewGlobalIntSetting( + "limit.reachabilityTaskQueueScan", + 20, + `ReachabilityTaskQueueScanLimit limits the number of task queues to scan when responding to a +GetWorkerTaskReachability query.`, + ) + ReachabilityQueryBuildIdLimit = NewGlobalIntSetting( + "limit.reachabilityQueryBuildIds", + 5, + `ReachabilityQueryBuildIdLimit limits the number of build ids that can be requested in a single call to the +DescribeTaskQueue API with ReportTaskQueueReachability==true, or to the GetWorkerTaskReachability API.`, + ) + ReachabilityCacheOpenWFsTTL = NewGlobalDurationSetting( + "matching.wv.reachabilityCacheOpenWFsTTL", + time.Minute, + `ReachabilityCacheOpenWFsTTL is the TTL for the reachability open workflows cache.`, + ) + ReachabilityCacheClosedWFsTTL = NewGlobalDurationSetting( + "matching.wv.reachabilityCacheClosedWFsTTL", + 10*time.Minute, + `ReachabilityCacheClosedWFsTTL is the TTL for the reachability closed workflows cache.`, + ) + ReachabilityQuerySetDurationSinceDefault = 
NewGlobalDurationSetting( + "frontend.reachabilityQuerySetDurationSinceDefault", + 5*time.Minute, + `ReachabilityQuerySetDurationSinceDefault is the minimum period since a version set was demoted from being the +queue default before it is considered unreachable by new workflows. +This setting allows some propagation delay of versioning data for the reachability queries, which may happen for +the following reasons: +1. There are no workflows currently marked as open in the visibility store but a worker for the demoted version +is currently processing a task. +2. There are delays in the visibility task processor (which is asynchronous). +3. There's propagation delay of the versioning data between matching nodes.`, + ) + TaskQueuesPerBuildIdLimit = NewNamespaceIntSetting( + "limit.taskQueuesPerBuildId", + 20, + `TaskQueuesPerBuildIdLimit limits the number of task queue names that can be mapped to a single build id.`, + ) + + NexusEndpointNameMaxLength = NewGlobalIntSetting( + "limit.endpointNameMaxLength", + 200, + `NexusEndpointNameMaxLength is the maximum length of a Nexus endpoint name.`, + ) + NexusEndpointExternalURLMaxLength = NewGlobalIntSetting( + "limit.endpointExternalURLMaxLength", + 4*1024, + `NexusEndpointExternalURLMaxLength is the maximum length of a Nexus endpoint external target URL.`, + ) + NexusEndpointDescriptionMaxSize = NewNamespaceIntSetting( + "limit.endpointDescriptionMaxSize", + 20000, + `Maximum size of Nexus Endpoint description payload in bytes including data and metadata.`, + ) + NexusEndpointListDefaultPageSize = NewGlobalIntSetting( + "limit.endpointListDefaultPageSize", + 100, + `NexusEndpointListDefaultPageSize is the default page size for listing Nexus endpoints.`, + ) + NexusEndpointListMaxPageSize = NewGlobalIntSetting( + "limit.endpointListMaxPageSize", + 1000, + `NexusEndpointListMaxPageSize is the maximum page size for listing Nexus endpoints.`, + ) + + RemovableBuildIdDurationSinceDefault = NewGlobalDurationSetting( + 
"worker.removableBuildIdDurationSinceDefault", + time.Hour, + `RemovableBuildIdDurationSinceDefault is the minimum duration since a build id was last default in its containing +set for it to be considered for removal, used by the build id scavenger. +This setting allows some propagation delay of versioning data, which may happen for the following reasons: +1. There are no workflows currently marked as open in the visibility store but a worker for the demoted version +is currently processing a task. +2. There are delays in the visibility task processor (which is asynchronous). +3. There's propagation delay of the versioning data between matching nodes.`, + ) + BuildIdScavengerVisibilityRPS = NewGlobalFloatSetting( + "worker.buildIdScavengerVisibilityRPS", + 1.0, + `BuildIdScavengerVisibilityRPS is the rate limit for visibility calls from the build id scavenger`, + ) // keys for frontend + FrontendAllowedExperiments = NewNamespaceTypedSetting( + "frontend.allowedExperiments", + []string(nil), + `FrontendAllowedExperiments is a list of experiment names that can be enabled via the temporal-experiment header for a specific namespace.`, + ) + FrontendHTTPAllowedHosts = NewGlobalTypedSettingWithConverter( + "frontend.httpAllowedHosts", + ConvertWildcardStringListToRegexp, + MatchAnythingRE, + `HTTP API Requests with a "Host" header matching the allowed hosts will be processed, otherwise rejected. +Wildcards (*) are expanded to allow any substring. By default any Host header is allowed. 
+Concrete type should be list of strings.`, + ) + FrontendPersistenceMaxQPS = NewGlobalIntSetting( + "frontend.persistenceMaxQPS", + 2000, + `FrontendPersistenceMaxQPS is the max qps frontend host can query DB`, + ) + FrontendPersistenceGlobalMaxQPS = NewGlobalIntSetting( + "frontend.persistenceGlobalMaxQPS", + 0, + `FrontendPersistenceGlobalMaxQPS is the max qps frontend cluster can query DB`, + ) + FrontendPersistenceNamespaceMaxQPS = NewNamespaceIntSetting( + "frontend.persistenceNamespaceMaxQPS", + 0, + `FrontendPersistenceNamespaceMaxQPS is the max qps each namespace on frontend host can query DB`, + ) + FrontendPersistenceGlobalNamespaceMaxQPS = NewNamespaceIntSetting( + "frontend.persistenceGlobalNamespaceMaxQPS", + 0, + `FrontendPersistenceGlobalNamespaceMaxQPS is the max qps each namespace in frontend cluster can query DB`, + ) + FrontendPersistenceDynamicRateLimitingParams = NewGlobalTypedSetting( + "frontend.persistenceDynamicRateLimitingParams", + DefaultDynamicRateLimitingParams, + `FrontendPersistenceDynamicRateLimitingParams is a struct that contains all adjustable dynamic rate limiting params. +Fields: Enabled, RefreshInterval, LatencyThreshold, ErrorThreshold, RateBackoffStepSize, RateIncreaseStepSize, RateMultiMin, RateMultiMax. 
+See DynamicRateLimitingParams comments for more details.`, + ) + FrontendVisibilityMaxPageSize = NewNamespaceIntSetting( + "frontend.visibilityMaxPageSize", + 1000, + `FrontendVisibilityMaxPageSize is default max size for ListWorkflowExecutions in one page`, + ) + FrontendHistoryMaxPageSize = NewNamespaceIntSetting( + "frontend.historyMaxPageSize", + primitives.GetHistoryMaxPageSize, + `FrontendHistoryMaxPageSize is default max size for GetWorkflowExecutionHistory in one page`, + ) + FrontendRPS = NewGlobalIntSetting( + "frontend.rps", + 2400, + `FrontendRPS is workflow rate limit per second per-instance`, + ) + FrontendGlobalRPS = NewGlobalIntSetting( + "frontend.globalRPS", + 0, + `FrontendGlobalRPS is workflow rate limit per second for the whole cluster`, + ) + FrontendNamespaceReplicationInducingAPIsRPS = NewGlobalIntSetting( + "frontend.rps.namespaceReplicationInducingAPIs", + 20, + `FrontendNamespaceReplicationInducingAPIsRPS limits the per second request rate for namespace replication inducing +APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). +This config is EXPERIMENTAL and may be changed or removed in a later release.`, + ) + FrontendMaxNamespaceRPSPerInstance = NewNamespaceIntSetting( + "frontend.namespaceRPS", + 2400, + `FrontendMaxNamespaceRPSPerInstance is workflow namespace rate limit per second`, + ) + FrontendMaxNamespaceBurstRatioPerInstance = NewNamespaceFloatSetting( + "frontend.namespaceBurstRatio", + 2, + `FrontendMaxNamespaceBurstRatioPerInstance is workflow namespace burst limit as a ratio of namespace RPS. The RPS +used here will be the effective RPS from global and per-instance limits. 
The value must be 1 or higher.`, + ) + FrontendGlobalWorkerDeploymentReadRPS = NewNamespaceIntSetting( + "frontend.globalNamespaceWorkerDeploymentReadRPS", + 50, + `FrontendGlobalWorkerDeploymentReadRPS is the global, per-namespace rate limit for Worker Deployment Read APIs (DescribeWorkerDeployment, DescribeWorkerDeploymentVersion). The limit is evenly distributed among available frontend service instances.`, + ) + FrontendMaxConcurrentLongRunningRequestsPerInstance = NewNamespaceIntSetting( + "frontend.namespaceCount", + 1200, + `FrontendMaxConcurrentLongRunningRequestsPerInstance limits concurrent long-running requests per-instance, +per-API. Example requests include long-poll requests, and 'Query' requests (which need to wait for WFTs). The +limit is applied individually to each API method. This value is ignored if +FrontendGlobalMaxConcurrentLongRunningRequests is greater than zero. Warning: setting this to zero will cause all +long-running requests to fail. The name 'frontend.namespaceCount' is kept for backwards compatibility with +existing deployments even though it is a bit of a misnomer. This does not limit the number of namespaces; it is a +per-_namespace_ limit on the _count_ of long-running requests. Requests are only throttled when the limit is +exceeded, not when it is only reached.`, + ) + FrontendGlobalMaxConcurrentLongRunningRequests = NewNamespaceIntSetting( + "frontend.globalNamespaceCount", + 0, + `FrontendGlobalMaxConcurrentLongRunningRequests limits concurrent long-running requests across all frontend +instances in the cluster, for a given namespace, per-API method. If this is set to 0 (the default), then it is +ignored. 
The name 'frontend.globalNamespaceCount' is kept for consistency with the per-instance limit name, +'frontend.namespaceCount'.`, + ) + FrontendMaxNamespaceVisibilityRPSPerInstance = NewNamespaceIntSetting( + "frontend.namespaceRPS.visibility", + 10, + `FrontendMaxNamespaceVisibilityRPSPerInstance is namespace rate limit per second for visibility APIs. +This config is EXPERIMENTAL and may be changed or removed in a later release.`, + ) + FrontendMaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance = NewNamespaceIntSetting( + "frontend.namespaceRPS.namespaceReplicationInducingAPIs", + 1, + `FrontendMaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance is a per host/per namespace RPS limit for +namespace replication inducing APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). +This config is EXPERIMENTAL and may be changed or removed in a later release.`, + ) + FrontendMaxNamespaceVisibilityBurstRatioPerInstance = NewNamespaceFloatSetting( + "frontend.namespaceBurstRatio.visibility", + 1, + `FrontendMaxNamespaceVisibilityBurstRatioPerInstance is namespace burst limit for visibility APIs as a ratio of +namespace visibility RPS. The RPS used here will be the effective RPS from global and per-instance limits. This +config is EXPERIMENTAL and may be changed or removed in a later release. The value must be 1 or higher.`, + ) + FrontendMaxNamespaceNamespaceReplicationInducingAPIsBurstRatioPerInstance = NewNamespaceFloatSetting( + "frontend.namespaceBurstRatio.namespaceReplicationInducingAPIs", + 10, + `FrontendMaxNamespaceNamespaceReplicationInducingAPIsBurstRatioPerInstance is a per host/per namespace burst limit for +namespace replication inducing APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility) +as a ratio of namespace ReplicationInducingAPIs RPS. The RPS used here will be the effective RPS from global and +per-instance limits. 
This config is EXPERIMENTAL and may be changed or removed in a later release. The value must +be 1 or higher.`, + ) + FrontendGlobalNamespaceRPS = NewNamespaceIntSetting( + "frontend.globalNamespaceRPS", + 0, + `FrontendGlobalNamespaceRPS is namespace rate limit per second for the whole cluster. +The limit is evenly distributed among available frontend service instances. +If this is set, it overwrites per instance limit "frontend.namespaceRPS".`, + ) + InternalFrontendGlobalNamespaceRPS = NewNamespaceIntSetting( + "internal-frontend.globalNamespaceRPS", + 0, + `InternalFrontendGlobalNamespaceRPS is workflow namespace rate limit per second across +all internal-frontends.`, + ) + FrontendGlobalNamespaceVisibilityRPS = NewNamespaceIntSetting( + "frontend.globalNamespaceRPS.visibility", + 0, + `FrontendGlobalNamespaceVisibilityRPS is workflow namespace rate limit per second for the whole cluster for visibility API. +The limit is evenly distributed among available frontend service instances. +If this is set, it overwrites per instance limit "frontend.namespaceRPS.visibility". +This config is EXPERIMENTAL and may be changed or removed in a later release.`, + ) + FrontendGlobalNamespaceNamespaceReplicationInducingAPIsRPS = NewNamespaceIntSetting( + "frontend.globalNamespaceRPS.namespaceReplicationInducingAPIs", + 10, + `FrontendGlobalNamespaceNamespaceReplicationInducingAPIsRPS is a cluster global, per namespace RPS limit for +namespace replication inducing APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). +The limit is evenly distributed among available frontend service instances. +If this is set, it overwrites the per instance limit configured with +"frontend.namespaceRPS.namespaceReplicationInducingAPIs". 
+This config is EXPERIMENTAL and may be changed or removed in a later release.`, + ) + InternalFrontendGlobalNamespaceVisibilityRPS = NewNamespaceIntSetting( + "internal-frontend.globalNamespaceRPS.visibility", + 0, + `InternalFrontendGlobalNamespaceVisibilityRPS is workflow namespace rate limit per second +across all internal-frontends. +This config is EXPERIMENTAL and may be changed or removed in a later release.`, + ) + FrontendThrottledLogRPS = NewGlobalIntSetting( + "frontend.throttledLogRPS", + 20, + `FrontendThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger`, + ) + FrontendShutdownDrainDuration = NewGlobalDurationSetting( + "frontend.shutdownDrainDuration", + 0*time.Second, + `FrontendShutdownDrainDuration is the duration of traffic drain during shutdown`, + ) + FrontendShutdownFailHealthCheckDuration = NewGlobalDurationSetting( + "frontend.shutdownFailHealthCheckDuration", + 0*time.Second, + `FrontendShutdownFailHealthCheckDuration is the duration of shutdown failure detection`, + ) + FrontendMaxBadBinaries = NewNamespaceIntSetting( + "frontend.maxBadBinaries", + 10, + `FrontendMaxBadBinaries is the max number of bad binaries in namespace config`, + ) + FrontendMaskInternalErrorDetails = NewNamespaceBoolSetting( + "frontend.maskInternalErrorDetails", + true, + `FrontendMaskInternalErrorDetails is whether to replace internal/unknown errors with default error`, + ) + HistoryHostErrorPercentage = NewGlobalFloatSetting( + "frontend.historyHostErrorPercentage", + 0.5, + `HistoryHostErrorPercentage is the proportion of hosts that are unhealthy through observation external to the host and internal host health checks`, + ) + HistoryHostSelfErrorProportion = NewGlobalFloatSetting( + "frontend.historyHostSelfErrorProportion", + 0.05, + `HistoryHostSelfErrorProportion is the proportion of hosts that have marked themselves as not ready -- this could be due to waiting to acquire all shards on startup, or an internal health check 
failure`, + ) + SendRawWorkflowHistory = NewNamespaceBoolSetting( + "frontend.sendRawWorkflowHistory", + false, + `SendRawWorkflowHistory is whether to enable raw history retrieving`, + ) + SearchAttributesNumberOfKeysLimit = NewNamespaceIntSetting( + "frontend.searchAttributesNumberOfKeysLimit", + 100, + `SearchAttributesNumberOfKeysLimit is the limit of number of keys`, + ) + SearchAttributesSizeOfValueLimit = NewNamespaceIntSetting( + "frontend.searchAttributesSizeOfValueLimit", + 2*1024, + `SearchAttributesSizeOfValueLimit is the size limit of each value`, + ) + SearchAttributesTotalSizeLimit = NewNamespaceIntSetting( + "frontend.searchAttributesTotalSizeLimit", + 40*1024, + `SearchAttributesTotalSizeLimit is the size limit of the whole map`, + ) + VisibilityArchivalQueryMaxPageSize = NewGlobalIntSetting( + "frontend.visibilityArchivalQueryMaxPageSize", + 10000, + `VisibilityArchivalQueryMaxPageSize is the maximum page size for a visibility archival query`, + ) + EnableServerVersionCheck = NewGlobalBoolSetting( + "frontend.enableServerVersionCheck", + os.Getenv("TEMPORAL_VERSION_CHECK_DISABLED") == "", + `EnableServerVersionCheck is a flag that controls whether or not periodic version checking is enabled`, + ) + EnableTokenNamespaceEnforcement = NewGlobalBoolSetting( + "frontend.enableTokenNamespaceEnforcement", + true, + `EnableTokenNamespaceEnforcement enables enforcement that namespace in completion token matches namespace of the request`, + ) + DisableListVisibilityByFilter = NewNamespaceBoolSetting( + "frontend.disableListVisibilityByFilter", + false, + `DisableListVisibilityByFilter is config to disable list open/close workflow using filter`, + ) + ExposeAuthorizerErrors = NewGlobalBoolSetting( + "frontend.exposeAuthorizerErrors", + false, + `ExposeAuthorizerErrors controls whether the frontend authorization interceptor will pass through errors returned by +the Authorizer component. 
If false, a generic PermissionDenied error without details will be returned. Default false.`, + ) + EnablePrincipalPropagation = NewNamespaceBoolSetting( + "frontend.enablePrincipalPropagation", + false, + `EnablePrincipalPropagation controls whether the authorization interceptor propagates the authenticated +principal identity as gRPC headers.`, + ) + KeepAliveMinTime = NewGlobalDurationSetting( + "frontend.keepAliveMinTime", + 10*time.Second, + `KeepAliveMinTime is the minimum amount of time a client should wait before sending a keepalive ping.`, + ) + KeepAlivePermitWithoutStream = NewGlobalBoolSetting( + "frontend.keepAlivePermitWithoutStream", + true, + `KeepAlivePermitWithoutStream If true, server allows keepalive pings even when there are no active +streams(RPCs). If false, and client sends ping when there are no active +streams, server will send GOAWAY and close the connection.`, + ) + KeepAliveMaxConnectionIdle = NewGlobalDurationSetting( + "frontend.keepAliveMaxConnectionIdle", + 2*time.Minute, + `KeepAliveMaxConnectionIdle is a duration for the amount of time after which an +idle connection would be closed by sending a GoAway. Idleness duration is +defined since the most recent time the number of outstanding RPCs became +zero or the connection establishment.`, + ) + KeepAliveMaxConnectionAge = NewGlobalDurationSetting( + "frontend.keepAliveMaxConnectionAge", + 5*time.Minute, + `KeepAliveMaxConnectionAge is a duration for the maximum amount of time a +connection may exist before it will be closed by sending a GoAway. 
A +random jitter of +/-10% will be added to MaxConnectionAge to spread out +connection storms.`, + ) + KeepAliveMaxConnectionAgeGrace = NewGlobalDurationSetting( + "frontend.keepAliveMaxConnectionAgeGrace", + 70*time.Second, + `KeepAliveMaxConnectionAgeGrace is an additive period after MaxConnectionAge after +which the connection will be forcibly closed.`, + ) + KeepAliveTime = NewGlobalDurationSetting( + "frontend.keepAliveTime", + 1*time.Minute, + `KeepAliveTime After a duration of this time if the server doesn't see any activity it +pings the client to see if the transport is still alive. +If set below 1s, a minimum value of 1s will be used instead.`, + ) + KeepAliveTimeout = NewGlobalDurationSetting( + "frontend.keepAliveTimeout", + 10*time.Second, + `KeepAliveTimeout After having pinged for keepalive check, the server waits for a duration +of Timeout and if no activity is seen even after that the connection is closed.`, + ) + FrontendEnableSchedules = NewNamespaceBoolSetting( + "frontend.enableSchedules", + true, + `FrontendEnableSchedules enables schedule-related RPCs in the frontend`, + ) + // [cleanup-wv-pre-release] + EnableDeployments = NewNamespaceBoolSetting( + "system.enableDeployments", + false, + `EnableDeployments enables deployments (deprecated versioning v3 pre-release) in all services, +including deployment-related RPCs in the frontend, deployment entity workflows in the worker, +and deployment interaction in matching and history.`, + ) + EnableDeploymentVersions = NewNamespaceBoolSetting( + "system.enableDeploymentVersions", + true, + `EnableDeploymentVersions enables deployment versions (versioning v3) in all services, +including deployment-related RPCs in the frontend, deployment version entity workflows in the worker, +and deployment interaction in matching and history.`, + ) + UseRevisionNumberForWorkerVersioning = NewNamespaceBoolSetting( + "system.useRevisionNumberForWorkerVersioning", + true, + `UseRevisionNumberForWorkerVersioning 
enables the use of revision number to resolve consistency problems that may arise during task dispatch time.`, + ) + EnableSuggestCaNOnNewTargetVersion = NewNamespaceBoolSetting( + "system.enableSuggestCaNOnNewTargetVersion", + false, + `EnableSuggestCaNOnNewTargetVersion lets Pinned workflows receive SuggestContinueAsNew when a new target version is available.`, + ) + EnableSendTargetVersionChanged = NewNamespaceBoolSetting( + "system.enableSendTargetVersionChanged", + true, + `EnableSendTargetVersionChanged lets Pinned workflows receive TargetWorkerDeploymentVersionChanged=true when a new target version is available for that workflow.`, + ) + AllowDeleteNamespaceIfNexusEndpointTarget = NewGlobalBoolSetting( + "frontend.allowDeleteNamespaceIfNexusEndpointTarget", + false, + `If set to true (default is false), namespaces that are Nexus endpoint targets can be deleted.`, + ) + + RefreshNexusEndpointsLongPollTimeout = NewGlobalDurationSetting( + "system.refreshNexusEndpointsLongPollTimeout", + 5*time.Minute, + `RefreshNexusEndpointsLongPollTimeout is the maximum duration of background long poll requests to update Nexus endpoints.`, + ) + RefreshNexusEndpointsMinWait = NewGlobalDurationSetting( + "system.refreshNexusEndpointsMinWait", + 1*time.Second, + `RefreshNexusEndpointsMinWait is the minimum wait time between background long poll requests to update Nexus endpoints.`, + ) + ForceNexusEndpointRefreshOnRead = NewGlobalBoolSetting( + "system.forceNexusEndpointRefreshOnRead", + false, + `ForceNexusEndpointRefreshOnRead forces the Nexus endpoint registry to refresh from matching service on read. +This effectively bypasses the cache so that endpoint writes are visible to readers immediately, instead of after the 
This should not be turned on in production, as it would introduce scalability +and reliability problems.`, + ) + NexusReadThroughCacheSize = NewGlobalIntSetting( + "system.nexusReadThroughCacheSize", + 100, + `The size of the Nexus endpoint registry's readthrough LRU cache - the cache is a secondary cache and is only +used when the first cache layer has a miss. Requires server restart for change to be applied.`, + ) + NexusReadThroughCacheTTL = NewGlobalDurationSetting( + "system.nexusReadThroughCacheTTL", + 30*time.Second, + `The TTL of the Nexus endpoint registry's readthrough LRU cache - the cache is a secondary cache and is only +used when the first cache layer has a miss. Requires server restart for change to be applied.`, + ) + FrontendNexusRequestHeadersBlacklist = NewGlobalTypedSettingWithConverter( + "frontend.nexusRequestHeadersBlacklist", + ConvertWildcardStringListToRegexp, + // Failure support is an internal implementation detail that shouldn't propagate to the user. + util.MustWildCardStringsToRegexp([]string{ + "accept-encoding", + "x-forwarded-for", + "xdc-redirection", + "xdc-redirection-api", + "temporal-nexus-failure-support", + }), + `Nexus request headers to be removed before being sent to a user handler. Wildcards (*) are expanded to +allow any substring. By default headers that are meant for internal use are disallowed. Concrete type should be list of +strings.`, + ) + FrontendNexusForwardRequestUseEndpointDispatch = NewGlobalBoolSetting( + "frontend.nexusForwardRequestUseEndpointDispatch", + false, + `!EXPERIMENTAL! NB: This config will be removed in a future release. Controls whether to use Nexus +task dispatch by endpoint URLs for forwarded Nexus requests. If set to true, forwarded requests will use the same +dispatch type (by endpoint or by namespace + task queue) as the original request. If false, dispatch by namespace + task +queue will always be used for forwarded requests. 
Defaults to false because Nexus endpoints do not support replication, +so forwarding by endpoint ID will not work out of the box.`, + ) + FrontendCallbackURLMaxLength = NewNamespaceIntSetting( + "frontend.callbackURLMaxLength", + 1000, + `FrontendCallbackURLMaxLength is the maximum length of callback URL`, + ) + FrontendCallbackHeaderMaxSize = NewNamespaceIntSetting( + "frontend.callbackHeaderMaxLength", + 8*1024, + `FrontendCallbackHeaderMaxSize is the maximum accumulated size of callback header keys and values`, + ) + MaxCallbacksPerWorkflow = NewNamespaceIntSetting( + "system.maxCallbacksPerWorkflow", + 32, + `MaxCallbacksPerWorkflow is the maximum number of callbacks that can be attached to a workflow.`, + ) + FrontendLinkMaxSize = NewNamespaceIntSetting( + "frontend.linkMaxSize", + 4000, // Links may include a workflow ID and namespace name, both of which are limited to a length of 1000. + `Maximum size in bytes of temporal.api.common.v1.Link object in an API request.`, + ) + FrontendMaxLinksPerRequest = NewNamespaceIntSetting( + "frontend.maxlinksPerRequest", + 10, + `Maximum number of links allowed to be attached via a single API request.`, + ) + FrontendMaxConcurrentBatchOperationPerNamespace = NewNamespaceIntSetting( + "frontend.MaxConcurrentBatchOperationPerNamespace", + 1, + `FrontendMaxConcurrentBatchOperationPerNamespace is the max concurrent batch operation job count per namespace`, + ) + FrontendMaxExecutionCountBatchOperationPerNamespace = NewNamespaceIntSetting( + "frontend.MaxExecutionCountBatchOperationPerNamespace", + 1000, + `FrontendMaxExecutionCountBatchOperationPerNamespace is the max execution count batch operation supports per namespace`, + ) + FrontendEnableBatcher = NewNamespaceBoolSetting( + "frontend.enableBatcher", + true, + `FrontendEnableBatcher enables batcher-related RPCs in the frontend`, + ) + FrontendMaxConcurrentAdminBatchOperationPerNamespace = NewNamespaceIntSetting( + "frontend.MaxConcurrentAdminBatchOperationPerNamespace", 
+ 1, + `FrontendMaxConcurrentAdminBatchOperationPerNamespace is the max concurrent admin batch operation job count per namespace`, + ) + + FrontendEnableUpdateWorkflowExecution = NewNamespaceBoolSetting( + "frontend.enableUpdateWorkflowExecution", + true, + `FrontendEnableUpdateWorkflowExecution enables UpdateWorkflowExecution API in the frontend.`, + ) + + FrontendEnableUpdateWorkflowExecutionAsyncAccepted = NewNamespaceBoolSetting( + "frontend.enableUpdateWorkflowExecutionAsyncAccepted", + true, + `FrontendEnableUpdateWorkflowExecutionAsyncAccepted enables the UpdateWorkflowExecution API +to allow waiting on the "Accepted" lifecycle stage.`, + ) - // FrontendPersistenceMaxQPS is the max qps frontend host can query DB - FrontendPersistenceMaxQPS = "frontend.persistenceMaxQPS" - // FrontendPersistenceGlobalMaxQPS is the max qps frontend cluster can query DB - FrontendPersistenceGlobalMaxQPS = "frontend.persistenceGlobalMaxQPS" - // FrontendPersistenceNamespaceMaxQPS is the max qps each namespace on frontend host can query DB - FrontendPersistenceNamespaceMaxQPS = "frontend.persistenceNamespaceMaxQPS" - // FrontendPersistenceNamespaceMaxQPS is the max qps each namespace in frontend cluster can query DB - FrontendPersistenceGlobalNamespaceMaxQPS = "frontend.persistenceGlobalNamespaceMaxQPS" - // FrontendEnablePersistencePriorityRateLimiting indicates if priority rate limiting is enabled in frontend persistence client - FrontendEnablePersistencePriorityRateLimiting = "frontend.enablePersistencePriorityRateLimiting" - // FrontendPersistenceDynamicRateLimitingParams is a map that contains all adjustable dynamic rate limiting params - // see DefaultDynamicRateLimitingParams for available options and defaults - FrontendPersistenceDynamicRateLimitingParams = "frontend.persistenceDynamicRateLimitingParams" - // FrontendVisibilityMaxPageSize is default max size for ListWorkflowExecutions in one page - FrontendVisibilityMaxPageSize = "frontend.visibilityMaxPageSize" - // 
FrontendHistoryMaxPageSize is default max size for GetWorkflowExecutionHistory in one page - FrontendHistoryMaxPageSize = "frontend.historyMaxPageSize" - // FrontendRPS is workflow rate limit per second per-instance - FrontendRPS = "frontend.rps" - // FrontendGlobalRPS is workflow rate limit per second for the whole cluster - FrontendGlobalRPS = "frontend.globalRPS" - // FrontendNamespaceReplicationInducingAPIsRPS limits the per second request rate for namespace replication inducing - // APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). - // This config is EXPERIMENTAL and may be changed or removed in a later release. - FrontendNamespaceReplicationInducingAPIsRPS = "frontend.rps.namespaceReplicationInducingAPIs" - // FrontendMaxNamespaceRPSPerInstance is workflow namespace rate limit per second - FrontendMaxNamespaceRPSPerInstance = "frontend.namespaceRPS" - // FrontendMaxNamespaceBurstRatioPerInstance is workflow namespace burst limit as a ratio of namespace RPS. The RPS - // used here will be the effective RPS from global and per-instance limits. The value must be 1 or higher. - FrontendMaxNamespaceBurstRatioPerInstance = "frontend.namespaceBurstRatio" - // FrontendMaxConcurrentLongRunningRequestsPerInstance limits concurrent long-running requests per-instance, - // per-API. Example requests include long-poll requests, and `Query` requests (which need to wait for WFTs). The - // limit is applied individually to each API method. This value is ignored if - // FrontendGlobalMaxConcurrentLongRunningRequests is greater than zero. Warning: setting this to zero will cause all - // long-running requests to fail. The name `frontend.namespaceCount` is kept for backwards compatibility with - // existing deployments even though it is a bit of a misnomer. This does not limit the number of namespaces; it is a - // per-_namespace_ limit on the _count_ of long-running requests. 
Requests are only throttled when the limit is - // exceeded, not when it is only reached. - FrontendMaxConcurrentLongRunningRequestsPerInstance = "frontend.namespaceCount" - // FrontendGlobalMaxConcurrentLongRunningRequests limits concurrent long-running requests across all frontend - // instances in the cluster, for a given namespace, per-API method. If this is set to 0 (the default), then it is - // ignored. The name `frontend.globalNamespaceCount` is kept for consistency with the per-instance limit name, - // `frontend.namespaceCount`. - FrontendGlobalMaxConcurrentLongRunningRequests = "frontend.globalNamespaceCount" - // FrontendMaxNamespaceVisibilityRPSPerInstance is namespace rate limit per second for visibility APIs. - // This config is EXPERIMENTAL and may be changed or removed in a later release. - FrontendMaxNamespaceVisibilityRPSPerInstance = "frontend.namespaceRPS.visibility" - // FrontendMaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance is a per host/per namespace RPS limit for - // namespace replication inducing APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). - // This config is EXPERIMENTAL and may be changed or removed in a later release. - FrontendMaxNamespaceNamespaceReplicationInducingAPIsRPSPerInstance = "frontend.namespaceRPS.namespaceReplicationInducingAPIs" - // FrontendMaxNamespaceVisibilityBurstRatioPerInstance is namespace burst limit for visibility APIs as a ratio of - // namespace visibility RPS. The RPS used here will be the effective RPS from global and per-instance limits. This - // config is EXPERIMENTAL and may be changed or removed in a later release. The value must be 1 or higher. - FrontendMaxNamespaceVisibilityBurstRatioPerInstance = "frontend.namespaceBurstRatio.visibility" - // FrontendMaxNamespaceNamespaceReplicationInducingAPIsBurstRatioPerInstance is a per host/per namespace burst limit for - // namespace replication inducing APIs (e.g. 
RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility) - // as a ratio of namespace ReplicationInducingAPIs RPS. The RPS used here will be the effective RPS from global and - // per-instance limits. This config is EXPERIMENTAL and may be changed or removed in a later release. The value must - // be 1 or higher. - FrontendMaxNamespaceNamespaceReplicationInducingAPIsBurstRatioPerInstance = "frontend.namespaceBurstRatio.namespaceReplicationInducingAPIs" - // FrontendGlobalNamespaceRPS is workflow namespace rate limit per second for the whole cluster. - // The limit is evenly distributed among available frontend service instances. - // If this is set, it overwrites per instance limit "frontend.namespaceRPS". - FrontendGlobalNamespaceRPS = "frontend.globalNamespaceRPS" - // InternalFrontendGlobalNamespaceRPS is workflow namespace rate limit per second across - // all internal-frontends. - InternalFrontendGlobalNamespaceRPS = "internal-frontend.globalNamespaceRPS" - // FrontendGlobalNamespaceVisibilityRPS is workflow namespace rate limit per second for the whole cluster for visibility API. - // The limit is evenly distributed among available frontend service instances. - // If this is set, it overwrites per instance limit "frontend.namespaceRPS.visibility". - // This config is EXPERIMENTAL and may be changed or removed in a later release. - FrontendGlobalNamespaceVisibilityRPS = "frontend.globalNamespaceRPS.visibility" - // FrontendGlobalNamespaceNamespaceReplicationInducingAPIsRPS is a cluster global, per namespace RPS limit for - // namespace replication inducing APIs (e.g. RegisterNamespace, UpdateNamespace, UpdateWorkerBuildIdCompatibility). - // The limit is evenly distributed among available frontend service instances. - // If this is set, it overwrites the per instance limit configured with - // "frontend.namespaceRPS.namespaceReplicationInducingAPIs". - // This config is EXPERIMENTAL and may be changed or removed in a later release. 
- FrontendGlobalNamespaceNamespaceReplicationInducingAPIsRPS = "frontend.globalNamespaceRPS.namespaceReplicationInducingAPIs" - // InternalFrontendGlobalNamespaceVisibilityRPS is workflow namespace rate limit per second - // across all internal-frontends. - // This config is EXPERIMENTAL and may be changed or removed in a later release. - InternalFrontendGlobalNamespaceVisibilityRPS = "internal-frontend.globalNamespaceRPS.visibility" - // FrontendThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger - FrontendThrottledLogRPS = "frontend.throttledLogRPS" - // FrontendShutdownDrainDuration is the duration of traffic drain during shutdown - FrontendShutdownDrainDuration = "frontend.shutdownDrainDuration" - // FrontendShutdownFailHealthCheckDuration is the duration of shutdown failure detection - FrontendShutdownFailHealthCheckDuration = "frontend.shutdownFailHealthCheckDuration" - // FrontendMaxBadBinaries is the max number of bad binaries in namespace config - FrontendMaxBadBinaries = "frontend.maxBadBinaries" - // SendRawWorkflowHistory is whether to enable raw history retrieving - SendRawWorkflowHistory = "frontend.sendRawWorkflowHistory" - // SearchAttributesNumberOfKeysLimit is the limit of number of keys - SearchAttributesNumberOfKeysLimit = "frontend.searchAttributesNumberOfKeysLimit" - // SearchAttributesSizeOfValueLimit is the size limit of each value - SearchAttributesSizeOfValueLimit = "frontend.searchAttributesSizeOfValueLimit" - // SearchAttributesTotalSizeLimit is the size limit of the whole map - SearchAttributesTotalSizeLimit = "frontend.searchAttributesTotalSizeLimit" - // VisibilityArchivalQueryMaxPageSize is the maximum page size for a visibility archival query - VisibilityArchivalQueryMaxPageSize = "frontend.visibilityArchivalQueryMaxPageSize" - // EnableServerVersionCheck is a flag that controls whether or not periodic version checking is enabled - EnableServerVersionCheck = 
"frontend.enableServerVersionCheck" - // EnableTokenNamespaceEnforcement enables enforcement that namespace in completion token matches namespace of the request - EnableTokenNamespaceEnforcement = "frontend.enableTokenNamespaceEnforcement" - // DisableListVisibilityByFilter is config to disable list open/close workflow using filter - DisableListVisibilityByFilter = "frontend.disableListVisibilityByFilter" - // KeepAliveMinTime is the minimum amount of time a client should wait before sending a keepalive ping. - KeepAliveMinTime = "frontend.keepAliveMinTime" - // KeepAlivePermitWithoutStream If true, server allows keepalive pings even when there are no active - // streams(RPCs). If false, and client sends ping when there are no active - // streams, server will send GOAWAY and close the connection. - KeepAlivePermitWithoutStream = "frontend.keepAlivePermitWithoutStream" - // KeepAliveMaxConnectionIdle is a duration for the amount of time after which an - // idle connection would be closed by sending a GoAway. Idleness duration is - // defined since the most recent time the number of outstanding RPCs became - // zero or the connection establishment. - KeepAliveMaxConnectionIdle = "frontend.keepAliveMaxConnectionIdle" - // KeepAliveMaxConnectionAge is a duration for the maximum amount of time a - // connection may exist before it will be closed by sending a GoAway. A - // random jitter of +/-10% will be added to MaxConnectionAge to spread out - // connection storms. - KeepAliveMaxConnectionAge = "frontend.keepAliveMaxConnectionAge" - // KeepAliveMaxConnectionAgeGrace is an additive period after MaxConnectionAge after - // which the connection will be forcibly closed. - KeepAliveMaxConnectionAgeGrace = "frontend.keepAliveMaxConnectionAgeGrace" - // KeepAliveTime After a duration of this time if the server doesn't see any activity it - // pings the client to see if the transport is still alive. - // If set below 1s, a minimum value of 1s will be used instead. 
- KeepAliveTime = "frontend.keepAliveTime" - // KeepAliveTimeout After having pinged for keepalive check, the server waits for a duration - // of Timeout and if no activity is seen even after that the connection is closed. - KeepAliveTimeout = "frontend.keepAliveTimeout" - // FrontendEnableSchedules enables schedule-related RPCs in the frontend - FrontendEnableSchedules = "frontend.enableSchedules" - // FrontendEnableNexusHTTPHandler enables serving Nexus HTTP requests in the frontend. - FrontendEnableNexusHTTPHandler = "frontend.enableNexusHTTPHandler" - // FrontendEnableCallbackAttachment enables attaching callbacks to workflows. - FrontendEnableCallbackAttachment = "frontend.enableCallbackAttachment" - // FrontendMaxConcurrentBatchOperationPerNamespace is the max concurrent batch operation job count per namespace - FrontendMaxConcurrentBatchOperationPerNamespace = "frontend.MaxConcurrentBatchOperationPerNamespace" - // FrontendMaxExecutionCountBatchOperationPerNamespace is the max execution count batch operation supports per namespace - FrontendMaxExecutionCountBatchOperationPerNamespace = "frontend.MaxExecutionCountBatchOperationPerNamespace" - // FrontendEnableBatcher enables batcher-related RPCs in the frontend - FrontendEnableBatcher = "frontend.enableBatcher" - // FrontendAccessHistoryFraction (0.0~1.0) is the fraction of history operations that are sent to the history - // service using the new RPCs. The remaining access history via the existing implementation. - // TODO: remove once migration completes. - FrontendAccessHistoryFraction = "frontend.accessHistoryFraction" - // FrontendAdminDeleteAccessHistoryFraction (0.0~1.0) is the fraction of admin DeleteWorkflowExecution requests - // that are sent to the history service using the new RPCs. The remaining access history via the existing implementation. - // TODO: remove once migration completes. 
- FrontendAdminDeleteAccessHistoryFraction = "frontend.adminDeleteAccessHistoryFraction" - - // FrontendEnableUpdateWorkflowExecution enables UpdateWorkflowExecution API in the frontend. - // The UpdateWorkflowExecution API has gone through rigorous testing efforts but this config's default is `false` until the - // feature gets more time in production. - FrontendEnableUpdateWorkflowExecution = "frontend.enableUpdateWorkflowExecution" - - // FrontendEnableUpdateWorkflowExecutionAsyncAccepted enables the form of - // asynchronous workflow execution update that waits on the "Accepted" - // lifecycle stage. Default value is `false`. - FrontendEnableUpdateWorkflowExecutionAsyncAccepted = "frontend.enableUpdateWorkflowExecutionAsyncAccepted" - - // EnableWorkflowIdConflictPolicy enables the `WorkflowIdConflictPolicy` option for Start and Signal-with-Start - EnableWorkflowIdConflictPolicy = "frontend.enableWorkflowIdConflictPolicy" - - // FrontendEnableWorkerVersioningDataAPIs enables worker versioning data read / write APIs. - FrontendEnableWorkerVersioningDataAPIs = "frontend.workerVersioningDataAPIs" - // FrontendEnableWorkerVersioningWorkflowAPIs enables worker versioning in workflow progress APIs. - FrontendEnableWorkerVersioningWorkflowAPIs = "frontend.workerVersioningWorkflowAPIs" - - // DeleteNamespaceDeleteActivityRPS is an RPS per every parallel delete executions activity. - // Total RPS is equal to DeleteNamespaceDeleteActivityRPS * DeleteNamespaceConcurrentDeleteExecutionsActivities. - // Default value is 100. - DeleteNamespaceDeleteActivityRPS = "frontend.deleteNamespaceDeleteActivityRPS" - // DeleteNamespacePageSize is a page size to read executions from visibility for delete executions activity. - // Default value is 1000. - DeleteNamespacePageSize = "frontend.deleteNamespaceDeletePageSize" - // DeleteNamespacePagesPerExecution is a number of pages before returning ContinueAsNew from delete executions activity. - // Default value is 256. 
- DeleteNamespacePagesPerExecution = "frontend.deleteNamespacePagesPerExecution" - // DeleteNamespaceConcurrentDeleteExecutionsActivities is a number of concurrent delete executions activities. - // Must be not greater than 256 and number of worker cores in the cluster. - // Default is 4. - DeleteNamespaceConcurrentDeleteExecutionsActivities = "frontend.deleteNamespaceConcurrentDeleteExecutionsActivities" - // DeleteNamespaceNamespaceDeleteDelay is a duration for how long namespace stays in database - // after all namespace resources (i.e. workflow executions) are deleted. - // Default is 0, means, namespace will be deleted immediately. - DeleteNamespaceNamespaceDeleteDelay = "frontend.deleteNamespaceNamespaceDeleteDelay" + FrontendEnableWorkerVersioningDataAPIs = NewNamespaceBoolSetting( + "frontend.workerVersioningDataAPIs", + false, + `FrontendEnableWorkerVersioningDataAPIs enables worker versioning data read / write APIs.`, + ) + FrontendEnableWorkerVersioningWorkflowAPIs = NewNamespaceBoolSetting( + "frontend.workerVersioningWorkflowAPIs", + true, + `FrontendEnableWorkerVersioningWorkflowAPIs enables worker versioning in workflow progress APIs.`, + ) + FrontendEnableWorkerVersioningRuleAPIs = NewNamespaceBoolSetting( + "frontend.workerVersioningRuleAPIs", + false, + `FrontendEnableWorkerVersioningRuleAPIs enables worker versioning rule read / write APIs.`, + ) + + DeleteNamespaceDeleteActivityRPS = NewGlobalIntSetting( + "frontend.deleteNamespaceDeleteActivityRPS", + 100, + `DeleteNamespaceDeleteActivityRPS is an RPS per every parallel delete executions activity. +Total RPS is equal to DeleteNamespaceDeleteActivityRPS * DeleteNamespaceConcurrentDeleteExecutionsActivities. +Default value is 100. Despite starting with 'frontend.' 
this setting is used by a worker and can be changed while namespace is deleted.`, + ) + DeleteNamespacePageSize = NewGlobalIntSetting( + "frontend.deleteNamespaceDeletePageSize", + 1000, + `DeleteNamespacePageSize is a page size to read executions from visibility for delete executions activity. +Default value is 1000. Read once before delete of specified namespace is started.`, + ) + DeleteNamespacePagesPerExecution = NewGlobalIntSetting( + "frontend.deleteNamespacePagesPerExecution", + 256, + `DeleteNamespacePagesPerExecution is a number of pages before returning ContinueAsNew from delete executions activity. +Default value is 256. Read once before delete of specified namespace is started.`, + ) + DeleteNamespaceConcurrentDeleteExecutionsActivities = NewGlobalIntSetting( + "frontend.deleteNamespaceConcurrentDeleteExecutionsActivities", + 4, + `DeleteNamespaceConcurrentDeleteExecutionsActivities is a number of concurrent delete executions activities. +Must be not greater than 256 and number of worker cores in the cluster. +Default is 4. Read once before delete of specified namespace is started.`, + ) + DeleteNamespaceNamespaceDeleteDelay = NewGlobalDurationSetting( + "frontend.deleteNamespaceNamespaceDeleteDelay", + 0*time.Hour, + `DeleteNamespaceNamespaceDeleteDelay is a duration for how long namespace stays in database +after all namespace resources (i.e. workflow executions) are deleted. 
+Default is 0, means, namespace will be deleted immediately.`, + ) + ProtectedNamespaces = NewGlobalTypedSetting( + "worker.protectedNamespaces", + ([]string)(nil), + `List of namespace names that can't be deleted.`, + ) // keys for matching - // MatchingRPS is request rate per second for each matching host - MatchingRPS = "matching.rps" - // MatchingPersistenceMaxQPS is the max qps matching host can query DB - MatchingPersistenceMaxQPS = "matching.persistenceMaxQPS" - // MatchingPersistenceGlobalMaxQPS is the max qps matching cluster can query DB - MatchingPersistenceGlobalMaxQPS = "matching.persistenceGlobalMaxQPS" - // MatchingPersistenceNamespaceMaxQPS is the max qps each namespace on matching host can query DB - MatchingPersistenceNamespaceMaxQPS = "matching.persistenceNamespaceMaxQPS" - // MatchingPersistenceNamespaceMaxQPS is the max qps each namespace in matching cluster can query DB - MatchingPersistenceGlobalNamespaceMaxQPS = "matching.persistenceGlobalNamespaceMaxQPS" - // MatchingEnablePersistencePriorityRateLimiting indicates if priority rate limiting is enabled in matching persistence client - MatchingEnablePersistencePriorityRateLimiting = "matching.enablePersistencePriorityRateLimiting" - // MatchingPersistenceDynamicRateLimitingParams is a map that contains all adjustable dynamic rate limiting params - // see DefaultDynamicRateLimitingParams for available options and defaults - MatchingPersistenceDynamicRateLimitingParams = "matching.persistenceDynamicRateLimitingParams" - // MatchingMinTaskThrottlingBurstSize is the minimum burst size for task queue throttling - MatchingMinTaskThrottlingBurstSize = "matching.minTaskThrottlingBurstSize" - // MatchingGetTasksBatchSize is the maximum batch size to fetch from the task buffer - MatchingGetTasksBatchSize = "matching.getTasksBatchSize" - // MatchingLongPollExpirationInterval is the long poll expiration interval in the matching service - MatchingLongPollExpirationInterval = 
"matching.longPollExpirationInterval" - // MatchingSyncMatchWaitDuration is to wait time for sync match - MatchingSyncMatchWaitDuration = "matching.syncMatchWaitDuration" - // MatchingHistoryMaxPageSize is the maximum page size of history events returned on PollWorkflowTaskQueue requests - MatchingHistoryMaxPageSize = "matching.historyMaxPageSize" - // MatchingLoadUserData can be used to entirely disable loading user data from persistence (and the inter node RPCs - // that propoagate it). When turned off, features that rely on user data (e.g. worker versioning) will essentially - // be disabled. When disabled, matching will drop tasks for versioned workflows and activities to avoid breaking - // versioning semantics. Operator intervention will be required to reschedule the dropped tasks. - MatchingLoadUserData = "matching.loadUserData" - // MatchingUpdateAckInterval is the interval for update ack - MatchingUpdateAckInterval = "matching.updateAckInterval" - // MatchingMaxTaskQueueIdleTime is the time after which an idle task queue will be unloaded. - // Note: this should be greater than matching.longPollExpirationInterval and matching.getUserDataLongPollTimeout. 
- MatchingMaxTaskQueueIdleTime = "matching.maxTaskQueueIdleTime" - // MatchingOutstandingTaskAppendsThreshold is the threshold for outstanding task appends - MatchingOutstandingTaskAppendsThreshold = "matching.outstandingTaskAppendsThreshold" - // MatchingMaxTaskBatchSize is max batch size for task writer - MatchingMaxTaskBatchSize = "matching.maxTaskBatchSize" - // MatchingMaxTaskDeleteBatchSize is the max batch size for range deletion of tasks - MatchingMaxTaskDeleteBatchSize = "matching.maxTaskDeleteBatchSize" - // MatchingThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger - MatchingThrottledLogRPS = "matching.throttledLogRPS" - // MatchingNumTaskqueueWritePartitions is the number of write partitions for a task queue - MatchingNumTaskqueueWritePartitions = "matching.numTaskqueueWritePartitions" - // MatchingNumTaskqueueReadPartitions is the number of read partitions for a task queue - MatchingNumTaskqueueReadPartitions = "matching.numTaskqueueReadPartitions" - // MatchingForwarderMaxOutstandingPolls is the max number of inflight polls from the forwarder - MatchingForwarderMaxOutstandingPolls = "matching.forwarderMaxOutstandingPolls" - // MatchingForwarderMaxOutstandingTasks is the max number of inflight addTask/queryTask from the forwarder - MatchingForwarderMaxOutstandingTasks = "matching.forwarderMaxOutstandingTasks" - // MatchingForwarderMaxRatePerSecond is the max rate at which add/query can be forwarded - MatchingForwarderMaxRatePerSecond = "matching.forwarderMaxRatePerSecond" - // MatchingForwarderMaxChildrenPerNode is the max number of children per node in the task queue partition tree - MatchingForwarderMaxChildrenPerNode = "matching.forwarderMaxChildrenPerNode" - // MatchingAlignMembershipChange is a duration to align matching's membership changes to. - // This can help reduce effects of task queue movement. 
- MatchingAlignMembershipChange = "matching.alignMembershipChange" - // MatchingShutdownDrainDuration is the duration of traffic drain during shutdown - MatchingShutdownDrainDuration = "matching.shutdownDrainDuration" - // MatchingGetUserDataLongPollTimeout is the max length of long polls for GetUserData calls between partitions. - MatchingGetUserDataLongPollTimeout = "matching.getUserDataLongPollTimeout" - // MatchingBacklogNegligibleAge if the head of backlog gets older than this we stop sync match and - // forwarding to ensure more equal dispatch order among partitions. - MatchingBacklogNegligibleAge = "matching.backlogNegligibleAge" - // MatchingMaxWaitForPollerBeforeFwd in presence of a non-negligible backlog, we resume forwarding tasks if the - // duration since last poll exceeds this threshold. - MatchingMaxWaitForPollerBeforeFwd = "matching.maxWaitForPollerBeforeFwd" - // QueryPollerUnavailableWindow WF Queries are rejected after a while if no poller has been seen within the window - QueryPollerUnavailableWindow = "matching.queryPollerUnavailableWindow" - // MatchingListNexusIncomingServicesLongPollTimeout is the max length of long polls for ListNexusIncomingServices calls. - MatchingListNexusIncomingServicesLongPollTimeout = "matching.listNexusIncomingServicesLongPollTimeout" - // MatchingMembershipUnloadDelay is how long to wait to re-confirm loss of ownership before unloading a task queue. - // Set to zero to disable proactive unload. - MatchingMembershipUnloadDelay = "matching.membershipUnloadDelay" - // MatchingQueryWorkflowTaskTimeoutLogRate defines the sampling rate for logs when a query workflow task times out. Since - // these log lines can be noisy, we want to be able to turn on and sample selectively for each affected namespace. 
- MatchingQueryWorkflowTaskTimeoutLogRate = "matching.queryWorkflowTaskTimeoutLogRate" - - // for matching testing only: - - // TestMatchingDisableSyncMatch forces tasks to go through the db once - TestMatchingDisableSyncMatch = "test.matching.disableSyncMatch" - // TestMatchingLBForceReadPartition forces polls to go to a specific partition - TestMatchingLBForceReadPartition = "test.matching.lbForceReadPartition" - // TestMatchingLBForceWritePartition forces adds to go to a specific partition - TestMatchingLBForceWritePartition = "test.matching.lbForceWritePartition" + MatchingRPS = NewGlobalIntSetting( + "matching.rps", + 1200, + `MatchingRPS is request rate per second for each matching host`, + ) + MatchingNamespaceRPS = NewNamespaceIntSetting( + "matching.namespaceRPS", + 0, + `MatchingNamespaceRPS is namespace rate limit per second for each matching host. +If value less or equal to 0, will fall back to MatchingRPS`, + ) + MatchingPersistenceMaxQPS = NewGlobalIntSetting( + "matching.persistenceMaxQPS", + 3000, + `MatchingPersistenceMaxQPS is the max qps matching host can query DB`, + ) + MatchingPersistenceGlobalMaxQPS = NewGlobalIntSetting( + "matching.persistenceGlobalMaxQPS", + 0, + `MatchingPersistenceGlobalMaxQPS is the max qps matching cluster can query DB`, + ) + MatchingPersistenceNamespaceMaxQPS = NewNamespaceIntSetting( + "matching.persistenceNamespaceMaxQPS", + 0, + `MatchingPersistenceNamespaceMaxQPS is the max qps each namespace on matching host can query DB`, + ) + MatchingPersistenceGlobalNamespaceMaxQPS = NewNamespaceIntSetting( + "matching.persistenceGlobalNamespaceMaxQPS", + 0, + `MatchingPersistenceGlobalNamespaceMaxQPS is the max qps each namespace in matching cluster can query DB`, + ) + MatchingPersistenceDynamicRateLimitingParams = NewGlobalTypedSetting( + "matching.persistenceDynamicRateLimitingParams", + DefaultDynamicRateLimitingParams, + `MatchingPersistenceDynamicRateLimitingParams is a struct that contains all adjustable dynamic rate 
limiting params. +Fields: Enabled, RefreshInterval, LatencyThreshold, ErrorThreshold, RateBackoffStepSize, RateIncreaseStepSize, RateMultiMin, RateMultiMax. +See DynamicRateLimitingParams comments for more details.`, + ) + MatchingMinTaskThrottlingBurstSize = NewTaskQueueIntSetting( + "matching.minTaskThrottlingBurstSize", + 1, + `MatchingMinTaskThrottlingBurstSize is the minimum burst size for task queue throttling`, + ) + MatchingGetTasksBatchSize = NewTaskQueueIntSetting( + "matching.getTasksBatchSize", + 1000, + `How many backlog tasks to read from persistence at once`, + ) + MatchingGetTasksReloadAt = NewTaskQueueIntSetting( + "matching.getTasksReloadAt", + 100, + `Reload a batch of tasks when there are this many remaining. Must be less than MatchingGetTasksBatchSize. (Requires new matcher.)`, + ) + MatchingLongPollExpirationInterval = NewTaskQueueDurationSetting( + "matching.longPollExpirationInterval", + time.Minute, + `MatchingLongPollExpirationInterval is the long poll expiration interval in the matching service`, + ) + // TODO(pri): old matcher cleanup + MatchingSyncMatchWaitDuration = NewTaskQueueDurationSetting( + "matching.syncMatchWaitDuration", + 200*time.Millisecond, + `MatchingSyncMatchWaitDuration is to wait time for sync match`, + ) + MatchingHistoryMaxPageSize = NewNamespaceIntSetting( + "matching.historyMaxPageSize", + primitives.GetHistoryMaxPageSize, + `MatchingHistoryMaxPageSize is the maximum page size of history events returned on PollWorkflowTaskQueue requests`, + ) + MatchingUpdateAckInterval = NewTaskQueueDurationSettingWithConstrainedDefault( + "matching.updateAckInterval", + []TypedConstrainedValue[time.Duration]{ + // Use a longer default interval for the per-namespace internal worker queues. + { + Constraints: Constraints{ + TaskQueueName: primitives.PerNSWorkerTaskQueue, + }, + Value: 5 * time.Minute, + }, + // Default for everything else. 
+ { + Value: 1 * time.Minute, + }, + }, + `MatchingUpdateAckInterval is the interval for update ack`, + ) + MatchingMetadataUpdateOnAppendInterval = NewTaskQueueDurationSetting( + "matching.metadataUpdateOnAppendInterval", + 5*time.Second, + `MatchingMetadataUpdateOnAppendInterval controls how often task queue metadata (e.g. +approximate backlog count) is written along with task appends. When using Cassandra, task appends +always require an LWT for the range ID check, but updating the full metadata on every append adds +extra write cost. This setting limits metadata updates to at most once per interval, piggybacking +on the append LWT. A value of 0 means always update metadata on every append (previous behavior).`, + ) + MatchingMaxTaskQueueIdleTime = NewTaskQueueDurationSetting( + "matching.maxTaskQueueIdleTime", + 5*time.Minute, + `MatchingMaxTaskQueueIdleTime is the time after which an idle task queue will be unloaded. +Note: this should be greater than matching.longPollExpirationInterval and matching.getUserDataLongPollTimeout.`, + ) + MatchingOutstandingTaskAppendsThreshold = NewTaskQueueIntSetting( + "matching.outstandingTaskAppendsThreshold", + 250, + `MatchingOutstandingTaskAppendsThreshold is the threshold for outstanding task appends`, + ) + MatchingMaxTaskBatchSize = NewTaskQueueIntSetting( + "matching.maxTaskBatchSize", + 100, + `MatchingMaxTaskBatchSize is max batch size for task writer`, + ) + MatchingMaxTaskDeleteBatchSize = NewTaskQueueIntSetting( + "matching.maxTaskDeleteBatchSize", + 100, + `MatchingMaxTaskDeleteBatchSize is the max batch size for range deletion of tasks`, + ) + MatchingTaskDeleteInterval = NewTaskQueueDurationSetting( + "matching.taskDeleteInterval", + 15*time.Second, + `MatchingTaskDeleteInterval is the minimum interval between task range deletions`, + ) + MatchingThrottledLogRPS = NewGlobalIntSetting( + "matching.throttledLogRPS", + 20, + `MatchingThrottledLogRPS is the rate limit on number of log messages emitted per second 
for throttled logger`, + ) + MatchingNumTaskqueueWritePartitions = NewTaskQueueIntSettingWithConstrainedDefault( + "matching.numTaskqueueWritePartitions", + defaultNumTaskQueuePartitions, + `MatchingNumTaskqueueWritePartitions is the number of write partitions for a task queue`, + ) + MatchingNumTaskqueueReadPartitions = NewTaskQueueIntSettingWithConstrainedDefault( + "matching.numTaskqueueReadPartitions", + defaultNumTaskQueuePartitions, + `MatchingNumTaskqueueReadPartitions is the number of read partitions for a task queue`, + ) + MetricsBreakdownByTaskQueue = NewTaskQueueBoolSetting( + "metrics.breakdownByTaskQueue", + true, + `MetricsBreakdownByTaskQueue determines if the 'taskqueue' tag in Matching and History metrics should +contain the actual TQ name or a generic __omitted__ value. Disable this option if the cardinality is too high for your +observability stack. Disabling this option will disable all the per-Task Queue gauges such as backlog lag, count, and age.`, + ) + MetricsBreakdownByPartition = NewTaskQueueBoolSetting( + "metrics.breakdownByPartition", + true, + `MetricsBreakdownByPartition determines if the 'partition' tag in Matching metrics should +contain the actual normal partition ID or a generic __normal__ value. Regardless of this config, the tag value for sticky +queues will be "__sticky__". Disable this option if the partition cardinality is too high for your +observability stack. Disabling this option will disable all the per-Task Queue gauges such as backlog lag, count, and age.`, + ) + MetricsBreakdownByBuildID = NewTaskQueueBoolSetting( + "metrics.breakdownByBuildID", + true, + `MetricsBreakdownByBuildID determines if the 'worker_version' tag in Matching metrics should +contain the actual Worker Deployment Version or a generic "__versioned__" value. Regardless of this config, the tag value for unversioned +queues will be "__unversioned__". Disable this option if the version cardinality is too high for your +observability stack. 
Disabling this option will disable all the per-Task Queue gauges such as backlog lag, count, and age +for VERSIONED queues.`, + ) + MatchingForwarderMaxOutstandingPolls = NewTaskQueueIntSetting( + "matching.forwarderMaxOutstandingPolls", + 1, + `MatchingForwarderMaxOutstandingPolls is the max number of inflight polls from the forwarder`, + ) + MatchingForwarderMaxOutstandingTasks = NewTaskQueueIntSetting( + "matching.forwarderMaxOutstandingTasks", + 1, + `MatchingForwarderMaxOutstandingTasks is the max number of inflight addTask/queryTask from the forwarder`, + ) + MatchingForwarderMaxRatePerSecond = NewTaskQueueFloatSetting( + "matching.forwarderMaxRatePerSecond", + 10, + `MatchingForwarderMaxRatePerSecond is the max rate at which add/query can be forwarded`, + ) + MatchingForwarderMaxChildrenPerNode = NewTaskQueueIntSetting( + "matching.forwarderMaxChildrenPerNode", + 20, + `MatchingForwarderMaxChildrenPerNode is the max number of children per node in the task queue partition tree`, + ) + MatchingAlignMembershipChange = NewGlobalDurationSetting( + "matching.alignMembershipChange", + 0*time.Second, + `MatchingAlignMembershipChange is a duration to align matching's membership changes to. 
+This can help reduce effects of task queue movement.`, + ) + MatchingShutdownDrainDuration = NewGlobalDurationSetting( + "matching.shutdownDrainDuration", + 0*time.Second, + `MatchingShutdownDrainDuration is the duration of traffic drain during shutdown`, + ) + MatchingGetUserDataLongPollTimeout = NewGlobalDurationSetting( + "matching.getUserDataLongPollTimeout", + 5*time.Minute-10*time.Second, + `MatchingGetUserDataLongPollTimeout is the max length of long polls for GetUserData calls between partitions.`, + ) + MatchingGetUserDataRefresh = NewGlobalDurationSetting( + "matching.getUserDataRefresh", + 5*time.Minute, + `MatchingGetUserDataRefresh is how often the user data owner refreshes data from persistence.`, + ) + MatchingEphemeralDataUpdateInterval = NewTaskQueueDurationSetting( + "matching.ephemeralDataUpdateInterval", + 5*time.Second, + `How often to update ephemeral data (e.g. backlog size for forwarding sticky polls). +Set to zero to disable ephemeral data updates.`, + ) + MatchingBacklogMetricsEmitInterval = NewTaskQueueDurationSetting( + "matching.backlogMetricsEmitInterval", + time.Minute, + `How often to emit version-attributed backlog metrics. Done on an interval because accurate attribution requires checking the routing config of a task queue to correctly attribute the default queue's tasks to the appropriate current or ramping versions. Set to zero to disable version-attributed backlog metrics.`, + ) + MatchingPriorityBacklogForwarding = NewTaskQueueBoolSetting( + "matching.priorityBacklogForwarding", + true, + `Whether to forward polls to partitions with higher-priority backlog.`, + ) + MatchingBacklogNegligibleAge = NewTaskQueueDurationSetting( + "matching.backlogNegligibleAge", + 5*time.Second, + `MatchingBacklogNegligibleAge is a threshold for negligible vs significant backlogs: +If the head of the backlog is older than this, then we stop sync match and forwarding to ensure +more equal dispatch order among partitions. 
We also forward sticky polls to partitions with +higher-priority backlog.`, + ) + MatchingMaxWaitForPollerBeforeFwd = NewTaskQueueDurationSetting( + "matching.maxWaitForPollerBeforeFwd", + 200*time.Millisecond, + `MatchingMaxWaitForPollerBeforeFwd in presence of a non-negligible backlog, we resume forwarding tasks if the +duration since last poll exceeds this threshold.`, + ) + QueryPollerUnavailableWindow = NewGlobalDurationSetting( + "matching.queryPollerUnavailableWindow", + 20*time.Second, + `QueryPollerUnavailableWindow WF Queries are rejected after a while if no poller has been seen within the window`, + ) + MatchingEmitTaskDispatchLatencyAtPoll = NewTaskQueueBoolSetting( + "matching.emitTaskDispatchLatencyAtPoll", + true, + `When enabled, TaskDispatchLatencyPerTaskQueue is emitted when responding to poll requests (with extra tags +like partition and worker-version) instead of being emitted at the matcher level.`, + ) + MatchingListNexusEndpointsLongPollTimeout = NewGlobalDurationSetting( + "matching.listNexusEndpointsLongPollTimeout", + 5*time.Minute-10*time.Second, + `MatchingListNexusEndpointsLongPollTimeout is the max length of long polls for ListNexusEndpoints calls.`, + ) + MatchingNexusEndpointsRefreshInterval = NewGlobalDurationSetting( + "matching.nexusEndpointsRefreshInterval", + 10*time.Second, + `Time to wait between calls to check that the in-memory view of Nexus endpoints matches the persisted state.`, + ) + MatchingMembershipUnloadDelay = NewGlobalDurationSetting( + "matching.membershipUnloadDelay", + 500*time.Millisecond, + `MatchingMembershipUnloadDelay is how long to wait to re-confirm loss of ownership before unloading a task queue. +Set to zero to disable proactive unload.`, + ) + MatchingQueryWorkflowTaskTimeoutLogRate = NewTaskQueueFloatSetting( + "matching.queryWorkflowTaskTimeoutLogRate", + 0.0, + `MatchingQueryWorkflowTaskTimeoutLogRate defines the sampling rate for logs when a query workflow task times out. 
Since +these log lines can be noisy, we want to be able to turn on and sample selectively for each affected namespace.`, + ) + TaskQueueInfoByBuildIdTTL = NewTaskQueueDurationSetting( + "matching.TaskQueueInfoByBuildIdTTL", + 5*time.Second, + `TaskQueueInfoByBuildIdTTL serves as a TTL for the cache holding DescribeTaskQueue partition results`, + ) + MatchingDeploymentWorkflowVersion = NewNamespaceIntSetting( + "matching.deploymentWorkflowVersion", + 2, + `MatchingDeploymentWorkflowVersion controls what version of the logic should the manager workflows use.`, + ) + MatchingMaxTaskQueuesInDeployment = NewNamespaceIntSetting( + "matching.maxTaskQueuesInDeployment", + 1000, + `MatchingMaxTaskQueuesInDeployment represents the maximum number of task-queues that can be registered in a single deployment`, + ) + MatchingMaxDeployments = NewNamespaceIntSetting( + "matching.maxDeployments", + 100, + `MatchingMaxDeployments represents the maximum number of worker deployments that can be registered in a single namespace`, + ) + MatchingMaxVersionsInDeployment = NewNamespaceIntSetting( + "matching.maxVersionsInDeployment", + 100, + `MatchingMaxVersionsInDeployment represents the maximum number of versions that can be registered in a single worker deployment`, + ) + MatchingMaxVersionsInTaskQueue = NewNamespaceIntSetting( + "matching.maxVersionsInTaskQueue", + 200, + `MatchingMaxVersionsInTaskQueue represents the maximum number of versions that can be registered in a single task queue. 
+ Should be larger than MatchingMaxVersionsInDeployment because a task queue can be in versions spanning across more than one deployment.`, + ) + MatchingMaxTaskQueuesInDeploymentVersion = NewNamespaceIntSetting( + "matching.maxTaskQueuesInDeploymentVersion", + 100, + `MatchingMaxTaskQueuesInDeploymentVersion represents the maximum number of task-queues that can be registered in a single worker deployment version`, + ) + MatchingPollerScalingBacklogAgeScaleUp = NewTaskQueueDurationSetting( + "matching.pollerScalingMinimumBacklog", + 200*time.Millisecond, + `MatchingPollerScalingBacklogAgeScaleUp is the minimum backlog age that must be accumulated before +a decision to scale up the number of pollers will be issued`, + ) + MatchingPollerScalingWaitTime = NewTaskQueueDurationSetting( + "matching.pollerScalingWaitTime", + 1*time.Second, + `MatchingPollerScalingWaitTime is the duration a sync-matched poller must exceed before +a decision to scale down the number of pollers will be issued`, + ) + MatchingPollerScalingDecisionsPerSecond = NewTaskQueueFloatSetting( + "matching.pollerScalingDecisionsPerSecond", + 10, + `MatchingPollerScalingDecisionsPerSecond is the maximum number of scaling decisions that will be issued per +second per poller by one physical queue manager`, + ) + MatchingUseNewMatcher = NewTaskQueueTypedSettingWithConverter( + "matching.useNewMatcher", + ConvertGradualChange(true), + StaticGradualChange(true), + `Use priority-enabled TaskMatcher`, + ) + MatchingEnableFairness = NewTaskQueueTypedSettingWithConverter( + "matching.enableFairness", + ConvertGradualChange(false), + StaticGradualChange(false), + `Enable fairness for task dispatching. 
Implies matching.useNewMatcher.`, + ) + MatchingEnableMigration = NewTaskQueueBoolSetting( + "matching.enableMigration", + true, + `Allows migration between v1 and v2 (fairness) task backlogs.`, + ) + MatchingPriorityLevels = NewTaskQueueIntSetting( + "matching.priorityLevels", + 5, + `Number of simple priority levels (requires new matcher)`, + ) + MatchingBacklogTaskForwardTimeout = NewTaskQueueDurationSetting( + "matching.backlogTaskForwardTimeout", + 60*time.Second, + `Timeout for forwarded backlog task (requires new matcher)`, + ) + MatchingForwardPollRetryMaxInterval = NewTaskQueueDurationSetting( + "matching.forwardPollRetryMaxInterval", + 10*time.Second, + `Max backoff interval when retrying a rate-limited ForwardPoll from a child partition`, + ) + MatchingFairnessCounter = NewTaskQueueTypedSetting( + "matching.fairnessCounter", + counter.DefaultCounterParams, + `Configuration for counter used in matching fairness.`, + ) + MatchingFairnessKeyRateLimitCacheSize = NewTaskQueueIntSetting( + "matching.fairnessKeyRateLimitCacheSize", + 2000, + "Cache size for fairness key rate limits.", + ) + MatchingMaxFairnessKeyWeightOverrides = NewTaskQueueIntSetting( + "matching.maxFairnessKeyWeightOverrides", + 1000, + "Maximum number of fairness key weight overrides that can be configured for a task queue at a time.", + ) + MatchingEnableWorkerPluginMetrics = NewGlobalBoolSetting( + "matching.enableWorkerPluginMetrics", + false, + `MatchingEnableWorkerPluginMetrics controls whether to export worker plugin metrics. +The metric has 2 dimensions: namespace_id and plugin_name. Disabled by default as this is +an optional feature and also requires a metrics collection system that can handle higher cardinalities.`, + ) + MatchingEnablePollerAutoscalingMetrics = NewGlobalBoolSetting( + "matching.enablePollerAutoscalingMetrics", + false, + `MatchingEnablePollerAutoscalingMetrics controls whether to export poller autoscaling metrics. 
+The metric has dimensions: namespace, taskqueue, and task_type (Workflow, Activity, Nexus). Disabled by +default as namespace cardinality can be high and this requires a metrics collection system that can handle it.`, + ) + MatchingAutoEnableV2 = NewTaskQueueBoolSetting( + "matching.autoEnableV2", + false, + `MatchingAutoEnableV2 automatically enables fairness when a fairness or priority key is seen`, + ) + + // Worker registry settings + MatchingWorkerRegistryNumBuckets = NewGlobalIntSetting( + "matching.workerRegistryNumBuckets", + 10, + `MatchingWorkerRegistryNumBuckets is the number of buckets used to partition the worker registry +keyspace for reduced lock contention. Changes require a restart to take effect.`, + ) + MatchingWorkerRegistryEntryTTL = NewGlobalDurationSetting( + "matching.workerRegistryEntryTTL", + 5*time.Minute, + `MatchingWorkerRegistryEntryTTL is the time after which worker heartbeat entries are considered expired +and eligible for eviction. Workers typically heartbeat every 30-60 seconds, so 5 minutes without a +heartbeat indicates the worker is likely dead.`, + ) + MatchingWorkerRegistryMinEvictAge = NewGlobalDurationSetting( + "matching.workerRegistryMinEvictAge", + 1*time.Minute, + `MatchingWorkerRegistryMinEvictAge is the minimum age of worker heartbeat entries before they can be +evicted due to capacity pressure. This prevents evicting recently-heartbeated workers even when +the registry is at capacity. Lower values help handle crash-looping workers more aggressively.`, + ) + MatchingWorkerRegistryMaxEntries = NewGlobalIntSetting( + "matching.workerRegistryMaxEntries", + 1_000_000, + `MatchingWorkerRegistryMaxEntries is the maximum number of worker heartbeat entries allowed across +all namespaces. 
When exceeded, the oldest entries (older than MinEvictAge) are evicted.`, + ) + MatchingWorkerRegistryEvictionInterval = NewGlobalDurationSetting( + "matching.workerRegistryEvictionInterval", + 1*time.Minute, + `MatchingWorkerRegistryEvictionInterval is how often the worker registry runs background eviction +to remove expired entries. Should be shorter than EntryTTL for timely cleanup. Lower values mean faster cleanup but more CPU overhead.`, + ) + MatchingSpreadRoutingBatchSize = NewGlobalTypedSettingWithConverter( + "matching.spreadRoutingBatchSize", + ConvertGradualChange[int](0), + StaticGradualChange[int](0), + `If non-zero, try to spread task queue partitions across matching nodes better, using the given batch size. +Don't change this on a live cluster without using the gradual change mechanism. +`, + ) // keys for history - // EnableReplicationStream turn on replication stream - EnableReplicationStream = "history.enableReplicationStream" - // EnableHistoryReplicationDLQV2 switches to the DLQ v2 implementation for history replication. See details in - // [go.temporal.io/server/common/persistence.QueueV2]. This feature is currently in development. Do NOT use it in - // production. 
- EnableHistoryReplicationDLQV2 = "history.enableHistoryReplicationDLQV2" - - // HistoryRPS is request rate per second for each history host - HistoryRPS = "history.rps" - // HistoryPersistenceMaxQPS is the max qps history host can query DB - HistoryPersistenceMaxQPS = "history.persistenceMaxQPS" - // HistoryPersistenceGlobalMaxQPS is the max qps history cluster can query DB - HistoryPersistenceGlobalMaxQPS = "history.persistenceGlobalMaxQPS" - // HistoryPersistenceNamespaceMaxQPS is the max qps each namespace on history host can query DB - // If value less or equal to 0, will fall back to HistoryPersistenceMaxQPS - HistoryPersistenceNamespaceMaxQPS = "history.persistenceNamespaceMaxQPS" - // HistoryPersistenceNamespaceMaxQPS is the max qps each namespace in history cluster can query DB - HistoryPersistenceGlobalNamespaceMaxQPS = "history.persistenceGlobalNamespaceMaxQPS" - // HistoryPersistencePerShardNamespaceMaxQPS is the max qps each namespace on a shard can query DB - HistoryPersistencePerShardNamespaceMaxQPS = "history.persistencePerShardNamespaceMaxQPS" - // HistoryEnablePersistencePriorityRateLimiting indicates if priority rate limiting is enabled in history persistence client - HistoryEnablePersistencePriorityRateLimiting = "history.enablePersistencePriorityRateLimiting" - // HistoryPersistenceDynamicRateLimitingParams is a map that contains all adjustable dynamic rate limiting params - // see DefaultDynamicRateLimitingParams for available options and defaults - HistoryPersistenceDynamicRateLimitingParams = "history.persistenceDynamicRateLimitingParams" - // HistoryLongPollExpirationInterval is the long poll expiration interval in the history service - HistoryLongPollExpirationInterval = "history.longPollExpirationInterval" - // HistoryCacheSizeBasedLimit if true, size of the history cache will be limited by HistoryCacheMaxSizeBytes - // and HistoryCacheHostLevelMaxSizeBytes. 
Otherwise, entry count in the history cache will be limited by - // HistoryCacheMaxSize and HistoryCacheHostLevelMaxSize. - HistoryCacheSizeBasedLimit = "history.cacheSizeBasedLimit" - // HistoryCacheInitialSize is initial size of history cache - HistoryCacheInitialSize = "history.cacheInitialSize" - // HistoryCacheMaxSize is the maximum number of entries in the shard level history cache - HistoryCacheMaxSize = "history.cacheMaxSize" - // HistoryCacheMaxSizeBytes is the maximum size of the shard level history cache in bytes. This is only used if - // HistoryCacheSizeBasedLimit is set to true. - HistoryCacheMaxSizeBytes = "history.cacheMaxSizeBytes" - // HistoryCacheTTL is TTL of history cache - HistoryCacheTTL = "history.cacheTTL" - // HistoryCacheNonUserContextLockTimeout controls how long non-user call (callerType != API or Operator) - // will wait on workflow lock acquisition. Requires service restart to take effect. - HistoryCacheNonUserContextLockTimeout = "history.cacheNonUserContextLockTimeout" - // EnableHostHistoryCache controls if the history cache is host level - EnableHostHistoryCache = "history.enableHostHistoryCache" - // HistoryCacheHostLevelMaxSize is the maximum number of entries in the host level history cache - HistoryCacheHostLevelMaxSize = "history.hostLevelCacheMaxSize" - // HistoryCacheHostLevelMaxSizeBytes is the maximum size of the host level history cache. This is only used if - // HistoryCacheSizeBasedLimit is set to true. - HistoryCacheHostLevelMaxSizeBytes = "history.hostLevelCacheMaxSizeBytes" - // EnableAPIGetCurrentRunIDLock controls if a lock should be acquired before getting current run ID for API requests - EnableAPIGetCurrentRunIDLock = "history.enableAPIGetCurrentRunIDLock" - // EnableMutableStateTransitionHistory controls whether to record state transition history in mutable state records. 
- // The feature is used in the hierarchical state machine framework and is considered unstable as the structure may - // change with the pending replication design. - EnableMutableStateTransitionHistory = "history.enableMutableStateTransitionHistory" - // HistoryStartupMembershipJoinDelay is the duration a history instance waits - // before joining membership after starting. - HistoryStartupMembershipJoinDelay = "history.startupMembershipJoinDelay" - // HistoryShutdownDrainDuration is the duration of traffic drain during shutdown - HistoryShutdownDrainDuration = "history.shutdownDrainDuration" - // XDCCacheMaxSizeBytes is max size of events cache in bytes - XDCCacheMaxSizeBytes = "history.xdcCacheMaxSizeBytes" - // EventsCacheMaxSizeBytes is max size of the shard level events cache in bytes - EventsCacheMaxSizeBytes = "history.eventsCacheMaxSizeBytes" - // EventsHostLevelCacheMaxSizeBytes is max size of the host level events cache in bytes - EventsHostLevelCacheMaxSizeBytes = "history.eventsHostLevelCacheMaxSizeBytes" - // EventsCacheTTL is TTL of events cache - EventsCacheTTL = "history.eventsCacheTTL" - // EnableHostLevelEventsCache controls if the events cache is host level - EnableHostLevelEventsCache = "history.enableHostLevelEventsCache" - // AcquireShardInterval is interval that timer used to acquire shard - AcquireShardInterval = "history.acquireShardInterval" - // AcquireShardConcurrency is number of goroutines that can be used to acquire shards in the shard controller. - AcquireShardConcurrency = "history.acquireShardConcurrency" - // ShardLingerOwnershipCheckQPS is the frequency to perform shard ownership - // checks while a shard is lingering. - ShardLingerOwnershipCheckQPS = "history.shardLingerOwnershipCheckQPS" - // ShardLingerTimeLimit configures if and for how long the shard controller - // will temporarily delay closing shards after a membership update, awaiting a - // shard ownership lost error from persistence. 
Not recommended with - // persistence layers that are missing AssertShardOwnership support. - // If set to zero, shards will not delay closing. - ShardLingerTimeLimit = "history.shardLingerTimeLimit" - // ShardOwnershipAssertionEnabled configures if the shard ownership is asserted - // for API requests when a NotFound or NamespaceNotFound error is returned from - // persistence. - // NOTE: Shard ownership assertion is not implemented by any persistence implementation - // in this codebase, because assertion is not needed for persistence implementation - // that guarantees read after write consistency. As a result, even if this config is - // enabled, it's a no-op. - ShardOwnershipAssertionEnabled = "history.shardOwnershipAssertionEnabled" - // HistoryClientOwnershipCachingEnabled configures if history clients try to cache - // shard ownership information, instead of checking membership for each request. - // Only inspected when an instance first creates a history client, so changes - // to this require a restart to take effect. 
- HistoryClientOwnershipCachingEnabled = "history.clientOwnershipCachingEnabled" - // ShardIOConcurrency controls the concurrency of persistence operations in shard context - ShardIOConcurrency = "history.shardIOConcurrency" - // StandbyClusterDelay is the artificial delay added to standby cluster's view of active cluster's time - StandbyClusterDelay = "history.standbyClusterDelay" - // StandbyTaskMissingEventsResendDelay is the amount of time standby cluster's will wait (if events are missing) - // before calling remote for missing events - StandbyTaskMissingEventsResendDelay = "history.standbyTaskMissingEventsResendDelay" - // StandbyTaskMissingEventsDiscardDelay is the amount of time standby cluster's will wait (if events are missing) - // before discarding the task - StandbyTaskMissingEventsDiscardDelay = "history.standbyTaskMissingEventsDiscardDelay" - // QueuePendingTaskCriticalCount is the max number of pending task in one queue - // before triggering queue slice splitting and unloading - QueuePendingTaskCriticalCount = "history.queuePendingTaskCriticalCount" - // QueueReaderStuckCriticalAttempts is the max number of task loading attempts for a certain task range - // before that task range is split into a separate slice to unblock loading for later range. - // currently only work for scheduled queues and the task range is 1s. - QueueReaderStuckCriticalAttempts = "history.queueReaderStuckCriticalAttempts" - // QueueCriticalSlicesCount is the max number of slices in one queue - // before force compacting slices - QueueCriticalSlicesCount = "history.queueCriticalSlicesCount" - // QueuePendingTaskMaxCount is the max number of task pending tasks in one queue before stop - // loading new tasks into memory. While QueuePendingTaskCriticalCount won't stop task loading - // for the entire queue but only trigger a queue action to unload tasks. Ideally this max count - // limit should not be hit and task unloading should happen once critical count is exceeded. 
But - // since queue action is async, we need this hard limit. - QueuePendingTaskMaxCount = "history.queuePendingTasksMaxCount" - // ContinueAsNewMinInterval is the minimal interval between continue_as_new executions. - // This is needed to prevent tight loop continue_as_new spin. Default is 1s. - ContinueAsNewMinInterval = "history.continueAsNewMinInterval" - - // TaskSchedulerEnableRateLimiter indicates if task scheduler rate limiter should be enabled - TaskSchedulerEnableRateLimiter = "history.taskSchedulerEnableRateLimiter" - // TaskSchedulerEnableRateLimiterShadowMode indicates if task scheduler rate limiter should run in shadow mode - // i.e. through rate limiter and emit metrics but do not actually block/throttle task scheduling - TaskSchedulerEnableRateLimiterShadowMode = "history.taskSchedulerEnableRateLimiterShadowMode" - // TaskSchedulerRateLimiterStartupDelay is the duration to wait after startup before enforcing task scheduler rate limiting - TaskSchedulerRateLimiterStartupDelay = "history.taskSchedulerRateLimiterStartupDelay" - // TaskSchedulerGlobalMaxQPS is the max qps all task schedulers in the cluster can schedule tasks - // If value less or equal to 0, will fall back to TaskSchedulerMaxQPS - TaskSchedulerGlobalMaxQPS = "history.taskSchedulerGlobalMaxQPS" - // TaskSchedulerMaxQPS is the max qps task schedulers on a host can schedule tasks - // If value less or equal to 0, will fall back to HistoryPersistenceMaxQPS - TaskSchedulerMaxQPS = "history.taskSchedulerMaxQPS" - // TaskSchedulerGlobalNamespaceMaxQPS is the max qps all task schedulers in the cluster can schedule tasks for a certain namespace - // If value less or equal to 0, will fall back to TaskSchedulerNamespaceMaxQPS - TaskSchedulerGlobalNamespaceMaxQPS = "history.taskSchedulerGlobalNamespaceMaxQPS" - // TaskSchedulerNamespaceMaxQPS is the max qps task schedulers on a host can schedule tasks for a certain namespace - // If value less or equal to 0, will fall back to 
HistoryPersistenceNamespaceMaxQPS - TaskSchedulerNamespaceMaxQPS = "history.taskSchedulerNamespaceMaxQPS" - - // TimerTaskBatchSize is batch size for timer processor to process tasks - TimerTaskBatchSize = "history.timerTaskBatchSize" - // TimerProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for timer processor - TimerProcessorSchedulerWorkerCount = "history.timerProcessorSchedulerWorkerCount" - // TimerProcessorSchedulerActiveRoundRobinWeights is the priority round robin weights used by timer task scheduler for active namespaces - TimerProcessorSchedulerActiveRoundRobinWeights = "history.timerProcessorSchedulerActiveRoundRobinWeights" - // TimerProcessorSchedulerStandbyRoundRobinWeights is the priority round robin weights used by timer task scheduler for standby namespaces - TimerProcessorSchedulerStandbyRoundRobinWeights = "history.timerProcessorSchedulerStandbyRoundRobinWeights" - // TimerProcessorUpdateAckInterval is update interval for timer processor - TimerProcessorUpdateAckInterval = "history.timerProcessorUpdateAckInterval" - // TimerProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient - TimerProcessorUpdateAckIntervalJitterCoefficient = "history.timerProcessorUpdateAckIntervalJitterCoefficient" - // TimerProcessorMaxPollRPS is max poll rate per second for timer processor - TimerProcessorMaxPollRPS = "history.timerProcessorMaxPollRPS" - // TimerProcessorMaxPollHostRPS is max poll rate per second for all timer processor on a host - TimerProcessorMaxPollHostRPS = "history.timerProcessorMaxPollHostRPS" - // TimerProcessorMaxPollInterval is max poll interval for timer processor - TimerProcessorMaxPollInterval = "history.timerProcessorMaxPollInterval" - // TimerProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient - TimerProcessorMaxPollIntervalJitterCoefficient = "history.timerProcessorMaxPollIntervalJitterCoefficient" - // TimerProcessorPollBackoffInterval 
is the poll backoff interval if task redispatcher's size exceeds limit for timer processor - TimerProcessorPollBackoffInterval = "history.timerProcessorPollBackoffInterval" - // TimerProcessorMaxTimeShift is the max shift timer processor can have - TimerProcessorMaxTimeShift = "history.timerProcessorMaxTimeShift" - // TimerQueueMaxReaderCount is the max number of readers in one multi-cursor timer queue - TimerQueueMaxReaderCount = "history.timerQueueMaxReaderCount" - // RetentionTimerJitterDuration is a time duration jitter to distribute timer from T0 to T0 + jitter duration - RetentionTimerJitterDuration = "history.retentionTimerJitterDuration" - - // MemoryTimerProcessorSchedulerWorkerCount is the number of workers in the task scheduler for in memory timer processor. - MemoryTimerProcessorSchedulerWorkerCount = "history.memoryTimerProcessorSchedulerWorkerCount" - - // TransferTaskBatchSize is batch size for transferQueueProcessor - TransferTaskBatchSize = "history.transferTaskBatchSize" - // TransferProcessorMaxPollRPS is max poll rate per second for transferQueueProcessor - TransferProcessorMaxPollRPS = "history.transferProcessorMaxPollRPS" - // TransferProcessorMaxPollHostRPS is max poll rate per second for all transferQueueProcessor on a host - TransferProcessorMaxPollHostRPS = "history.transferProcessorMaxPollHostRPS" - // TransferProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for transferQueueProcessor - TransferProcessorSchedulerWorkerCount = "history.transferProcessorSchedulerWorkerCount" - // TransferProcessorSchedulerActiveRoundRobinWeights is the priority round robin weights used by transfer task scheduler for active namespaces - TransferProcessorSchedulerActiveRoundRobinWeights = "history.transferProcessorSchedulerActiveRoundRobinWeights" - // TransferProcessorSchedulerStandbyRoundRobinWeights is the priority round robin weights used by transfer task scheduler for standby namespaces - 
TransferProcessorSchedulerStandbyRoundRobinWeights = "history.transferProcessorSchedulerStandbyRoundRobinWeights" - // TransferProcessorMaxPollInterval max poll interval for transferQueueProcessor - TransferProcessorMaxPollInterval = "history.transferProcessorMaxPollInterval" - // TransferProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient - TransferProcessorMaxPollIntervalJitterCoefficient = "history.transferProcessorMaxPollIntervalJitterCoefficient" - // TransferProcessorUpdateAckInterval is update interval for transferQueueProcessor - TransferProcessorUpdateAckInterval = "history.transferProcessorUpdateAckInterval" - // TransferProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient - TransferProcessorUpdateAckIntervalJitterCoefficient = "history.transferProcessorUpdateAckIntervalJitterCoefficient" - // TransferProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for transferQueueProcessor - TransferProcessorPollBackoffInterval = "history.transferProcessorPollBackoffInterval" - // TransferProcessorEnsureCloseBeforeDelete means we ensure the execution is closed before we delete it - TransferProcessorEnsureCloseBeforeDelete = "history.transferProcessorEnsureCloseBeforeDelete" - // TransferQueueMaxReaderCount is the max number of readers in one multi-cursor transfer queue - TransferQueueMaxReaderCount = "history.transferQueueMaxReaderCount" - - // OutboundProcessorEnabled enables starting the outbound queue processor. 
- OutboundProcessorEnabled = "history.outboundProcessorEnabled" - // OutboundTaskBatchSize is batch size for outboundQueueFactory - OutboundTaskBatchSize = "history.outboundTaskBatchSize" - // OutboundProcessorMaxPollRPS is max poll rate per second for outboundQueueFactory - OutboundProcessorMaxPollRPS = "history.outboundProcessorMaxPollRPS" - // OutboundProcessorMaxPollHostRPS is max poll rate per second for all outboundQueueFactory on a host - OutboundProcessorMaxPollHostRPS = "history.outboundProcessorMaxPollHostRPS" - // OutboundProcessorUpdateShardTaskCount is update shard count for outboundQueueFactory - OutboundProcessorUpdateShardTaskCount = "history.outboundProcessorUpdateShardTaskCount" - // OutboundProcessorMaxPollInterval max poll interval for outboundQueueFactory - OutboundProcessorMaxPollInterval = "history.outboundProcessorMaxPollInterval" - // OutboundProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient - OutboundProcessorMaxPollIntervalJitterCoefficient = "history.outboundProcessorMaxPollIntervalJitterCoefficient" - // OutboundProcessorUpdateAckInterval is update interval for outboundQueueFactory - OutboundProcessorUpdateAckInterval = "history.outboundProcessorUpdateAckInterval" - // OutboundProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient - OutboundProcessorUpdateAckIntervalJitterCoefficient = "history.outboundProcessorUpdateAckIntervalJitterCoefficient" - // OutboundProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for outboundQueueFactory - OutboundProcessorPollBackoffInterval = "history.outboundProcessorPollBackoffInterval" - // OutboundQueueMaxReaderCount is the max number of readers in one multi-cursor outbound queue - OutboundQueueMaxReaderCount = "history.outboundQueueMaxReaderCount" - - // VisibilityTaskBatchSize is batch size for visibilityQueueProcessor - VisibilityTaskBatchSize = "history.visibilityTaskBatchSize" - // 
VisibilityProcessorMaxPollRPS is max poll rate per second for visibilityQueueProcessor - VisibilityProcessorMaxPollRPS = "history.visibilityProcessorMaxPollRPS" - // VisibilityProcessorMaxPollHostRPS is max poll rate per second for all visibilityQueueProcessor on a host - VisibilityProcessorMaxPollHostRPS = "history.visibilityProcessorMaxPollHostRPS" - // VisibilityProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for visibilityQueueProcessor - VisibilityProcessorSchedulerWorkerCount = "history.visibilityProcessorSchedulerWorkerCount" - // VisibilityProcessorSchedulerActiveRoundRobinWeights is the priority round robin weights by visibility task scheduler for active namespaces - VisibilityProcessorSchedulerActiveRoundRobinWeights = "history.visibilityProcessorSchedulerActiveRoundRobinWeights" - // VisibilityProcessorSchedulerStandbyRoundRobinWeights is the priority round robin weights by visibility task scheduler for standby namespaces - VisibilityProcessorSchedulerStandbyRoundRobinWeights = "history.visibilityProcessorSchedulerStandbyRoundRobinWeights" - // VisibilityProcessorMaxPollInterval max poll interval for visibilityQueueProcessor - VisibilityProcessorMaxPollInterval = "history.visibilityProcessorMaxPollInterval" - // VisibilityProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient - VisibilityProcessorMaxPollIntervalJitterCoefficient = "history.visibilityProcessorMaxPollIntervalJitterCoefficient" - // VisibilityProcessorUpdateAckInterval is update interval for visibilityQueueProcessor - VisibilityProcessorUpdateAckInterval = "history.visibilityProcessorUpdateAckInterval" - // VisibilityProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient - VisibilityProcessorUpdateAckIntervalJitterCoefficient = "history.visibilityProcessorUpdateAckIntervalJitterCoefficient" - // VisibilityProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size 
exceeds limit for visibilityQueueProcessor - VisibilityProcessorPollBackoffInterval = "history.visibilityProcessorPollBackoffInterval" - // VisibilityProcessorEnsureCloseBeforeDelete means we ensure the visibility of an execution is closed before we delete its visibility records - VisibilityProcessorEnsureCloseBeforeDelete = "history.visibilityProcessorEnsureCloseBeforeDelete" - // VisibilityProcessorEnableCloseWorkflowCleanup to clean up the mutable state after visibility - // close task has been processed. Must use Elasticsearch as visibility store, otherwise workflow - // data (eg: search attributes) will be lost after workflow is closed. - VisibilityProcessorEnableCloseWorkflowCleanup = "history.visibilityProcessorEnableCloseWorkflowCleanup" - // VisibilityQueueMaxReaderCount is the max number of readers in one multi-cursor visibility queue - VisibilityQueueMaxReaderCount = "history.visibilityQueueMaxReaderCount" - - // ArchivalTaskBatchSize is batch size for archivalQueueProcessor - ArchivalTaskBatchSize = "history.archivalTaskBatchSize" - // ArchivalProcessorMaxPollRPS is max poll rate per second for archivalQueueProcessor - ArchivalProcessorMaxPollRPS = "history.archivalProcessorMaxPollRPS" - // ArchivalProcessorMaxPollHostRPS is max poll rate per second for all archivalQueueProcessor on a host - ArchivalProcessorMaxPollHostRPS = "history.archivalProcessorMaxPollHostRPS" - // ArchivalProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for - // archivalQueueProcessor - ArchivalProcessorSchedulerWorkerCount = "history.archivalProcessorSchedulerWorkerCount" - // ArchivalProcessorMaxPollInterval max poll interval for archivalQueueProcessor - ArchivalProcessorMaxPollInterval = "history.archivalProcessorMaxPollInterval" - // ArchivalProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient - ArchivalProcessorMaxPollIntervalJitterCoefficient = "history.archivalProcessorMaxPollIntervalJitterCoefficient" 
- // ArchivalProcessorUpdateAckInterval is update interval for archivalQueueProcessor - ArchivalProcessorUpdateAckInterval = "history.archivalProcessorUpdateAckInterval" - // ArchivalProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient - ArchivalProcessorUpdateAckIntervalJitterCoefficient = "history.archivalProcessorUpdateAckIntervalJitterCoefficient" - // ArchivalProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for - // archivalQueueProcessor - ArchivalProcessorPollBackoffInterval = "history.archivalProcessorPollBackoffInterval" - // ArchivalProcessorArchiveDelay is the delay before archivalQueueProcessor starts to process archival tasks - ArchivalProcessorArchiveDelay = "history.archivalProcessorArchiveDelay" - // ArchivalBackendMaxRPS is the maximum rate of requests per second to the archival backend - ArchivalBackendMaxRPS = "history.archivalBackendMaxRPS" - // ArchivalQueueMaxReaderCount is the max number of readers in one multi-cursor archival queue - ArchivalQueueMaxReaderCount = "history.archivalQueueMaxReaderCount" - - // WorkflowExecutionMaxInFlightUpdates is the max number of updates that can be in-flight (admitted but not yet completed) for any given workflow execution. - WorkflowExecutionMaxInFlightUpdates = "history.maxInFlightUpdates" - // WorkflowExecutionMaxTotalUpdates is the max number of updates that any given workflow execution can receive. - WorkflowExecutionMaxTotalUpdates = "history.maxTotalUpdates" - - // ReplicatorTaskBatchSize is batch size for ReplicatorProcessor - ReplicatorTaskBatchSize = "history.replicatorTaskBatchSize" - // ReplicatorMaxSkipTaskCount is maximum number of tasks that can be skipped during tasks pagination due to not meeting filtering conditions (e.g. missed namespace). 
- ReplicatorMaxSkipTaskCount = "history.replicatorMaxSkipTaskCount" - // ReplicatorProcessorMaxPollInterval is max poll interval for ReplicatorProcessor - ReplicatorProcessorMaxPollInterval = "history.replicatorProcessorMaxPollInterval" - // ReplicatorProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient - ReplicatorProcessorMaxPollIntervalJitterCoefficient = "history.replicatorProcessorMaxPollIntervalJitterCoefficient" - // MaximumBufferedEventsBatch is the maximum permissible number of buffered events for any given mutable state. - MaximumBufferedEventsBatch = "history.maximumBufferedEventsBatch" - // MaximumBufferedEventsSizeInBytes is the maximum permissible size of all buffered events for any given mutable - // state. The total size is determined by the sum of the size, in bytes, of each HistoryEvent proto. - MaximumBufferedEventsSizeInBytes = "history.maximumBufferedEventsSizeInBytes" - // MaximumSignalsPerExecution is max number of signals supported by single execution - MaximumSignalsPerExecution = "history.maximumSignalsPerExecution" - // ShardUpdateMinInterval is the minimal time interval which the shard info can be updated - ShardUpdateMinInterval = "history.shardUpdateMinInterval" - // ShardUpdateMinTasksCompleted is the minimum number of tasks which must be completed (across all queues) before the shard info can be updated. - // Note that once history.shardUpdateMinInterval amount of time has passed we'll update the shard info regardless of the number of tasks completed. - // When the this config is zero or lower we will only update shard info at most once every history.shardUpdateMinInterval. 
- ShardUpdateMinTasksCompleted = "history.shardUpdateMinTasksCompleted" - // ShardSyncMinInterval is the minimal time interval which the shard info should be sync to remote - ShardSyncMinInterval = "history.shardSyncMinInterval" - // EmitShardLagLog whether emit the shard lag log - EmitShardLagLog = "history.emitShardLagLog" - // DefaultEventEncoding is the encoding type for history events - DefaultEventEncoding = "history.defaultEventEncoding" - // DefaultActivityRetryPolicy represents the out-of-box retry policy for activities where - // the user has not specified an explicit RetryPolicy - DefaultActivityRetryPolicy = "history.defaultActivityRetryPolicy" - // DefaultWorkflowRetryPolicy represents the out-of-box retry policy for unset fields - // where the user has set an explicit RetryPolicy, but not specified all the fields - DefaultWorkflowRetryPolicy = "history.defaultWorkflowRetryPolicy" - // HistoryMaxAutoResetPoints is the key for max number of auto reset points stored in mutableState - HistoryMaxAutoResetPoints = "history.historyMaxAutoResetPoints" - // EnableParentClosePolicy whether to ParentClosePolicy - EnableParentClosePolicy = "history.enableParentClosePolicy" - // ParentClosePolicyThreshold decides that parent close policy will be processed by sys workers(if enabled) if - // the number of children greater than or equal to this threshold - ParentClosePolicyThreshold = "history.parentClosePolicyThreshold" - // NumParentClosePolicySystemWorkflows is key for number of parentClosePolicy system workflows running in total - NumParentClosePolicySystemWorkflows = "history.numParentClosePolicySystemWorkflows" - // HistoryThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger - HistoryThrottledLogRPS = "history.throttledLogRPS" - // StickyTTL is to expire a sticky taskqueue if no update more than this duration - StickyTTL = "history.stickyTTL" - // WorkflowTaskHeartbeatTimeout for workflow task heartbeat - 
WorkflowTaskHeartbeatTimeout = "history.workflowTaskHeartbeatTimeout" - // WorkflowTaskCriticalAttempts is the number of attempts for a workflow task that's regarded as critical - WorkflowTaskCriticalAttempts = "history.workflowTaskCriticalAttempt" - // WorkflowTaskRetryMaxInterval is the maximum interval added to a workflow task's startToClose timeout for slowing down retry - WorkflowTaskRetryMaxInterval = "history.workflowTaskRetryMaxInterval" - // DefaultWorkflowTaskTimeout for a workflow task - DefaultWorkflowTaskTimeout = "history.defaultWorkflowTaskTimeout" - // SkipReapplicationByNamespaceID is whether skipping a event re-application for a namespace - SkipReapplicationByNamespaceID = "history.SkipReapplicationByNamespaceID" - // StandbyTaskReReplicationContextTimeout is the context timeout for standby task re-replication - StandbyTaskReReplicationContextTimeout = "history.standbyTaskReReplicationContextTimeout" - // MaxBufferedQueryCount indicates max buffer query count - MaxBufferedQueryCount = "history.MaxBufferedQueryCount" - // MutableStateChecksumGenProbability is the probability [0-100] that checksum will be generated for mutable state - MutableStateChecksumGenProbability = "history.mutableStateChecksumGenProbability" - // MutableStateChecksumVerifyProbability is the probability [0-100] that checksum will be verified for mutable state - MutableStateChecksumVerifyProbability = "history.mutableStateChecksumVerifyProbability" - // MutableStateChecksumInvalidateBefore is the epoch timestamp before which all checksums are to be discarded - MutableStateChecksumInvalidateBefore = "history.mutableStateChecksumInvalidateBefore" - - // ReplicationTaskFetcherParallelism determines how many go routines we spin up for fetching tasks - ReplicationTaskFetcherParallelism = "history.ReplicationTaskFetcherParallelism" - // ReplicationTaskFetcherAggregationInterval determines how frequently the fetch requests are sent - ReplicationTaskFetcherAggregationInterval = 
"history.ReplicationTaskFetcherAggregationInterval" - // ReplicationTaskFetcherTimerJitterCoefficient is the jitter for fetcher timer - ReplicationTaskFetcherTimerJitterCoefficient = "history.ReplicationTaskFetcherTimerJitterCoefficient" - // ReplicationTaskFetcherErrorRetryWait is the wait time when fetcher encounters error - ReplicationTaskFetcherErrorRetryWait = "history.ReplicationTaskFetcherErrorRetryWait" - // ReplicationTaskProcessorErrorRetryWait is the initial retry wait when we see errors in applying replication tasks - ReplicationTaskProcessorErrorRetryWait = "history.ReplicationTaskProcessorErrorRetryWait" - // ReplicationTaskProcessorErrorRetryBackoffCoefficient is the retry wait backoff time coefficient - ReplicationTaskProcessorErrorRetryBackoffCoefficient = "history.ReplicationTaskProcessorErrorRetryBackoffCoefficient" - // ReplicationTaskProcessorErrorRetryMaxInterval is the retry wait backoff max duration - ReplicationTaskProcessorErrorRetryMaxInterval = "history.ReplicationTaskProcessorErrorRetryMaxInterval" - // ReplicationTaskProcessorErrorRetryMaxAttempts is the max retry attempts for applying replication tasks - ReplicationTaskProcessorErrorRetryMaxAttempts = "history.ReplicationTaskProcessorErrorRetryMaxAttempts" - // ReplicationTaskProcessorErrorRetryExpiration is the max retry duration for applying replication tasks - ReplicationTaskProcessorErrorRetryExpiration = "history.ReplicationTaskProcessorErrorRetryExpiration" - // ReplicationTaskProcessorNoTaskInitialWait is the wait time when not ask is returned - ReplicationTaskProcessorNoTaskInitialWait = "history.ReplicationTaskProcessorNoTaskInitialWait" - // ReplicationTaskProcessorCleanupInterval determines how frequently the cleanup replication queue - ReplicationTaskProcessorCleanupInterval = "history.ReplicationTaskProcessorCleanupInterval" - // ReplicationTaskProcessorCleanupJitterCoefficient is the jitter for cleanup timer - ReplicationTaskProcessorCleanupJitterCoefficient = 
"history.ReplicationTaskProcessorCleanupJitterCoefficient" - // ReplicationTaskProcessorStartWait is the wait time before each task processing batch - ReplicationTaskProcessorStartWait = "history.ReplicationTaskProcessorStartWait" - // ReplicationTaskProcessorHostQPS is the qps of task processing rate limiter on host level - ReplicationTaskProcessorHostQPS = "history.ReplicationTaskProcessorHostQPS" - // ReplicationTaskProcessorShardQPS is the qps of task processing rate limiter on shard level - ReplicationTaskProcessorShardQPS = "history.ReplicationTaskProcessorShardQPS" - // ReplicationEnableDLQMetrics is the flag to emit DLQ metrics - ReplicationEnableDLQMetrics = "history.ReplicationEnableDLQMetrics" - // ReplicationEnableUpdateWithNewTaskMerge is the flag controlling whether replication task merging logic - // should be enabled for non continuedAsNew workflow UpdateWithNew case. - ReplicationEnableUpdateWithNewTaskMerge = "history.ReplicationEnableUpdateWithNewTaskMerge" - // HistoryTaskDLQEnabled enables the history task DLQ. This applies to internal tasks like transfer and timer tasks. - // Do not turn this on if you aren't using Cassandra as the history task DLQ is not implemented for other databases. - HistoryTaskDLQEnabled = "history.TaskDLQEnabled" - // HistoryTaskDLQUnexpectedErrorAttempts is the number of task execution attempts before sending the task to DLQ. - HistoryTaskDLQUnexpectedErrorAttempts = "history.TaskDLQUnexpectedErrorAttempts" - // HistoryTaskDLQInternalErrors causes history task processing to send tasks failing with serviceerror.Internal to - // the dlq (or will drop them if not enabled) - HistoryTaskDLQInternalErrors = "history.TaskDLQInternalErrors" - // HistoryTaskDLQErrorPattern specifies a regular expression. If a task processing error matches with this regex, - // that task will be sent to DLQ. 
- HistoryTaskDLQErrorPattern = "history.TaskDLQErrorPattern" - - // ReplicationStreamSyncStatusDuration sync replication status duration - ReplicationStreamSyncStatusDuration = "history.ReplicationStreamSyncStatusDuration" - // ReplicationStreamMinReconnectDuration minimal replication stream reconnection duration - ReplicationStreamMinReconnectDuration = "history.ReplicationStreamMinReconnectDuration" - // ReplicationProcessorSchedulerQueueSize is the replication task executor queue size - ReplicationProcessorSchedulerQueueSize = "history.ReplicationProcessorSchedulerQueueSize" - // ReplicationProcessorSchedulerWorkerCount is the replication task executor worker count - ReplicationProcessorSchedulerWorkerCount = "history.ReplicationProcessorSchedulerWorkerCount" - // EnableEagerNamespaceRefresher is a feature flag for eagerly refresh namespace during processing replication task - EnableEagerNamespaceRefresher = "history.EnableEagerNamespaceRefresher" - // EnableReplicationTaskBatching is a feature flag for batching replicate history event task - EnableReplicationTaskBatching = "history.EnableReplicationTaskBatching" - // EnableReplicateLocalGeneratedEvents is a feature flag for replicating locally generated events - EnableReplicateLocalGeneratedEvents = "history.EnableReplicateLocalGeneratedEvents" + EnableReplicationStream = NewGlobalBoolSetting( + "history.enableReplicationStream", + true, + `EnableReplicationStream turn on replication stream`, + ) + EnableCloseInboundReplicationStreamOnShutdown = NewGlobalBoolSetting( + "history.enableCloseInboundReplicationStreamOnShutdown", + true, + `EnableCloseInboundReplicationStreamOnShutdown closes inbound replication streams on shutdown, signaling the remote sender to stop. 
Disable if this causes unexpected issues during rolling restarts.`, + ) + EnableSeparateReplicationEnableFlag = NewGlobalBoolSetting( + "history.enableSeparateReplicationEnableFlag", + false, + `EnableSeparateReplicationEnableFlag controls whether to use the new ReplicationEnabled flag to control replication streams separately from cluster connectivity. When false, falls back to using only the Enabled flag for both connectivity and replication.`, + ) + EnableHistoryReplicationDLQV2 = NewGlobalBoolSetting( + "history.enableHistoryReplicationDLQV2", + true, + `EnableHistoryReplicationDLQV2 switches to the DLQ v2 implementation for history replication. See details in +[go.temporal.io/server/common/persistence.QueueV2]`, + ) + + EnableDeleteWorkflowExecutionReplication = NewGlobalBoolSetting( + "history.enableDeleteWorkflowExecutionReplication", + false, + `EnableDeleteWorkflowExecutionReplication controls whether a replication task is generated when a workflow +execution is deleted. When enabled, workflow deletions on the active cluster will be replicated to passive clusters.`, + ) + + HistoryRPS = NewGlobalIntSetting( + "history.rps", + 3000, + `HistoryRPS is request rate per second for each history host`, + ) + HistoryNamespaceRPS = NewNamespaceIntSetting( + "history.namespaceRPS", + 0, + `HistoryNamespaceRPS is namespace rate limit per second for each history host. +If value less or equal to 0, will fall back to HistoryRPS`, + ) + EnableHistoryNamespaceFairness = NewGlobalBoolSetting( + "history.enableNamespaceFairness", + false, + `EnableHistoryNamespaceFairness turns on per-namespace fair-share demotion in the history host RPS rate limiter. 
+Requests from namespaces exceeding their fair share (computed from scaleFactor and the namespace's frontend cluster-wide +RPS budget) are routed to a lower-priority bucket`, + ) + HistoryNamespaceFairShareMultiplier = NewGlobalFloatSetting( + "history.namespaceFairShareMultiplier", + 1.0, + `HistoryNamespaceFairShareMultiplier scales the per-namespace fair share used by the history host RPS rate limiter. +share(ns) = scaleFactor * FrontendGlobalNamespaceRPS(ns) * HistoryNamespaceFairShareMultiplier`, + ) + HistoryPersistenceMaxQPS = NewGlobalIntSetting( + "history.persistenceMaxQPS", + 9000, + `HistoryPersistenceMaxQPS is the max qps history host can query DB`, + ) + HistoryPersistenceGlobalMaxQPS = NewGlobalIntSetting( + "history.persistenceGlobalMaxQPS", + 0, + `HistoryPersistenceGlobalMaxQPS is the max qps history cluster can query DB`, + ) + HistoryPersistenceNamespaceMaxQPS = NewNamespaceIntSetting( + "history.persistenceNamespaceMaxQPS", + 0, + `HistoryPersistenceNamespaceMaxQPS is the max qps each namespace on history host can query DB +If value less or equal to 0, will fall back to HistoryPersistenceMaxQPS`, + ) + HistoryPersistenceGlobalNamespaceMaxQPS = NewNamespaceIntSetting( + "history.persistenceGlobalNamespaceMaxQPS", + 0, + `HistoryPersistenceGlobalNamespaceMaxQPS is the max qps each namespace in history cluster can query DB`, + ) + HistoryPersistencePerShardNamespaceMaxQPS = NewNamespaceIntSetting( + "history.persistencePerShardNamespaceMaxQPS", + 0, + `HistoryPersistencePerShardNamespaceMaxQPS is the max qps each namespace on a shard can query DB`, + ) + HistoryPersistenceDynamicRateLimitingParams = NewGlobalTypedSetting( + "history.persistenceDynamicRateLimitingParams", + DefaultDynamicRateLimitingParams, + `HistoryPersistenceDynamicRateLimitingParams is a struct that contains all adjustable dynamic rate limiting params.
+Fields: Enabled, RefreshInterval, LatencyThreshold, ErrorThreshold, RateBackoffStepSize, RateIncreaseStepSize, RateMultiMin, RateMultiMax. +See DynamicRateLimitingParams comments for more details.`, + ) + EnableBestEffortDeleteTasksOnWorkflowUpdate = NewGlobalBoolSetting( + "history.enableBestEffortDeleteTasksOnWorkflowUpdate", + false, + `Enable deletion of requested history tasks (e.g., WFT timeout tasks) right after a successful UpdateWorkflowExecution. + WARNING: Turning on this config can create a large number of tombstones in cassandra and degrade performance, use with caution.`, + ) + HistoryLongPollExpirationInterval = NewNamespaceDurationSetting( + "history.longPollExpirationInterval", + time.Second*20, + `HistoryLongPollExpirationInterval is the long poll expiration interval in the history service`, + ) + HistoryCacheSizeBasedLimit = NewGlobalBoolSetting( + "history.cacheSizeBasedLimit", + false, + `HistoryCacheSizeBasedLimit if true, size of the history cache will be limited by HistoryCacheMaxSizeBytes +and HistoryCacheHostLevelMaxSizeBytes. Otherwise, entry count in the history cache will be limited by +HistoryCacheMaxSize and HistoryCacheHostLevelMaxSize. Requires service restart to take effect.`, + ) + HistoryCacheTTL = NewGlobalDurationSetting( + "history.cacheTTL", + time.Hour, + `HistoryCacheTTL is TTL of history cache. Requires service restart to take effect.`, + ) + HistoryCacheNonUserContextLockTimeout = NewGlobalDurationSetting( + "history.cacheNonUserContextLockTimeout", + 500*time.Millisecond, + `HistoryCacheNonUserContextLockTimeout controls how long non-user call (callerType != API or Operator) +will wait on workflow lock acquisition. Requires service restart to take effect.`, + ) + HistoryCacheHostLevelMaxSize = NewGlobalIntSetting( + "history.hostLevelCacheMaxSize", + 128000, + `HistoryCacheHostLevelMaxSize is the maximum number of entries in the host level history cache. 
+Requires service restart to take effect.`, + ) + HistoryCacheHostLevelMaxSizeBytes = NewGlobalIntSetting( + "history.hostLevelCacheMaxSizeBytes", + 256000*4*1024, + `HistoryCacheHostLevelMaxSizeBytes is the maximum size of the host level history cache. This is only used if +HistoryCacheSizeBasedLimit is set to true. Requires service restart to take effect.`, + ) + HistoryCacheBackgroundEvict = NewGlobalTypedSetting( + "history.cacheBackgroundEvict", + DefaultHistoryCacheBackgroundEvictSettings, + `HistoryCacheBackgroundEvict configures background processing to purge expired entries from the history cache. +Requires service restart to take effect.`, + ) + EnableWorkflowExecutionTimeoutTimer = NewGlobalBoolSetting( + "history.enableWorkflowExecutionTimeoutTimer", + true, + `EnableWorkflowExecutionTimeoutTimer controls whether to enable the new logic for generating a workflow execution +timeout timer when execution timeout is specified when starting a workflow.`, + ) + EnableUpdateWorkflowModeIgnoreCurrent = NewGlobalBoolSetting( + "history.enableUpdateWorkflowModeIgnoreCurrent", + true, + `EnableUpdateWorkflowModeIgnoreCurrent controls whether to enable the new logic for updating closed workflow execution +by mutation using UpdateWorkflowModeIgnoreCurrent`, + ) + EnableTransitionHistory = NewNamespaceBoolSetting( + "history.enableTransitionHistory", + true, + `EnableTransitionHistory controls whether to enable the new logic for recording the history for each state transition.`, + ) + HistoryStartupMembershipJoinDelay = NewGlobalDurationSetting( + "history.startupMembershipJoinDelay", + 0*time.Second, + `HistoryStartupMembershipJoinDelay is the duration a history instance waits +before joining membership after starting.`, + ) + HistoryAlignMembershipChange = NewGlobalDurationSetting( + "history.alignMembershipChange", + 0*time.Second, + `HistoryAlignMembershipChange is a duration to align history's membership changes to. 
+This can help reduce effects of shard movement.`, + ) + HistoryShutdownDrainDuration = NewGlobalDurationSetting( + "history.shutdownDrainDuration", + 0*time.Second, + `HistoryShutdownDrainDuration is the duration of traffic drain during shutdown`, + ) + XDCCacheMaxSizeBytes = NewGlobalIntSetting( + "history.xdcCacheMaxSizeBytes", + 8*1024*1024, + `XDCCacheMaxSizeBytes is max size of events cache in bytes`, + ) + EventsCacheMaxSizeBytes = NewGlobalIntSetting( + "history.eventsCacheMaxSizeBytes", + 512*1024, + `EventsCacheMaxSizeBytes is max size of the shard level events cache in bytes. Requires service restart to take effect.`, + ) + EventsHostLevelCacheMaxSizeBytes = NewGlobalIntSetting( + "history.eventsHostLevelCacheMaxSizeBytes", + 512*512*1024, + `EventsHostLevelCacheMaxSizeBytes is max size of the host level events cache in bytes. Requires service restart to take effect.`, + ) + EventsCacheTTL = NewGlobalDurationSetting( + "history.eventsCacheTTL", + time.Hour, + `EventsCacheTTL is TTL of events cache. Requires service restart to take effect.`, + ) + EnableHostLevelEventsCache = NewGlobalBoolSetting( + "history.enableHostLevelEventsCache", + false, + `EnableHostLevelEventsCache controls if the events cache is host level. 
Requires service restart to take effect.`, + ) + AcquireShardInterval = NewGlobalDurationSetting( + "history.acquireShardInterval", + time.Minute, + `AcquireShardInterval is interval that timer used to acquire shard`, + ) + AcquireShardConcurrency = NewGlobalIntSetting( + "history.acquireShardConcurrency", + 10, + `AcquireShardConcurrency is number of goroutines that can be used to acquire shards in the shard controller.`, + ) + ShardLingerOwnershipCheckQPS = NewGlobalIntSetting( + "history.shardLingerOwnershipCheckQPS", + 4, + `ShardLingerOwnershipCheckQPS is the frequency to perform shard ownership +checks while a shard is lingering.`, + ) + ShardLingerTimeLimit = NewGlobalDurationSetting( + "history.shardLingerTimeLimit", + 0, + `ShardLingerTimeLimit configures if and for how long the shard controller +will temporarily delay closing shards after a membership update, awaiting a +shard ownership lost error from persistence. If set to zero, shards will not delay closing. +Do NOT use non-zero value with persistence layers that are missing AssertShardOwnership support.`, + ) + ShardFinalizerTimeout = NewGlobalDurationSetting( + "history.shardFinalizerTimeout", + 2*time.Second, + `ShardFinalizerTimeout configures if and for how long the shard will attempt +to cleanup any of its associated data, such as workflow contexts. If set to zero, the finalizer is disabled.`, + ) + HistoryClientOwnershipCachingEnabled = NewGlobalBoolSetting( + "history.clientOwnershipCachingEnabled", + false, + `HistoryClientOwnershipCachingEnabled configures if history clients try to cache +shard ownership information, instead of checking membership for each request. 
+Only inspected when an instance first creates a history client, so changes +to this require a restart to take effect.`, + ) + HistoryClientOwnershipCachingStaleTTL = NewGlobalDurationSetting( + "history.clientOwnershipCachingUnusedTTL", + 30*time.Second, + `HistoryClientOwnershipCachingStaleTTL, if non-zero, configures the TTL +for cached shard ownership entries after a membership update.`, + ) + ShardIOConcurrency = NewGlobalIntSetting( + "history.shardIOConcurrency", + 1, + `ShardIOConcurrency controls the concurrency of persistence operations in shard context`, + ) + ShardIOTimeout = NewGlobalDurationSetting( + "history.shardIOTimeout", + 5*time.Second*debug.TimeoutMultiplier, + `ShardIOTimeout sets the timeout for persistence operations in the shard context`, + ) + StandbyClusterDelay = NewGlobalDurationSetting( + "history.standbyClusterDelay", + 5*time.Minute, + `StandbyClusterDelay is the artificial delay added to standby cluster's view of active cluster's time`, + ) + StandbyTaskMissingEventsResendDelay = NewTaskTypeDurationSetting( + "history.standbyTaskMissingEventsResendDelay", + 10*time.Minute, + `StandbyTaskMissingEventsResendDelay is the amount of time standby cluster will wait (if events are missing) +before calling remote for missing events`, + ) + StandbyTaskMissingEventsDiscardDelay = NewTaskTypeDurationSetting( + "history.standbyTaskMissingEventsDiscardDelay", + 15*time.Minute, + `StandbyTaskMissingEventsDiscardDelay is the amount of time standby cluster will wait (if events are missing) +before discarding the task`, + ) + ChasmStandbyTaskDiscardDelay = NewChasmTaskTypeDurationSetting( + "history.ChasmStandbyTaskDiscardDelay", + 24*time.Hour, + `ChasmStandbyTaskDiscardDelay is the amount of time standby cluster will wait +before discarding a CHASM task. Configurable per RegistrableTask type (e.g. "activity.dispatch").
+The default is intentionally much higher than the non CHASM standby discard delay because +discarding a CHASM task can leave the execution in a stuck state after failover. Task types +that can be safely offloaded should be configured with a shorter delay.`, + ) + QueuePendingTaskCriticalCount = NewGlobalIntSetting( + "history.queuePendingTaskCriticalCount", + 9000, + `Max number of pending tasks in a history queue before triggering slice splitting and unloading. +NOTE: The outbound queue has a separate configuration: outboundQueuePendingTaskCriticalCount.`, + ) + QueueReaderStuckCriticalAttempts = NewGlobalIntSetting( + "history.queueReaderStuckCriticalAttempts", + 3, + `QueueReaderStuckCriticalAttempts is the max number of task loading attempts for a certain task range +before that task range is split into a separate slice to unblock loading for later range. +currently only work for scheduled queues and the task range is 1s.`, + ) + QueueCriticalSlicesCount = NewGlobalIntSetting( + "history.queueCriticalSlicesCount", + 50, + `QueueCriticalSlicesCount is the max number of slices in one queue +before force compacting slices`, + ) + QueuePendingTaskMaxCount = NewGlobalIntSetting( + "history.queuePendingTasksMaxCount", + 10000, + `The max number of task pending tasks in a history queue before stopping loading new tasks into memory. This +limit is in addition to queuePendingTaskCriticalCount which controls when to unload already loaded tasks but doesn't +prevent loading new tasks. Ideally this max count limit should not be hit and task unloading should happen once critical +count is exceeded. But since queue action is async, we need this hard limit. +NOTE: The outbound queue has a separate configuration: outboundQueuePendingTaskMaxCount. +`, + ) + QueueMaxPredicateSize = NewGlobalIntSetting( + "history.queueMaxPredicateSize", + 10*1024, + `The max size of the multi-cursor predicate structure stored in the shard info record. 0 is considered +unlimited. 
When the predicate size is surpassed for a given scope, the predicate is converted to a universal predicate, +which causes all tasks in the scope's range to eventually be reprocessed without applying any filtering logic. +NOTE: The outbound queue has a separate configuration: outboundQueueMaxPredicateSize. +`, + ) + QueueMoveGroupTaskCountBase = NewGlobalIntSetting( + "history.queueMoveGroupTaskCountBase", + 500, + `The base number of pending tasks count for a task group to be moved to the next level reader. +The actual count is calculated as base * (multiplier ^ level)`, + ) + QueueMoveGroupTaskCountMultiplier = NewGlobalFloatSetting( + "history.queueMoveGroupTaskCountMultiplier", + 3.0, + `The multiplier used to calculate the number of pending tasks for a task group to be moved to the next level reader. +The actual count is calculated as base * (multiplier ^ level)`, + ) + + TaskSchedulerEnableRateLimiter = NewGlobalBoolSetting( + "history.taskSchedulerEnableRateLimiter", + false, + `TaskSchedulerEnableRateLimiter indicates if task scheduler rate limiter should be enabled`, + ) + TaskSchedulerEnableRateLimiterShadowMode = NewGlobalBoolSetting( + "history.taskSchedulerEnableRateLimiterShadowMode", + true, + `TaskSchedulerEnableRateLimiterShadowMode indicates if task scheduler rate limiter should run in shadow mode +i.e. 
through rate limiter and emit metrics but do not actually block/throttle task scheduling`, + ) + TaskSchedulerRateLimiterStartupDelay = NewGlobalDurationSetting( + "history.taskSchedulerRateLimiterStartupDelay", + 5*time.Second, + `TaskSchedulerRateLimiterStartupDelay is the duration to wait after startup before enforcing task scheduler rate limiting`, + ) + TaskSchedulerGlobalMaxQPS = NewGlobalIntSetting( + "history.taskSchedulerGlobalMaxQPS", + 0, + `TaskSchedulerGlobalMaxQPS is the max qps all task schedulers in the cluster can schedule tasks +If value less or equal to 0, will fall back to TaskSchedulerMaxQPS`, + ) + TaskSchedulerMaxQPS = NewGlobalIntSetting( + "history.taskSchedulerMaxQPS", + 0, + `TaskSchedulerMaxQPS is the max qps task schedulers on a host can schedule tasks +If value less or equal to 0, will fall back to HistoryPersistenceMaxQPS`, + ) + TaskSchedulerGlobalNamespaceMaxQPS = NewNamespaceIntSetting( + "history.taskSchedulerGlobalNamespaceMaxQPS", + 0, + `TaskSchedulerGlobalNamespaceMaxQPS is the max qps all task schedulers in the cluster can schedule tasks for a certain namespace +If value less or equal to 0, will fall back to TaskSchedulerNamespaceMaxQPS`, + ) + TaskSchedulerNamespaceMaxQPS = NewNamespaceIntSetting( + "history.taskSchedulerNamespaceMaxQPS", + 0, + `TaskSchedulerNamespaceMaxQPS is the max qps task schedulers on a host can schedule tasks for a certain namespace +If value less or equal to 0, will fall back to HistoryPersistenceNamespaceMaxQPS`, + ) + TaskSchedulerInactiveChannelDeletionDelay = NewGlobalDurationSetting( + "history.taskSchedulerInactiveChannelDeletionDelay", + time.Hour, + `TaskSchedulerInactiveChannelDeletionDelay is the time delay before a namespace's channel is removed from the scheduler`, + ) + TaskSchedulerEnableExecutionQueueScheduler = NewGlobalBoolSetting( + "history.taskSchedulerEnableExecutionQueueScheduler", + false, + `TaskSchedulerEnableExecutionQueueScheduler enables the execution queue scheduler +that
processes tasks for contended workflows sequentially to avoid busy workflow errors`, + ) + TaskSchedulerExecutionQueueSchedulerMaxQueues = NewGlobalIntSetting( + "history.taskSchedulerExecutionQueueSchedulerMaxQueues", + 500, + `TaskSchedulerExecutionQueueSchedulerMaxQueues is the maximum number of concurrent per-workflow queues in the execution queue scheduler. +When this limit is reached, new workflows will fall back to the base FIFO scheduler.`, + ) + TaskSchedulerExecutionQueueSchedulerQueueTTL = NewGlobalDurationSetting( + "history.taskSchedulerExecutionQueueSchedulerQueueTTL", + 5*time.Second, + `TaskSchedulerExecutionQueueSchedulerQueueTTL is how long a per-workflow queue goroutine waits idle before exiting.`, + ) + + TaskSchedulerExecutionQueueSchedulerQueueConcurrency = NewGlobalIntSetting( + "history.taskSchedulerExecutionQueueSchedulerQueueConcurrency", + 2, + `TaskSchedulerExecutionQueueSchedulerQueueConcurrency is the max number of worker goroutines per workflow queue. +Higher values allow limited parallelism per workflow. 
Values <= 0 are capped to 1.`, + ) + + TimerTaskBatchSize = NewGlobalIntSetting( + "history.timerTaskBatchSize", + 100, + `TimerTaskBatchSize is batch size for timer processor to process tasks`, + ) + TimerProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.timerProcessorSchedulerWorkerCount", + 512, + `TimerProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for timer processor`, + ) + TimerProcessorSchedulerActiveRoundRobinWeights = NewNamespaceMapSetting( + "history.timerProcessorSchedulerActiveRoundRobinWeights", + nil, // actual default is in service/history/configs package + `TimerProcessorSchedulerActiveRoundRobinWeights is the priority round robin weights used by timer task scheduler for active namespaces`, + ) + TimerProcessorSchedulerStandbyRoundRobinWeights = NewNamespaceMapSetting( + "history.timerProcessorSchedulerStandbyRoundRobinWeights", + nil, // actual default is in service/history/configs package + `TimerProcessorSchedulerStandbyRoundRobinWeights is the priority round robin weights used by timer task scheduler for standby namespaces`, + ) + TimerProcessorUpdateAckInterval = NewGlobalDurationSetting( + "history.timerProcessorUpdateAckInterval", + 30*time.Second, + `TimerProcessorUpdateAckInterval is update interval for timer processor`, + ) + TimerProcessorUpdateAckIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.timerProcessorUpdateAckIntervalJitterCoefficient", + 0.15, + `TimerProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient`, + ) + TimerProcessorMaxPollRPS = NewGlobalIntSetting( + "history.timerProcessorMaxPollRPS", + 20, + `TimerProcessorMaxPollRPS is max poll rate per second for timer processor`, + ) + TimerProcessorMaxPollHostRPS = NewGlobalIntSetting( + "history.timerProcessorMaxPollHostRPS", + 0, + `TimerProcessorMaxPollHostRPS is max poll rate per second for all timer processor on a host`, + ) + TimerProcessorMaxPollInterval = 
NewGlobalDurationSetting( + "history.timerProcessorMaxPollInterval", + 5*time.Minute, + `TimerProcessorMaxPollInterval is max poll interval for timer processor`, + ) + TimerProcessorMaxPollIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.timerProcessorMaxPollIntervalJitterCoefficient", + 0.15, + `TimerProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient`, + ) + TimerProcessorPollBackoffInterval = NewGlobalDurationSetting( + "history.timerProcessorPollBackoffInterval", + 5*time.Second, + `TimerProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for timer processor`, + ) + TimerProcessorMaxTimeShift = NewGlobalDurationSetting( + "history.timerProcessorMaxTimeShift", + 1*time.Second, + `TimerProcessorMaxTimeShift is the max shift timer processor can have`, + ) + TimerQueueMaxReaderCount = NewGlobalIntSetting( + "history.timerQueueMaxReaderCount", + 2, + `TimerQueueMaxReaderCount is the max number of readers in one multi-cursor timer queue`, + ) + RetentionTimerJitterDuration = NewGlobalDurationSetting( + "history.retentionTimerJitterDuration", + 30*time.Minute, + `RetentionTimerJitterDuration is a time duration jitter to distribute timer from T0 to T0 + jitter duration`, + ) + + MemoryTimerProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.memoryTimerProcessorSchedulerWorkerCount", + 64, + `MemoryTimerProcessorSchedulerWorkerCount is the number of workers in the task scheduler for in memory timer processor.`, + ) + + TransferTaskBatchSize = NewGlobalIntSetting( + "history.transferTaskBatchSize", + 100, + `TransferTaskBatchSize is batch size for transferQueueProcessor`, + ) + TransferProcessorMaxPollRPS = NewGlobalIntSetting( + "history.transferProcessorMaxPollRPS", + 20, + `TransferProcessorMaxPollRPS is max poll rate per second for transferQueueProcessor`, + ) + TransferProcessorMaxPollHostRPS = NewGlobalIntSetting( + "history.transferProcessorMaxPollHostRPS", + 
0, + `TransferProcessorMaxPollHostRPS is max poll rate per second for all transferQueueProcessor on a host`, + ) + TransferProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.transferProcessorSchedulerWorkerCount", + 512, + `TransferProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for transferQueueProcessor`, + ) + TransferProcessorSchedulerActiveRoundRobinWeights = NewNamespaceMapSetting( + "history.transferProcessorSchedulerActiveRoundRobinWeights", + nil, // actual default is in service/history/configs package + `TransferProcessorSchedulerActiveRoundRobinWeights is the priority round robin weights used by transfer task scheduler for active namespaces`, + ) + TransferProcessorSchedulerStandbyRoundRobinWeights = NewNamespaceMapSetting( + "history.transferProcessorSchedulerStandbyRoundRobinWeights", + nil, // actual default is in service/history/configs package + `TransferProcessorSchedulerStandbyRoundRobinWeights is the priority round robin weights used by transfer task scheduler for standby namespaces`, + ) + TransferProcessorMaxPollInterval = NewGlobalDurationSetting( + "history.transferProcessorMaxPollInterval", + 1*time.Minute, + `TransferProcessorMaxPollInterval max poll interval for transferQueueProcessor`, + ) + TransferProcessorMaxPollIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.transferProcessorMaxPollIntervalJitterCoefficient", + 0.15, + `TransferProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient`, + ) + TransferProcessorUpdateAckInterval = NewGlobalDurationSetting( + "history.transferProcessorUpdateAckInterval", + 30*time.Second, + `TransferProcessorUpdateAckInterval is update interval for transferQueueProcessor`, + ) + TransferProcessorUpdateAckIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.transferProcessorUpdateAckIntervalJitterCoefficient", + 0.15, + `TransferProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter 
coefficient`, + ) + TransferProcessorPollBackoffInterval = NewGlobalDurationSetting( + "history.transferProcessorPollBackoffInterval", + 5*time.Second, + `TransferProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for transferQueueProcessor`, + ) + TransferProcessorEnsureCloseBeforeDelete = NewGlobalBoolSetting( + "history.transferProcessorEnsureCloseBeforeDelete", + true, + `TransferProcessorEnsureCloseBeforeDelete means we ensure the execution is closed before we delete it`, + ) + TransferQueueMaxReaderCount = NewGlobalIntSetting( + "history.transferQueueMaxReaderCount", + 2, + `TransferQueueMaxReaderCount is the max number of readers in one multi-cursor transfer queue`, + ) + + OutboundTaskBatchSize = NewGlobalIntSetting( + "history.outboundTaskBatchSize", + 100, + `OutboundTaskBatchSize is batch size for outboundQueueFactory`, + ) + OutboundQueuePendingTaskMaxCount = NewGlobalIntSetting( + "history.outboundQueuePendingTasksMaxCount", + 10000, + `The max number of task pending tasks in the outbound queue before stopping loading new tasks into memory. This +limit is in addition to outboundQueuePendingTaskCriticalCount which controls when to unload already loaded tasks but +doesn't prevent loading new tasks. Ideally this max count limit should not be hit and task unloading should happen once +critical count is exceeded. But since queue action is async, we need this hard limit. +`, + ) + OutboundQueuePendingTaskCriticalCount = NewGlobalIntSetting( + "history.outboundQueuePendingTaskCriticalCount", + 9000, + `Max number of pending tasks in the outbound queue before triggering slice splitting and unloading.`, + ) + OutboundQueueMaxPredicateSize = NewGlobalIntSetting( + "history.outboundQueueMaxPredicateSize", + 10*1024, + `The max size of the multi-cursor predicate structure stored in the shard info record for the outbound queue. 0 +is considered unlimited. 
When the predicate size is surpassed for a given scope, the predicate is converted to a +universal predicate, which causes all tasks in the scope's range to eventually be reprocessed without applying any +filtering logic. +`, + ) + + OutboundProcessorMaxPollRPS = NewGlobalIntSetting( + "history.outboundProcessorMaxPollRPS", + 20, + `OutboundProcessorMaxPollRPS is max poll rate per second for outboundQueueFactory`, + ) + OutboundProcessorMaxPollHostRPS = NewGlobalIntSetting( + "history.outboundProcessorMaxPollHostRPS", + 0, + `OutboundProcessorMaxPollHostRPS is max poll rate per second for all outboundQueueFactory on a host`, + ) + OutboundProcessorMaxPollInterval = NewGlobalDurationSetting( + "history.outboundProcessorMaxPollInterval", + 1*time.Minute, + `OutboundProcessorMaxPollInterval max poll interval for outboundQueueFactory`, + ) + OutboundProcessorMaxPollIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.outboundProcessorMaxPollIntervalJitterCoefficient", + 0.15, + `OutboundProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient`, + ) + OutboundProcessorUpdateAckInterval = NewGlobalDurationSetting( + "history.outboundProcessorUpdateAckInterval", + 30*time.Second, + `OutboundProcessorUpdateAckInterval is update interval for outboundQueueFactory`, + ) + OutboundProcessorUpdateAckIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.outboundProcessorUpdateAckIntervalJitterCoefficient", + 0.15, + `OutboundProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient`, + ) + OutboundProcessorPollBackoffInterval = NewGlobalDurationSetting( + "history.outboundProcessorPollBackoffInterval", + 5*time.Second, + `OutboundProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for outboundQueueFactory`, + ) + OutboundQueueMaxReaderCount = NewGlobalIntSetting( + "history.outboundQueueMaxReaderCount", + 4, + `OutboundQueueMaxReaderCount is the max number of 
readers in one multi-cursor outbound queue`, + ) + OutboundQueueGroupLimiterBufferSize = NewDestinationIntSetting( + "history.outboundQueue.groupLimiter.bufferSize", + 100, + `OutboundQueueGroupLimiterBufferSize is the max buffer size of the group limiter`, + ) + OutboundQueueGroupLimiterConcurrency = NewDestinationIntSetting( + "history.outboundQueue.groupLimiter.concurrency", + 100, + `OutboundQueueGroupLimiterConcurrency is the concurrency of the group limiter`, + ) + OutboundQueueHostSchedulerMaxTaskRPS = NewDestinationFloatSetting( + "history.outboundQueue.hostScheduler.maxTaskRPS", + 100.0, + `OutboundQueueHostSchedulerMaxTaskRPS is the host scheduler max task RPS`, + ) + OutboundQueueCircuitBreakerSettings = NewDestinationTypedSetting( + "history.outboundQueue.circuitBreakerSettings", + CircuitBreakerSettings{}, + `OutboundQueueCircuitBreakerSettings are circuit breaker settings. +Fields (see gobreaker reference for more details): +- MaxRequests: Maximum number of requests allowed to pass through when it is half-open (default 1). +- Interval (duration): Cyclic period in closed state to clear the internal counts; + if interval is 0, then it never clears the internal counts (default 0). +- Timeout (duration): Period of open state before changing to half-open state (default 60s).`, + ) + OutboundStandbyTaskMissingEventsDiscardDelay = NewDestinationDurationSetting( + "history.outboundQueue.standbyTaskMissingEventsDiscardDelay", + // This is effectively equivalent to never discarding outbound tasks since it's 290+ years. 
+ time.Duration(math.MaxInt64), + `OutboundStandbyTaskMissingEventsDiscardDelay is the equivalent of +StandbyTaskMissingEventsDiscardDelay for outbound standby task processor.`, + ) + OutboundStandbyTaskMissingEventsDestinationDownErr = NewDestinationBoolSetting( + "history.outboundQueue.standbyTaskMissingEventsDestinationDownErr", + true, + `OutboundStandbyTaskMissingEventsDestinationDownErr enables returning DestinationDownError when +the outbound standby task failed to be processed due to missing events.`, + ) + + VisibilityTaskBatchSize = NewGlobalIntSetting( + "history.visibilityTaskBatchSize", + 100, + `VisibilityTaskBatchSize is batch size for visibilityQueueProcessor`, + ) + VisibilityProcessorMaxPollRPS = NewGlobalIntSetting( + "history.visibilityProcessorMaxPollRPS", + 20, + `VisibilityProcessorMaxPollRPS is max poll rate per second for visibilityQueueProcessor`, + ) + VisibilityProcessorMaxPollHostRPS = NewGlobalIntSetting( + "history.visibilityProcessorMaxPollHostRPS", + 0, + `VisibilityProcessorMaxPollHostRPS is max poll rate per second for all visibilityQueueProcessor on a host`, + ) + VisibilityProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.visibilityProcessorSchedulerWorkerCount", + 512, + `VisibilityProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for visibilityQueueProcessor`, + ) + VisibilityProcessorSchedulerActiveRoundRobinWeights = NewNamespaceMapSetting( + "history.visibilityProcessorSchedulerActiveRoundRobinWeights", + nil, // actual default is in service/history/configs package + `VisibilityProcessorSchedulerActiveRoundRobinWeights is the priority round robin weights by visibility task scheduler for active namespaces`, + ) + VisibilityProcessorSchedulerStandbyRoundRobinWeights = NewNamespaceMapSetting( + "history.visibilityProcessorSchedulerStandbyRoundRobinWeights", + nil, // actual default is in service/history/configs package + `VisibilityProcessorSchedulerStandbyRoundRobinWeights 
is the priority round robin weights by visibility task scheduler for standby namespaces`, + ) + VisibilityProcessorMaxPollInterval = NewGlobalDurationSetting( + "history.visibilityProcessorMaxPollInterval", + 1*time.Minute, + `VisibilityProcessorMaxPollInterval max poll interval for visibilityQueueProcessor`, + ) + VisibilityProcessorMaxPollIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.visibilityProcessorMaxPollIntervalJitterCoefficient", + 0.15, + `VisibilityProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient`, + ) + VisibilityProcessorUpdateAckInterval = NewGlobalDurationSetting( + "history.visibilityProcessorUpdateAckInterval", + 30*time.Second, + `VisibilityProcessorUpdateAckInterval is update interval for visibilityQueueProcessor`, + ) + VisibilityProcessorUpdateAckIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.visibilityProcessorUpdateAckIntervalJitterCoefficient", + 0.15, + `VisibilityProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient`, + ) + VisibilityProcessorPollBackoffInterval = NewGlobalDurationSetting( + "history.visibilityProcessorPollBackoffInterval", + 5*time.Second, + `VisibilityProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for visibilityQueueProcessor`, + ) + VisibilityProcessorEnsureCloseBeforeDelete = NewGlobalBoolSetting( + "history.visibilityProcessorEnsureCloseBeforeDelete", + false, + `VisibilityProcessorEnsureCloseBeforeDelete means we ensure the visibility of an execution is closed before we delete its visibility records`, + ) + VisibilityProcessorEnableCloseWorkflowCleanup = NewNamespaceBoolSetting( + "history.visibilityProcessorEnableCloseWorkflowCleanup", + false, + `VisibilityProcessorEnableCloseWorkflowCleanup to clean up the mutable state after visibility +close task has been processed. 
Must use Elasticsearch as visibility store, otherwise workflow +data (eg: search attributes) will be lost after workflow is closed.`, + ) + VisibilityProcessorRelocateAttributesMinBlobSize = NewNamespaceIntSetting( + "history.visibilityProcessorRelocateAttributesMinBlobSize", + 0, + `VisibilityProcessorRelocateAttributesMinBlobSize is the minimum size in bytes of memo or search +attributes.`, + ) + VisibilityQueueMaxReaderCount = NewGlobalIntSetting( + "history.visibilityQueueMaxReaderCount", + 2, + `VisibilityQueueMaxReaderCount is the max number of readers in one multi-cursor visibility queue`, + ) + + DisableFetchRelocatableAttributesFromVisibility = NewNamespaceBoolSetting( + "history.disableFetchRelocatableAttributesFromVisibility", + false, + `DisableFetchRelocatableAttributesFromVisibility disables fetching memo and search attributes from +visibility if they were removed from the mutable state`, + ) + + ArchivalTaskBatchSize = NewGlobalIntSetting( + "history.archivalTaskBatchSize", + 100, + `ArchivalTaskBatchSize is batch size for archivalQueueProcessor`, + ) + ArchivalProcessorMaxPollRPS = NewGlobalIntSetting( + "history.archivalProcessorMaxPollRPS", + 20, + `ArchivalProcessorMaxPollRPS is max poll rate per second for archivalQueueProcessor`, + ) + ArchivalProcessorMaxPollHostRPS = NewGlobalIntSetting( + "history.archivalProcessorMaxPollHostRPS", + 0, + `ArchivalProcessorMaxPollHostRPS is max poll rate per second for all archivalQueueProcessor on a host`, + ) + ArchivalProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.archivalProcessorSchedulerWorkerCount", + 512, + `ArchivalProcessorSchedulerWorkerCount is the number of workers in the host level task scheduler for +archivalQueueProcessor`, + ) + ArchivalProcessorMaxPollInterval = NewGlobalDurationSetting( + "history.archivalProcessorMaxPollInterval", + 5*time.Minute, + `ArchivalProcessorMaxPollInterval max poll interval for archivalQueueProcessor`, + ) + 
ArchivalProcessorMaxPollIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.archivalProcessorMaxPollIntervalJitterCoefficient", + 0.15, + `ArchivalProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient`, + ) + ArchivalProcessorUpdateAckInterval = NewGlobalDurationSetting( + "history.archivalProcessorUpdateAckInterval", + 30*time.Second, + `ArchivalProcessorUpdateAckInterval is update interval for archivalQueueProcessor`, + ) + ArchivalProcessorUpdateAckIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.archivalProcessorUpdateAckIntervalJitterCoefficient", + 0.15, + `ArchivalProcessorUpdateAckIntervalJitterCoefficient is the update interval jitter coefficient`, + ) + ArchivalProcessorPollBackoffInterval = NewGlobalDurationSetting( + "history.archivalProcessorPollBackoffInterval", + 5*time.Second, + `ArchivalProcessorPollBackoffInterval is the poll backoff interval if task redispatcher's size exceeds limit for +archivalQueueProcessor`, + ) + ArchivalProcessorArchiveDelay = NewGlobalDurationSetting( + "history.archivalProcessorArchiveDelay", + 5*time.Minute, + `ArchivalProcessorArchiveDelay is the delay before archivalQueueProcessor starts to process archival tasks`, + ) + ArchivalBackendMaxRPS = NewGlobalFloatSetting( + "history.archivalBackendMaxRPS", + 10000.0, + `ArchivalBackendMaxRPS is the maximum rate of requests per second to the archival backend`, + ) + ArchivalQueueMaxReaderCount = NewGlobalIntSetting( + "history.archivalQueueMaxReaderCount", + 2, + `ArchivalQueueMaxReaderCount is the max number of readers in one multi-cursor archival queue`, + ) + + WorkflowExecutionMaxInFlightUpdates = NewNamespaceIntSetting( + "history.maxInFlightUpdates", + 10, + `WorkflowExecutionMaxInFlightUpdates is the max number of updates that can be in-flight (admitted but not yet completed) for any given workflow execution. 
Set to zero to disable limit.`, + ) + WorkflowExecutionMaxInFlightUpdatePayloads = NewNamespaceIntSetting( + "history.maxInFlightUpdatePayloads", + 20*1024*1024, + `WorkflowExecutionMaxInFlightUpdatePayloads is the max total payload size (in bytes) of in-flight updates (admitted but not yet completed) for any given workflow execution. Set to zero to disable.`, + ) + WorkflowExecutionMaxTotalUpdates = NewNamespaceIntSetting( + "history.maxTotalUpdates", + 2000, + `WorkflowExecutionMaxTotalUpdates is the max number of updates that any given workflow execution can receive. Set to zero to disable.`, + ) + WorkflowExecutionMaxTotalUpdatesSuggestContinueAsNewThreshold = NewNamespaceFloatSetting( + "history.maxTotalUpdates.suggestContinueAsNewThreshold", + 0.9, + `WorkflowExecutionMaxTotalUpdatesSuggestContinueAsNewThreshold is the percentage threshold of total updates that any given workflow execution can receive before suggesting to continue-as-new.`, + ) + EnableUpdateWithStartRetryOnClosedWorkflowAbort = NewNamespaceBoolSetting( + "history.enableUpdateWithStartRetryOnClosedWorkflowAbort", + true, + `EnableUpdateWithStartRetryOnClosedWorkflowAbort enables retrying Update-with-Start's update if it was aborted by a closing workflow.`, + ) + EnableUpdateWithStartRetryableErrorOnClosedWorkflowAbort = NewNamespaceBoolSetting( + "history.enableUpdateWithStartRetryableErrorOnClosedWorkflowAbort", + true, + `EnableUpdateWithStartRetryableErrorOnClosedWorkflowAbort enables sending back a retryable status code when the Update-with-Start's update was aborted by a closing workflow.`, + ) + + ReplicatorTaskBatchSize = NewGlobalIntSetting( + "history.replicatorTaskBatchSize", + 100, + `ReplicatorTaskBatchSize is batch size for ReplicatorProcessor`, + ) + ReplicatorMaxSkipTaskCount = NewGlobalIntSetting( + "history.replicatorMaxSkipTaskCount", + 250, + `ReplicatorMaxSkipTaskCount is maximum number of tasks that can be skipped during tasks pagination due to not meeting filtering 
conditions (e.g. missed namespace).`, + ) + ReplicatorProcessorMaxPollInterval = NewGlobalDurationSetting( + "history.replicatorProcessorMaxPollInterval", + 1*time.Minute, + `ReplicatorProcessorMaxPollInterval is max poll interval for ReplicatorProcessor`, + ) + ReplicatorProcessorMaxPollIntervalJitterCoefficient = NewGlobalFloatSetting( + "history.replicatorProcessorMaxPollIntervalJitterCoefficient", + 0.15, + `ReplicatorProcessorMaxPollIntervalJitterCoefficient is the max poll interval jitter coefficient`, + ) + MaximumBufferedEventsBatch = NewGlobalIntSetting( + "history.maximumBufferedEventsBatch", + 100, + `MaximumBufferedEventsBatch is the maximum permissible number of buffered events for any given mutable state.`, + ) + MaximumBufferedEventsSizeInBytes = NewGlobalIntSetting( + "history.maximumBufferedEventsSizeInBytes", + 2*1024*1024, + `MaximumBufferedEventsSizeInBytes is the maximum permissible size of all buffered events for any given mutable +state. The total size is determined by the sum of the size, in bytes, of each HistoryEvent proto.`, + ) + MaximumSignalsPerExecution = NewNamespaceIntSetting( + "history.maximumSignalsPerExecution", + 10000, + `MaximumSignalsPerExecution is max number of signals supported by single execution`, + ) + ShardUpdateMinInterval = NewGlobalDurationSetting( + "history.shardUpdateMinInterval", + 5*time.Minute, + `ShardUpdateMinInterval is the minimal time interval which the shard info can be updated`, + ) + ShardFirstUpdateInterval = NewGlobalDurationSetting( + "history.shardFirstUpdateInterval", + 10*time.Second, + `ShardFirstUpdateInterval is the time interval after which the first shard info update will happen. + It should be smaller than ShardUpdateMinInterval`, + ) + ShardUpdateMinTasksCompleted = NewGlobalIntSetting( + "history.shardUpdateMinTasksCompleted", + 1000, + `ShardUpdateMinTasksCompleted is the minimum number of tasks which must be completed (across all queues) before the shard info can be updated. 
+Note that once history.shardUpdateMinInterval amount of time has passed we'll update the shard info regardless of the number of tasks completed. +When this config is zero or lower we will only update shard info at most once every history.shardUpdateMinInterval.`, + ) + ShardSyncMinInterval = NewGlobalDurationSetting( + "history.shardSyncMinInterval", + 5*time.Minute, + `ShardSyncMinInterval is the minimal time interval which the shard info should be synced to remote`, + ) + EmitShardLagLog = NewGlobalBoolSetting( + "history.emitShardLagLog", + false, + `EmitShardLagLog is whether to emit the shard lag log`, + ) + DefaultActivityRetryPolicy = NewNamespaceTypedSetting( + "history.defaultActivityRetryPolicy", + retrypolicy.DefaultDefaultRetrySettings, + `DefaultActivityRetryPolicy represents the out-of-box retry policy for activities where +the user has not specified an explicit RetryPolicy`, + ) + DefaultWorkflowRetryPolicy = NewNamespaceTypedSetting( + "history.defaultWorkflowRetryPolicy", + retrypolicy.DefaultDefaultRetrySettings, + `DefaultWorkflowRetryPolicy represents the out-of-box retry policy for unset fields +where the user has set an explicit RetryPolicy, but not specified all the fields`, + ) + AllowResetWithPendingChildren = NewNamespaceBoolSetting( + "history.allowResetWithPendingChildren", + true, + `Allows resetting of workflows with pending children when set to true`, + ) + HistoryMaxAutoResetPoints = NewNamespaceIntSetting( + "history.historyMaxAutoResetPoints", + primitives.DefaultHistoryMaxAutoResetPoints, + `HistoryMaxAutoResetPoints is the key for max number of auto reset points stored in mutableState`, + ) + EnableParentClosePolicy = NewNamespaceBoolSetting( + "history.enableParentClosePolicy", + true, + `EnableParentClosePolicy is whether to enable ParentClosePolicy`, + ) + ParentClosePolicyThreshold = NewNamespaceIntSetting( + "history.parentClosePolicyThreshold", + 10, + `ParentClosePolicyThreshold decides that parent close policy will be processed by sys
workers(if enabled) if +the number of children greater than or equal to this threshold`, + ) + NumParentClosePolicySystemWorkflows = NewGlobalIntSetting( + "history.numParentClosePolicySystemWorkflows", + 1000, + `NumParentClosePolicySystemWorkflows is key for number of parentClosePolicy system workflows running in total`, + ) + HistoryThrottledLogRPS = NewGlobalIntSetting( + "history.throttledLogRPS", + 4, + `HistoryThrottledLogRPS is the rate limit on number of log messages emitted per second for throttled logger`, + ) + WorkflowTaskHeartbeatTimeout = NewNamespaceDurationSetting( + "history.workflowTaskHeartbeatTimeout", + time.Minute*30, + `WorkflowTaskHeartbeatTimeout for workflow task heartbeat`, + ) + WorkflowTaskCriticalAttempts = NewGlobalIntSetting( + "history.workflowTaskCriticalAttempt", + 10, + `WorkflowTaskCriticalAttempts is the number of attempts for a workflow task that's regarded as critical`, + ) + WorkflowTaskRetryMaxInterval = NewGlobalDurationSetting( + "history.workflowTaskRetryMaxInterval", + time.Minute*10, + `WorkflowTaskRetryMaxInterval is the maximum interval added to a workflow task's startToClose timeout for slowing down retry`, + ) + EnableWorkflowTaskStampIncrementOnFailure = NewGlobalBoolSetting( + "history.enableWorkflowTaskStampIncrementOnFailure", + false, + `EnableWorkflowTaskStampIncrementOnFailure controls whether the workflow task stamp is incremented when a workflow task fails and is rescheduled`, + ) + DiscardSpeculativeWorkflowTaskMaximumEventsCount = NewGlobalIntSetting( + "history.discardSpeculativeWorkflowTaskMaximumEventsCount", + 10, + `If speculative workflow task shipped more than DiscardSpeculativeWorkflowTaskMaximumEventsCount events, it can't be discarded`, + ) + EnableDropRepeatedWorkflowTaskFailures = NewNamespaceBoolSetting( + "history.enableDropRepeatedWorkflowTaskFailures", + false, + `EnableDropRepeatedWorkflowTaskFailures whether to silently drop repeated workflow task failures`, + ) + 
SendTransientOrSpeculativeWorkflowTaskEvents = NewNamespaceBoolSetting( + "history.sendTransientOrSpeculativeWorkflowTaskEvents", + true, + `SendTransientOrSpeculativeWorkflowTaskEvents controls whether GetWorkflowExecutionHistory returns non-durable transient or speculative workflow task events. Enabled by default but can be disabled per namespace if it causes compatibility problems.`, + ) + DefaultWorkflowTaskTimeout = NewNamespaceDurationSetting( + "history.defaultWorkflowTaskTimeout", + primitives.DefaultWorkflowTaskTimeout, + `DefaultWorkflowTaskTimeout for a workflow task`, + ) + SkipReapplicationByNamespaceID = NewNamespaceIDBoolSetting( + "history.SkipReapplicationByNamespaceID", + false, + `SkipReapplicationByNamespaceID is whether skipping a event re-application for a namespace`, + ) + StandbyTaskReReplicationContextTimeout = NewNamespaceIDDurationSetting( + "history.standbyTaskReReplicationContextTimeout", + 30*time.Second, + `StandbyTaskReReplicationContextTimeout is the context timeout for standby task re-replication`, + ) + MaxBufferedQueryCount = NewGlobalIntSetting( + "history.MaxBufferedQueryCount", + 1, + `MaxBufferedQueryCount indicates max buffer query count`, + ) + MutableStateChecksumGenProbability = NewNamespaceIntSetting( + "history.mutableStateChecksumGenProbability", + 0, + `MutableStateChecksumGenProbability is the probability [0-100] that checksum will be generated for mutable state`, + ) + MutableStateChecksumVerifyProbability = NewNamespaceIntSetting( + "history.mutableStateChecksumVerifyProbability", + 0, + `MutableStateChecksumVerifyProbability is the probability [0-100] that checksum will be verified for mutable state`, + ) + MutableStateChecksumInvalidateBefore = NewGlobalFloatSetting( + "history.mutableStateChecksumInvalidateBefore", + 0, + `MutableStateChecksumInvalidateBefore is the epoch timestamp before which all checksums are to be discarded`, + ) + + ReplicationTaskApplyTimeout = NewGlobalDurationSetting( + 
"history.ReplicationTaskApplyTimeout", + 20*time.Second, + `ReplicationTaskApplyTimeout is the context timeout for replication task apply`, + ) + ReplicationTaskFetcherParallelism = NewGlobalIntSetting( + "history.ReplicationTaskFetcherParallelism", + 4, + `ReplicationTaskFetcherParallelism determines how many go routines we spin up for fetching tasks`, + ) + ReplicationTaskFetcherAggregationInterval = NewGlobalDurationSetting( + "history.ReplicationTaskFetcherAggregationInterval", + 2*time.Second, + `ReplicationTaskFetcherAggregationInterval determines how frequently the fetch requests are sent`, + ) + ReplicationTaskFetcherTimerJitterCoefficient = NewGlobalFloatSetting( + "history.ReplicationTaskFetcherTimerJitterCoefficient", + 0.15, + `ReplicationTaskFetcherTimerJitterCoefficient is the jitter for fetcher timer`, + ) + ReplicationTaskFetcherErrorRetryWait = NewGlobalDurationSetting( + "history.ReplicationTaskFetcherErrorRetryWait", + time.Second, + `ReplicationTaskFetcherErrorRetryWait is the wait time when fetcher encounters error`, + ) + ReplicationTaskProcessorErrorRetryWait = NewShardIDDurationSetting( + "history.ReplicationTaskProcessorErrorRetryWait", + 1*time.Second, + `ReplicationTaskProcessorErrorRetryWait is the initial retry wait when we see errors in applying replication tasks`, + ) + ReplicationTaskProcessorErrorRetryBackoffCoefficient = NewShardIDFloatSetting( + "history.ReplicationTaskProcessorErrorRetryBackoffCoefficient", + 1.2, + `ReplicationTaskProcessorErrorRetryBackoffCoefficient is the retry wait backoff time coefficient`, + ) + ReplicationTaskProcessorErrorRetryMaxInterval = NewShardIDDurationSetting( + "history.ReplicationTaskProcessorErrorRetryMaxInterval", + 5*time.Second, + `ReplicationTaskProcessorErrorRetryMaxInterval is the retry wait backoff max duration`, + ) + ReplicationTaskProcessorErrorRetryMaxAttempts = NewShardIDIntSetting( + "history.ReplicationTaskProcessorErrorRetryMaxAttempts", + 80, + 
`ReplicationTaskProcessorErrorRetryMaxAttempts is the max retry attempts for applying replication tasks`, + ) + ReplicationTaskProcessorErrorRetryExpiration = NewShardIDDurationSetting( + "history.ReplicationTaskProcessorErrorRetryExpiration", + 5*time.Minute, + `ReplicationTaskProcessorErrorRetryExpiration is the max retry duration for applying replication tasks`, + ) + ReplicationTaskProcessorNoTaskInitialWait = NewShardIDDurationSetting( + "history.ReplicationTaskProcessorNoTaskInitialWait", + 2*time.Second, + `ReplicationTaskProcessorNoTaskInitialWait is the wait time when no task is returned`, + ) + ReplicationTaskProcessorCleanupInterval = NewShardIDDurationSetting( + "history.ReplicationTaskProcessorCleanupInterval", + 1*time.Minute, + `ReplicationTaskProcessorCleanupInterval determines how frequently to clean up the replication queue`, + ) + ReplicationTaskProcessorCleanupJitterCoefficient = NewShardIDFloatSetting( + "history.ReplicationTaskProcessorCleanupJitterCoefficient", + 0.15, + `ReplicationTaskProcessorCleanupJitterCoefficient is the jitter for cleanup timer`, + ) + ReplicationTaskProcessorHostQPS = NewGlobalFloatSetting( + "history.ReplicationTaskProcessorHostQPS", + 1500, + `ReplicationTaskProcessorHostQPS is the qps of task processing rate limiter on host level`, + ) + ReplicationTaskProcessorShardQPS = NewGlobalFloatSetting( + "history.ReplicationTaskProcessorShardQPS", + 30, + `ReplicationTaskProcessorShardQPS is the qps of task processing rate limiter on shard level`, + ) + ReplicationEnableDLQMetrics = NewGlobalBoolSetting( + "history.ReplicationEnableDLQMetrics", + true, + `ReplicationEnableDLQMetrics is the flag to emit DLQ metrics`, + ) + ReplicationEnableUpdateWithNewTaskMerge = NewGlobalBoolSetting( + "history.ReplicationEnableUpdateWithNewTaskMerge", + false, + `ReplicationEnableUpdateWithNewTaskMerge is the flag controlling whether replication task merging logic +should be enabled for non continuedAsNew workflow UpdateWithNew case.`, + ) + 
ReplicationMultipleBatches = NewGlobalBoolSetting( + "history.ReplicationMultipleBatches", + false, + `ReplicationMultipleBatches is the flag to enable replication of multiple history event batches`, + ) + HistoryTaskDLQEnabled = NewGlobalBoolSetting( + "history.TaskDLQEnabled", + true, + `HistoryTaskDLQEnabled enables the history task DLQ. This applies to internal tasks like transfer and timer tasks. +Do not turn this on if you aren't using Cassandra as the history task DLQ is not implemented for other databases.`, + ) + HistoryTaskDLQUnexpectedErrorAttempts = NewGlobalIntSetting( + "history.TaskDLQUnexpectedErrorAttempts", + 70, // 70 attempts takes about an hour + `HistoryTaskDLQUnexpectedErrorAttempts is the number of task execution attempts before sending the task to DLQ.`, + ) + HistoryTaskDLQInternalErrors = NewGlobalBoolSetting( + "history.TaskDLQInternalErrors", + false, + `HistoryTaskDLQInternalErrors causes history task processing to send tasks failing with serviceerror.Internal to +the dlq (or will drop them if not enabled)`, + ) + HistoryTaskDLQErrorPattern = NewGlobalStringSetting( + "history.TaskDLQErrorPattern", + "", + `HistoryTaskDLQErrorPattern specifies a regular expression. 
If a task processing error matches with this regex, +that task will be sent to DLQ.`, + ) + + MaxLocalParentWorkflowVerificationDuration = NewGlobalDurationSetting( + "history.maxLocalParentWorkflowVerificationDuration", + 5*time.Minute, + `MaxLocalParentWorkflowVerificationDuration controls the maximum duration to verify on the local cluster before requesting to resend parent workflow.`, + ) + + ReplicationStreamSyncStatusDuration = NewGlobalDurationSetting( + "history.ReplicationStreamSyncStatusDuration", + 1*time.Second, + `ReplicationStreamSyncStatusDuration sync replication status duration`, + ) + ReplicationProcessorSchedulerQueueSize = NewGlobalIntSetting( + "history.ReplicationProcessorSchedulerQueueSize", + 128, + `ReplicationProcessorSchedulerQueueSize is the replication task executor queue size`, + ) + ReplicationProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.ReplicationProcessorSchedulerWorkerCount", + 512, + `ReplicationProcessorSchedulerWorkerCount is the replication task executor worker count`, + ) + ReplicationLowPriorityProcessorSchedulerWorkerCount = NewGlobalIntSetting( + "history.ReplicationLowPriorityProcessorSchedulerWorkerCount", + 128, + `ReplicationLowPriorityProcessorSchedulerWorkerCount is the low priority replication task executor worker count`, + ) + ReplicationLowPriorityTaskParallelism = NewGlobalIntSetting( + "history.ReplicationLowPriorityTaskParallelism", + 1, + `ReplicationLowPriorityTaskParallelism is the number of executions' low priority replication tasks that can be processed in parallel`, + ) + + EnableReplicationTaskBatching = NewGlobalBoolSetting( + "history.EnableReplicationTaskBatching", + false, + `EnableReplicationTaskBatching is a feature flag for batching replicate history event task`, + ) + EnableReplicationTaskTieredProcessing = NewGlobalBoolSetting( + "history.EnableReplicationTaskTieredProcessing", + false, + `EnableReplicationTaskTieredProcessing is a feature flag for enabling tiered replication 
task processing stack`, + ) + ReplicationStreamSenderHighPriorityQPS = NewGlobalIntSetting( + "history.ReplicationStreamSenderHighPriorityQPS", + 100, + `Maximum number of high priority replication tasks that can be sent per second per shard`, + ) + ReplicationStreamSenderLowPriorityQPS = NewGlobalIntSetting( + "history.ReplicationStreamSenderLowPriorityQPS", + 100, + `Maximum number of low priority replication tasks that can be sent per second per shard`, + ) + ReplicationStreamEventLoopRetryMaxAttempts = NewGlobalIntSetting( + "history.ReplicationStreamEventLoopRetryMaxAttempts", + 100, // 0 means retry forever + `Max attempts for retrying replication stream event loop`, + ) + ReplicationReceiverMaxOutstandingTaskCount = NewGlobalIntSetting( + "history.ReplicationReceiverMaxOutstandingTaskCount", + 500, + `Maximum number of outstanding tasks allowed for a single shard in the stream receiver`, + ) + ReplicationReceiverSlowSubmissionLatencyThreshold = NewGlobalDurationSetting( + "history.ReplicationReceiverSubmissionLatencyThreshold", + 1*time.Second, + `Scheduler latency threshold for recording slow scheduler submission`, + ) + ReplicationReceiverSlowSubmissionWindow = NewGlobalDurationSetting( + "history.ReplicationReceiverSlowSubmissionWindow", + 10*time.Second, + `Time window within which a slow submission will pause replication flow control`, + ) + EnableReplicationReceiverSlowSubmissionFlowControl = NewGlobalBoolSetting( + "history.EnableReplicationReceiverSlowSubmissionFlowControl", + false, + `Enable slow submission flow control check in replication receiver`, + ) + ReplicationResendMaxBatchCount = NewGlobalIntSetting( + "history.ReplicationResendMaxBatchCount", + 10, + `Maximum number of resend events batch for a single replication request`, + ) + ReplicationProgressCacheMaxSize = NewGlobalIntSetting( + "history.ReplicationProgressCacheMaxSize", + 128000, + `ReplicationProgressCacheMaxSize is the maximum number of entries in the replication progress 
cache`, + ) + ReplicationProgressCacheTTL = NewGlobalDurationSetting( + "history.ReplicationProgressCacheTTL", + time.Hour, + `ReplicationProgressCacheTTL is TTL of replication progress cache`, + ) + ReplicationStreamSendEmptyTaskDuration = NewGlobalDurationSetting( + "history.ReplicationStreamSendEmptyTaskDuration", + time.Minute, + `ReplicationStreamSendEmptyTaskDuration is the interval to sync status when there is no replication task`, + ) + ReplicationStreamReceiverLivenessMultiplier = NewGlobalIntSetting( + "history.ReplicationReceiverLivenessMultiplier", + 3, + "ReplicationStreamReceiverLivenessMultiplier is the multiplier of liveness check interval on stream receiver", + ) + ReplicationStreamSenderLivenessMultiplier = NewGlobalIntSetting( + "history.ReplicationStreamSenderLivenessMultiplier", + 10, + "ReplicationStreamSenderLivenessMultiplier is the multiplier of liveness check interval on stream sender", + ) + EnableHistoryReplicationRateLimiter = NewNamespaceBoolSetting( + "history.EnableHistoryReplicationRateLimiter", + false, + "EnableHistoryReplicationRateLimiter is the feature flag to enable rate limiter on history event replication", + ) + ReplicationEnableRateLimit = NewGlobalBoolSetting( + "history.ReplicationEnableRateLimit", + true, + `ReplicationEnableRateLimit is the feature flag to enable replication global rate limiter`, + ) + ReplicationEnableRateLimitShadowMode = NewGlobalBoolSetting( + "history.ReplicationEnableRateLimitShadowMode", + false, + `ReplicationEnableRateLimitShadowMode enables shadow mode for replication rate limiter (emit metrics only, no throttling)`, + ) + ReplicationStreamSenderErrorRetryWait = NewGlobalDurationSetting( + "history.ReplicationStreamSenderErrorRetryWait", + 1*time.Second, + `ReplicationStreamSenderErrorRetryWait is the initial retry wait when we see errors in sending replication tasks`, + ) + ReplicationStreamSenderErrorRetryBackoffCoefficient = NewGlobalFloatSetting( + 
"history.ReplicationStreamSenderErrorRetryBackoffCoefficient", + 1.2, + `ReplicationStreamSenderErrorRetryBackoffCoefficient is the retry wait backoff time coefficient`, + ) + ReplicationStreamSenderErrorRetryMaxInterval = NewGlobalDurationSetting( + "history.ReplicationStreamSenderErrorRetryMaxInterval", + 3*time.Second, + `ReplicationStreamSenderErrorRetryMaxInterval is the retry wait backoff max duration`, + ) + ReplicationStreamSenderErrorRetryMaxAttempts = NewGlobalIntSetting( + "history.ReplicationStreamSenderErrorRetryMaxAttempts", + 80, + `ReplicationStreamSenderErrorRetryMaxAttempts is the max retry attempts for sending replication tasks`, + ) + ReplicationStreamSenderErrorRetryExpiration = NewGlobalDurationSetting( + "history.ReplicationStreamSenderErrorRetryExpiration", + 3*time.Minute, + `ReplicationStreamSenderErrorRetryExpiration is the max retry duration for sending replication tasks`, + ) + ReplicationExecutableTaskErrorRetryWait = NewGlobalDurationSetting( + "history.ReplicationExecutableTaskErrorRetryWait", + 1*time.Second, + `ReplicationExecutableTaskErrorRetryWait is the initial retry wait when we see errors in executing replication tasks`, + ) + ReplicationExecutableTaskErrorRetryBackoffCoefficient = NewGlobalFloatSetting( + "history.ReplicationExecutableTaskErrorRetryBackoffCoefficient", + 1.2, + `ReplicationExecutableTaskErrorRetryBackoffCoefficient is the retry wait backoff time coefficient`, + ) + ReplicationExecutableTaskErrorRetryMaxInterval = NewGlobalDurationSetting( + "history.ReplicationExecutableTaskErrorRetryMaxInterval", + 5*time.Second, + `ReplicationExecutableTaskErrorRetryMaxInterval is the retry wait backoff max duration`, + ) + ReplicationExecutableTaskErrorRetryMaxAttempts = NewGlobalIntSetting( + "history.ReplicationExecutableTaskErrorRetryMaxAttempts", + 80, + `ReplicationExecutableTaskErrorRetryMaxAttempts is the max retry attempts for executing replication tasks`, + ) + ReplicationExecutableTaskErrorRetryExpiration = 
NewGlobalDurationSetting( + "history.ReplicationExecutableTaskErrorRetryExpiration", + 10*time.Minute, + `ReplicationExecutableTaskErrorRetryExpiration is the max retry duration for executing replication tasks`, + ) + WorkflowIdReuseMinimalInterval = NewNamespaceDurationSetting( + "history.workflowIdReuseMinimalInterval", + 1*time.Second, + `WorkflowIdReuseMinimalInterval is used for timing how soon users can create new workflow with the same workflow ID.`, + ) + EnableWorkflowIdReuseStartTimeValidation = NewNamespaceBoolSetting( + "history.enableWorkflowIdReuseStartTimeValidation", + false, + `If true, validate the start time of the old workflow is older than WorkflowIdReuseMinimalInterval when reusing workflow ID.`, + ) + BusinessIDReuseRate = NewNamespaceIntSetting( + "history.businessIDReuseRate", + 0, + `BusinessIDReuseRate limits the rate of new execution creation per +(namespace, businessID, archetype) tuple on a single history host. 0 = disabled (default).`, + ) + BusinessIDReuseBurstRatio = NewNamespaceFloatSetting( + "history.businessIDReuseBurstRatio", + 1.0, + `BusinessIDReuseBurstRatio is the burst-to-rate ratio for the per-(namespace, businessID, archetype) +start rate limiter. Burst = max(1, int(rps * ratio)). Default 1.0 (no burst above rate).`, + ) + BusinessIDReuseLimiterCacheSize = NewGlobalIntSetting( + "history.businessIDReuseLimiterCacheSize", + 10000, + `BusinessIDReuseLimiterCacheSize is the max number of per-(namespace, businessID, archetype) rate limiters +cached on a single history shard. Requires service restart to take effect.`, + ) + BusinessIDReuseLimiterCacheTTL = NewGlobalDurationSetting( + "history.businessIDReuseLimiterCacheTTL", + 60*time.Second, + `BusinessIDReuseLimiterCacheTTL is the TTL for per-(namespace, businessID, archetype) rate limiter cache entries. 
+Requires service restart to take effect.`, + ) + HealthPersistenceLatencyFailure = NewGlobalFloatSetting( + "history.healthPersistenceLatencyFailure", + 500, + "History service health check on persistence average latency (millisecond) threshold", + ) + HealthPersistenceErrorRatio = NewGlobalFloatSetting( + "history.healthPersistenceErrorRatio", + 0.90, + "History service health check on persistence error ratio", + ) + HealthRPCLatencyFailure = NewGlobalFloatSetting( + "history.healthRPCLatencyFailure", + 500, + "History service health check on RPC average latency (millisecond) threshold", + ) + HealthRPCErrorRatio = NewGlobalFloatSetting( + "history.healthRPCErrorRatio", + 0.90, + "History service health check on RPC error ratio", + ) + HealthHistoryInitializationTime = NewGlobalDurationSetting( + "history.healthHistoryInitializationTime", + 60*time.Second, + "gRPC health server NOT_SERVING will be suppressed from DeepHealthCheck for this long") + SendRawHistoryBetweenInternalServices = NewGlobalBoolSetting( + "history.sendRawHistoryBetweenInternalServices", + false, + `SendRawHistoryBetweenInternalServices is whether to send raw history events between internal temporal services`, + ) + // SendRawHistoryBytesToMatchingService controls which field is used when sending raw history + // from history service to matching service. IMPORTANT: Only enable this flag after all services + // (history, matching, frontend) are upgraded to a version that supports this feature. + // NOTE: This flag only has effect when SendRawHistoryBetweenInternalServices is also enabled. + // If SendRawHistoryBetweenInternalServices is false, this flag is ignored. + SendRawHistoryBytesToMatchingService = NewGlobalBoolSetting( + "history.sendRawHistoryBytesToMatchingService", + false, + `SendRawHistoryBytesToMatchingService controls whether to use the new raw_history_bytes field (21) instead of raw_history field (20) when sending history to matching service. 
Only enable after all services are upgraded. NOTE: This flag only has effect when SendRawHistoryBetweenInternalServices is also enabled.`, + ) + + EnableChasm = NewNamespaceBoolSetting( + "history.enableChasm", + true, + "Use real chasm tree implementation instead of the noop one", + ) + + ChasmMaxInMemoryPureTasks = NewGlobalIntSetting( + "history.chasmMaxInMemoryPureTasks", + 32, + `ChasmMaxInMemoryPureTasks is the maximum number of physical pure tasks that can be held in memory for best effort task deletion.`, + ) + + EnableCHASMSchedulerCreation = NewNamespaceBoolSetting( + "history.enableCHASMSchedulerCreation", + false, + `EnableCHASMSchedulerCreation controls whether new schedules are created using the CHASM (V2) implementation +instead of the existing (V1) implementation.`, + ) + + EnableCHASMSchedulerRouting = NewNamespaceBoolSetting( + "history.enableCHASMSchedulerRouting", + true, + `EnableCHASMSchedulerRouting controls whether schedule RPCs are routed to the CHASM (V2) implementation +first (with fallback to V1), excluding CreateSchedule.`, + ) + + EnableCHASMSchedulerMigration = NewNamespaceBoolSetting( + "history.enableCHASMSchedulerMigration", + false, + `EnableCHASMSchedulerMigration controls whether existing V1 schedules are automatically migrated +to the CHASM (V2) implementation on active scheduler workflows.`, + ) + + EnableCHASMSchedulerSentinels = NewNamespaceBoolSetting( + "history.enableCHASMSchedulerSentinels", + true, + `EnableCHASMSchedulerSentinels enables ID-space collision sentinels, and must be enabled and propagated in advance of EnableCHASMSchedulerCreation.`, + ) + + EnableCHASMCallbacks = NewNamespaceBoolSetting( + "history.enableCHASMCallbacks", + false, + `Controls whether new callbacks are created using the CHASM implementation +instead of the previous HSM backed implementation.`, + ) + + EnableCHASMSignalBacklinks = NewNamespaceBoolSetting( + "history.enableCHASMSignalBacklinks", + false, + `Controls whether incoming signal 
request IDs are tracked in the CHASM IncomingSignals +map to enable DescribeWorkflow to resolve RequestIDRef signal backlinks. Requires EnableChasm. +Only enable once all servers in the fleet have been upgraded to a version that understands +the IncomingSignals CHASM field.`, + ) + + VersionMembershipCacheTTL = NewGlobalDurationSetting( + "history.versionMembershipCacheTTL", + 1*time.Second, + `TTL for caching RPC results that check whether a version is present in a task queue.`, + ) + + VersionMembershipCacheMaxSize = NewGlobalIntSetting( + "history.versionMembershipCacheMaxSize", + 10000, + `Maximum number of entries in the version membership cache.`, + ) + + ReactivationSignalDedupCacheMaxSize = NewGlobalIntSetting( + "worker.reactivationSignalDedupCacheMaxSize", + 10000, + `Maximum number of entries in the per-pod reactivation-signal dedup cache on the + worker deployment client. Each entry tracks the highest revision signaled for one + target version workflow.`, + ) + + EnableVersionReactivationSignals = NewGlobalBoolSetting( + "history.enableVersionReactivationSignals", + false, + `EnableVersionReactivationSignals controls whether reactivation signals are sent to version workflows + when workflows are pinned to a potentially DRAINED/INACTIVE version. 
Set to false to disable signals + globally if load becomes problematic.`, + ) + + RoutingInfoCacheTTL = NewGlobalDurationSetting( + "history.routingInfoCacheTTL", + 1*time.Second, + `TTL for caching task queue routing info (deployment versions and ramping state).`, + ) + + RoutingInfoCacheMaxSize = NewGlobalIntSetting( + "history.routingInfoCacheMaxSize", + 10000, + `Maximum number of entries in the routing info cache.`, + ) + + ExternalPayloadsEnabled = NewNamespaceBoolSetting( + "history.externalPayloadsEnabled", + true, + `ExternalPayloadsEnabled controls whether external payload features are enabled for a namespace.`, + ) // keys for worker - // WorkerPersistenceMaxQPS is the max qps worker host can query DB - WorkerPersistenceMaxQPS = "worker.persistenceMaxQPS" - // WorkerPersistenceGlobalMaxQPS is the max qps worker cluster can query DB - WorkerPersistenceGlobalMaxQPS = "worker.persistenceGlobalMaxQPS" - // WorkerPersistenceNamespaceMaxQPS is the max qps each namespace on worker host can query DB - WorkerPersistenceNamespaceMaxQPS = "worker.persistenceNamespaceMaxQPS" - // WorkerPersistenceNamespaceMaxQPS is the max qps each namespace in worker cluster can query DB - WorkerPersistenceGlobalNamespaceMaxQPS = "worker.persistenceGlobalNamespaceMaxQPS" - // WorkerEnablePersistencePriorityRateLimiting indicates if priority rate limiting is enabled in worker persistence client - WorkerEnablePersistencePriorityRateLimiting = "worker.enablePersistencePriorityRateLimiting" - // WorkerPersistenceDynamicRateLimitingParams is a map that contains all adjustable dynamic rate limiting params - // see DefaultDynamicRateLimitingParams for available options and defaults - WorkerPersistenceDynamicRateLimitingParams = "worker.persistenceDynamicRateLimitingParams" - // WorkerIndexerConcurrency is the max concurrent messages to be processed at any given time - WorkerIndexerConcurrency = "worker.indexerConcurrency" - // WorkerESProcessorNumOfWorkers is num of workers for 
esProcessor - WorkerESProcessorNumOfWorkers = "worker.ESProcessorNumOfWorkers" - // WorkerESProcessorBulkActions is max number of requests in bulk for esProcessor - WorkerESProcessorBulkActions = "worker.ESProcessorBulkActions" - // WorkerESProcessorBulkSize is max total size of bulk in bytes for esProcessor - WorkerESProcessorBulkSize = "worker.ESProcessorBulkSize" - // WorkerESProcessorFlushInterval is flush interval for esProcessor - WorkerESProcessorFlushInterval = "worker.ESProcessorFlushInterval" - // WorkerESProcessorAckTimeout is the timeout that store will wait to get ack signal from ES processor. - // Should be at least WorkerESProcessorFlushInterval+